repo_name | path | copies | size | content | license
---|---|---|---|---|---|
nelson-liu/scikit-learn | examples/model_selection/plot_learning_curve.py | 76 | 4509 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve is often
found in more complex datasets: the training score is very high at the
beginning and decreases, while the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines the minimum and maximum y-values plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds,
- an object to be used as a cross-validation generator,
- an iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = r"Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
jackwluo/py-quantmod | quantmod/ta.py | 1 | 33467 | """Wrappers around Ta-Lib technical indicators
Python-native indicators live in the 'tanolib.py' file.
"""
import numpy as np
import pandas as pd
import talib
from . import utils
from .valid import VALID_TA_KWARGS
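# Usage sketch (illustrative only): the add_* functions below are assumed to be
# bound as methods of a quantmod Chart object, so a typical call chain would
# look roughly like
#
#     import quantmod as qm
#     ch = qm.get_symbol('AAPL')      # assumed data helper returning a Chart
#     ch.add_SMA(timeperiod=50)
#     ch.add_RSI(timeperiod=14)
#     fig = ch.to_figure()            # assumed plotting entry point
#
# The Chart construction and plotting names above are assumptions based on the
# package layout, not guaranteed API.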
# Overlap studies
def add_MA(self, timeperiod=20, matype=0,
type='line', color='secondary', **kwargs):
"""Moving Average (customizable)."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'MA({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.MA(self.df[self.cl].values,
timeperiod, matype)
def add_SMA(self, timeperiod=20,
type='line', color='secondary', **kwargs):
"""Simple Moving Average."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'SMA({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.SMA(self.df[self.cl].values,
timeperiod)
def add_EMA(self, timeperiod=26,
type='line', color='secondary', **kwargs):
"""Exponential Moving Average."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'EMA({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.EMA(self.df[self.cl].values,
timeperiod)
def add_WMA(self, timeperiod=20,
type='line', color='secondary', **kwargs):
"""Weighted Moving Average."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'WMA({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.WMA(self.df[self.cl].values,
timeperiod)
def add_DEMA(self, timeperiod=26,
type='line', color='secondary', **kwargs):
"""Double Exponential Moving Average."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'DEMA({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.DEMA(self.df[self.cl].values,
timeperiod)
def add_TEMA(self, timeperiod=26,
type='line', color='secondary', **kwargs):
"""Triple Exponential Moving Average."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'TEMA({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.TEMA(self.df[self.cl].values,
timeperiod)
def add_T3(self, timeperiod=20, vfactor=0.7,
type='line', color='secondary', **kwargs):
"""T3 Exponential Moving Average."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'T3({}, {})'.format(str(timeperiod), str(vfactor))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.T3(self.df[self.cl].values,
timeperiod, vfactor)
def add_KAMA(self, timeperiod=20,
type='line', color='secondary', **kwargs):
"""Kaufman Adaptive Moving Average."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'KAMA({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.KAMA(self.df[self.cl].values,
timeperiod)
def add_TRIMA(self, timeperiod=20,
type='line', color='secondary', **kwargs):
"""Triangular Moving Average."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'TRIMA({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.TRIMA(self.df[self.cl].values,
timeperiod)
def add_MAMA(self, fastlimit=0.5, slowlimit=0.05,
types=['line', 'line'], colors=['secondary', 'tertiary'],
**kwargs):
"""MESA Adaptive Moving Average.
Note that the first argument of types and colors refers to MAMA while the
second argument refers to FAMA.
"""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['kinds']
if 'type' in kwargs:
types = [kwargs['type']] * 2
if 'color' in kwargs:
colors = [kwargs['color']] * 2
mama = 'MAMA({},{})'.format(str(fastlimit), str(slowlimit))
fama = 'FAMA({},{})'.format(str(fastlimit), str(slowlimit))
self.pri[mama] = dict(type=types[0], color=colors[0])
self.pri[fama] = dict(type=types[1], color=colors[1])
self.ind[mama], self.ind[fama] = talib.MAMA(self.df[self.cl].values,
fastlimit, slowlimit)
def add_MAVP(self, periods, minperiod=2, maxperiod=30, matype=0,
type='line', color='secondary', **kwargs):
"""Moving Average with Variable Period.
Parameters
----------
periods : Series or array
Moving Average period over timeframe to analyze, as a 1-dimensional
shape of same length as chart.
"""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
if isinstance(periods, pd.Series):
periods = periods.values
elif isinstance(periods, np.ndarray):
pass
else:
raise TypeError("Invalid periods {0}. "
"It should be Series or array."
.format(periods))
name = 'MAVP({},{})'.format(str(minperiod), str(maxperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.MAVP(self.df[self.cl].values,
periods, minperiod, maxperiod, matype)
def add_BBANDS(self, timeperiod=20, nbdevup=2, nbdevdn=2, matype=0,
types=['line_dashed_thin', 'line_dashed_thin'],
colors=['tertiary', 'grey_strong'], **kwargs):
"""Bollinger Bands.
Note that the first argument of types and colors refers to upper and lower
bands while second argument refers to middle band. (Upper and lower are
symmetrical arguments, hence only 2 needed.)
"""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['kinds']
if 'type' in kwargs:
types = [kwargs['type']] * 2
if 'color' in kwargs:
colors = [kwargs['color']] * 2
name = 'BBANDS({},{},{})'.format(str(timeperiod),
str(nbdevup),
str(nbdevdn))
ubb = name + '[Upper]'
bb = name
lbb = name + '[Lower]'
self.pri[ubb] = dict(type='line_' + types[0][5:],
color=colors[0])
self.pri[bb] = dict(type='area_' + types[1][5:],
color=colors[1], fillcolor='fill')
self.pri[lbb] = dict(type='area_' + types[0][5:],
color=colors[0], fillcolor='fill')
(self.ind[ubb],
self.ind[bb],
self.ind[lbb]) = talib.BBANDS(self.df[self.cl].values,
timeperiod, nbdevup, nbdevdn, matype)
def add_HT_TRENDLINE(self,
type='line', color='secondary', **kwargs):
"""Hilbert Transform Instantaneous Trendline."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'HT_TRENDLINE'
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.HT_TRENDLINE(self.df[self.cl].values)
def add_MIDPOINT(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Midpoint Price over Period."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'MIDPOINT({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.MIDPOINT(self.df[self.cl].values, timeperiod)
def add_SAR(self, acceleration=0.02, maximum=0.20,
type='scatter', color='tertiary', **kwargs):
"""Parabolic SAR."""
if not (self.has_high and self.has_low):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'SAR({},{})'.format(str(acceleration), str(maximum))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.SAR(self.df[self.hi].values,
self.df[self.lo].values,
acceleration, maximum)
def add_SAREXT(self, startvalue=0, offsetonreverse=0,
accelerationinitlong=0.02, accelerationlong=0.02,
accelerationmaxlong=0.20, accelerationinitshort=0.02,
accelerationshort=0.02, accelerationmaxshort=0.20,
type='scatter', color='tertiary', **kwargs):
"""Parabolic SAR Extended."""
if not (self.has_high and self.has_low):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = ('SAREXT({},{},{},{},'
'{},{},{},{})'.format(str(startvalue), str(offsetonreverse),
str(accelerationinitlong),
str(accelerationlong),
str(accelerationmaxlong),
str(accelerationinitshort),
str(accelerationshort),
str(accelerationmaxshort)))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.SAREXT(self.df[self.hi].values,
self.df[self.lo].values,
startvalue, offsetonreverse,
accelerationinitlong,
accelerationlong,
accelerationmaxlong,
accelerationinitshort,
accelerationshort,
accelerationmaxshort)
self.ind[name] = self.ind[name].abs()  # SAREXT marks short periods with negative values; plot the absolute value as a workaround
# Momentum indicators
def add_APO(self, fastperiod=12, slowperiod=26, matype=0,
type='line', color='secondary', **kwargs):
"""Absolute Price Oscillator."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'APO({}, {})'.format(str(fastperiod), str(slowperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.APO(self.df[self.cl].values,
fastperiod, slowperiod, matype)
def add_AROON(self, timeperiod=14,
types=['line', 'line'],
colors=['increasing', 'decreasing'],
**kwargs):
"""Aroon indicators.
Note that the first argument of types and colors refers to Aroon up while
the second argument refers to Aroon down.
"""
if not (self.has_high and self.has_low):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['kinds']
if 'type' in kwargs:
types = [kwargs['type']] * 2
if 'color' in kwargs:
colors = [kwargs['color']] * 2
name = 'AROON({})'.format(str(timeperiod))
uaroon = name + ' [Up]'
daroon = name + ' [Dn]'
self.sec[uaroon] = dict(type=types[0], color=colors[0])
self.sec[daroon] = dict(type=types[1], color=colors[1], on=uaroon)
self.ind[uaroon], self.ind[daroon] = talib.AROON(self.df[self.hi].values,
self.df[self.lo].values,
timeperiod)
def add_AROONOSC(self, timeperiod=14,
type='area', color='secondary', **kwargs):
"""Aroon Oscillator."""
if not (self.has_high and self.has_low):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'AROONOSC({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.AROONOSC(self.df[self.hi].values,
self.df[self.lo].values,
timeperiod)
def add_BOP(self,
type='histogram', color='tertiary', **kwargs):
"""Balance of Power."""
if not self.has_OHLC:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'BOP'
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.BOP(self.df[self.op].values,
self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values)
def add_CCI(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Commodity Channel Index."""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'CCI({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.CCI(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
timeperiod)
def add_CMO(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Chande Momentum Oscillator."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'CMO({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.CMO(self.df[self.cl].values,
timeperiod)
def add_ADX(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Average Directional Movement Index."""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'ADX({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.ADX(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
timeperiod)
def add_ADXR(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Average Directional Movement Index Rating."""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'ADXR({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.ADXR(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
timeperiod)
def add_DX(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Directional Movement Index."""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'DX({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.DX(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
timeperiod)
def add_MINUS_DI(self, timeperiod=14,
type='line', color='decreasing', **kwargs):
"""Minus Directional Indicator."""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'MINUS_DI({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.MINUS_DI(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
timeperiod)
def add_PLUS_DI(self, timeperiod=14,
type='line', color='increasing', **kwargs):
"""Plus Directional Indicator."""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'PLUS_DI({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.PLUS_DI(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
timeperiod)
def add_MINUS_DM(self, timeperiod=14,
type='line', color='decreasing', **kwargs):
"""Minus Directional Movement."""
if not (self.has_high and self.has_low):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'MINUS_DM({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.MINUS_DM(self.df[self.hi].values,
self.df[self.lo].values,
timeperiod)
def add_PLUS_DM(self, timeperiod=14,
type='line', color='increasing', **kwargs):
"""Plus Directional Movement."""
if not (self.has_high and self.has_low):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'PLUS_DM({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.PLUS_DM(self.df[self.hi].values,
self.df[self.lo].values,
timeperiod)
def add_MACD(self, fastperiod=12, slowperiod=26, signalperiod=9,
types=['line', 'line', 'histogram'],
colors=['primary', 'tertiary', 'fill'],
**kwargs):
"""Moving Average Convergence Divergence.
Note that the first argument of types and colors refers to MACD,
the second argument refers to MACD signal line and the third argument
refers to MACD histogram.
"""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['kinds']
if 'type' in kwargs:
types = [kwargs['type']] * 3
if 'color' in kwargs:
colors = [kwargs['color']] * 3
name = 'MACD({},{},{})'.format(str(fastperiod),
str(slowperiod),
str(signalperiod))
macd = name
smacd = name + '[Sign]'
hmacd = name + '[Hist]'
self.sec[macd] = dict(type=types[0], color=colors[0])
self.sec[smacd] = dict(type=types[1], color=colors[1], on=macd)
self.sec[hmacd] = dict(type=types[2], color=colors[2], on=macd)
(self.ind[macd],
self.ind[smacd],
self.ind[hmacd]) = talib.MACD(self.df[self.cl].values,
fastperiod, slowperiod,
signalperiod)
def add_MACDEXT(self, fastperiod=12, fastmatype=0,
slowperiod=26, slowmatype=0,
signalperiod=9, signalmatype=0,
types=['line', 'line', 'histogram'],
colors=['primary', 'tertiary', 'fill'],
**kwargs):
"""Moving Average Convergence Divergence with Controllable MA Type.
Note that the first argument of types and colors refers to MACD,
the second argument refers to MACD signal line and the third argument
refers to MACD histogram.
"""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['kinds']
if 'type' in kwargs:
types = [kwargs['type']] * 3
if 'color' in kwargs:
colors = [kwargs['color']] * 3
name = 'MACDEXT({},{},{})'.format(str(fastperiod),
str(slowperiod),
str(signalperiod))
macd = name
smacd = name + '[Sign]'
hmacd = name + '[Hist]'
self.sec[macd] = dict(type=types[0], color=colors[0])
self.sec[smacd] = dict(type=types[1], color=colors[1], on=macd)
self.sec[hmacd] = dict(type=types[2], color=colors[2], on=macd)
(self.ind[macd],
self.ind[smacd],
self.ind[hmacd]) = talib.MACDEXT(self.df[self.cl].values,
fastperiod, fastmatype,
slowperiod, slowmatype,
signalperiod, signalmatype)
def add_MFI(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Money Flow Index."""
if not (self.has_high and self.has_low and
self.has_close and self.has_volume):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'MFI({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.MFI(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
self.df[self.vo].values,
timeperiod)
def add_MOM(self, timeperiod=10,
type='line', color='secondary', **kwargs):
"""Momentum Indicator."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'MOM({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.MOM(self.df[self.cl].values,
timeperiod)
def add_PPO(self, fastperiod=12, slowperiod=26, matype=0,
type='line', color='secondary',
**kwargs):
"""Percent Price Oscillator."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'PPO({},{})'.format(str(fastperiod), str(slowperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.PPO(self.df[self.cl].values,
fastperiod, slowperiod,
matype)
def add_ROC(self, timeperiod=10,
type='line', color='tertiary', **kwargs):
"""Rate of Change."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'ROC({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.ROC(self.df[self.cl].values,
timeperiod)
def add_ROCP(self, timeperiod=10,
type='line', color='tertiary', **kwargs):
"""Rate of Change (Percentage)."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'ROCP({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.ROCP(self.df[self.cl].values,
timeperiod)
def add_ROCR(self, timeperiod=10,
type='line', color='tertiary', **kwargs):
"""Rate of Change (Ratio)."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'ROCR({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.ROCR(self.df[self.cl].values,
timeperiod)
def add_ROCR100(self, timeperiod=10,
type='line', color='tertiary', **kwargs):
"""Rate of Change (Ratio * 100)."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'ROCR100({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.ROCR100(self.df[self.cl].values,
timeperiod)
def add_RSI(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Relative Strength Index."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'RSI({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.RSI(self.df[self.cl].values,
timeperiod)
def add_STOCH(self, fastk_period=5, slowk_period=3,
slowk_matype=0, slowd_period=3, slowd_matype=0,
types=['line', 'line'],
colors=['primary', 'tertiary'],
**kwargs):
"""Slow Stochastic Oscillator.
Note that the first argument of types and colors refers to Slow Stoch %K,
while second argument refers to Slow Stoch %D
(signal line of %K obtained by MA).
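For reference, the standard definitions are: Fast %K = 100 * (close - lowest
low) / (highest high - lowest low) over fastk_period; Slow %K is Fast %K
smoothed by an MA over slowk_period; Slow %D is an MA of Slow %K over
slowd_period.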
"""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['kinds']
if 'type' in kwargs:
types = [kwargs['type']] * 2
if 'color' in kwargs:
colors = [kwargs['color']] * 2
name = 'STOCH({},{},{})'.format(str(fastk_period),
str(slowk_period),
str(slowd_period))
slowk = name + r'[%k]'
slowd = name + r'[%d]'
self.sec[slowk] = dict(type=types[0], color=colors[0])
self.sec[slowd] = dict(type=types[1], color=colors[1], on=slowk)
self.ind[slowk], self.ind[slowd] = talib.STOCH(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
fastk_period, slowk_period,
slowk_matype, slowd_period,
slowd_matype)
def add_STOCHF(self, fastk_period=5, fastd_period=3, fastd_matype=0,
types=['line', 'line'],
colors=['primary', 'tertiary'],
**kwargs):
"""Fast Stochastic Oscillator.
Note that the first argument of types and colors refers to Fast Stoch %K,
while second argument refers to Fast Stoch %D
(signal line of %K obtained by MA).
"""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['kinds']
if 'type' in kwargs:
types = [kwargs['type']] * 2
if 'color' in kwargs:
colors = [kwargs['color']] * 2
name = 'STOCHF({},{})'.format(str(fastk_period),
str(fastd_period))
fastk = name + r'[%k]'
fastd = name + r'[%d]'
self.sec[fastk] = dict(type=types[0], color=colors[0])
self.sec[fastd] = dict(type=types[1], color=colors[1], on=fastk)
self.ind[fastk], self.ind[fastd] = talib.STOCHF(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
fastk_period, fastd_period,
fastd_matype)
def add_STOCHRSI(self, timeperiod=14,
fastk_period=5, fastd_period=3, fastd_matype=0,
types=['line', 'line'],
colors=['primary', 'tertiary'],
**kwargs):
"""Stochastic Relative Strength Index.
Note that the first argument of types and colors refers to StochRSI %K
while second argument refers to StochRSI %D
(signal line of %K obtained by MA).
"""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['kinds']
if 'type' in kwargs:
types = [kwargs['type']] * 2
if 'color' in kwargs:
colors = [kwargs['color']] * 2
name = 'STOCHRSI({},{},{})'.format(str(timeperiod),
str(fastk_period),
str(fastd_period))
fastk = name + r'[%k]'
fastd = name + r'[%d]'
self.sec[fastk] = dict(type=types[0], color=colors[0])
self.sec[fastd] = dict(type=types[1], color=colors[1], on=fastk)
self.ind[fastk], self.ind[fastd] = talib.STOCHRSI(self.df[self.cl].values,
timeperiod,
fastk_period,
fastd_period,
fastd_matype)
def add_TRIX(self, timeperiod=15,
type='area', color='secondary', **kwargs):
"""1-day Rate of Change of Triple Smooth EMA."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'TRIX({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.TRIX(self.df[self.cl].values,
timeperiod)
def add_ULTOSC(self, timeperiod=14, timeperiod2=14, timeperiod3=28,
type='line', color='secondary', **kwargs):
"""Ultimate Oscillator."""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'ULTOSC({},{},{})'.format(str(timeperiod),
str(timeperiod2),
str(timeperiod3))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.ULTOSC(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
timeperiod,
timeperiod2,
timeperiod3)
def add_WILLR(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Williams %R."""
if not (self.has_high and self.has_low and self.has_close):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'WILLR({})'.format(str(timeperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.WILLR(self.df[self.hi].values,
self.df[self.lo].values,
self.df[self.cl].values,
timeperiod)
| mit |
zycdragonball/tensorflow | tensorflow/examples/learn/text_classification.py | 12 | 6651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
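# Typical invocation (illustrative): `python text_classification.py` trains the
# RNN model defined below, while `python text_classification.py --bow_model`
# switches to the bag-of-words model; the flags are parsed at the bottom of
# this file.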
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def estimator_spec_for_softmax_classification(
logits, labels, mode):
"""Returns EstimatorSpec instance for softmax classification."""
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def bag_of_words_model(features, labels, mode):
"""A bag-of-words model. Note it disregards the word order in the text."""
bow_column = tf.feature_column.categorical_column_with_identity(
WORDS_FEATURE, num_buckets=n_words)
bow_embedding_column = tf.feature_column.embedding_column(
bow_column, dimension=EMBEDDING_SIZE)
bow = tf.feature_column.input_layer(
features,
feature_columns=[bow_embedding_column])
logits = tf.layers.dense(bow, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
def rnn_model(features, labels, mode):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates an embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
# Split into a list of embeddings, one per word, while removing the doc
# length dim. word_list ends up as a list of tensors of shape
# [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Network of length MAX_DOCUMENT_LENGTH
# and pass word_list as the inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
# Given the encoding of the RNN, take the encoding of the last step (i.e. the
# hidden state after the last unit) and pass it as features for softmax
# classification over the output classes.
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
# Subtract 1 because VocabularyProcessor outputs a word-id matrix where word
# ids start from 1 and 0 means 'no word'. But
# categorical_column_with_identity assumes 0-based count and uses -1 for
# missing word.
x_train -= 1
x_test -= 1
model_fn = bag_of_words_model
classifier = tf.estimator.Estimator(model_fn=model_fn)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
fbagirov/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
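# Basic workflow exercised by these tests (illustrative sketch, assuming a
# labelled dataset X, y):
#
#     from sklearn.svm import SVC
#     from sklearn.feature_selection import RFE
#     rfe = RFE(estimator=SVC(kernel="linear"), n_features_to_select=4, step=0.1)
#     rfe.fit(X, y)
#     X_reduced = rfe.transform(X)
#
# Features are eliminated recursively until n_features_to_select remain.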
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
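# Worked example: with n_features=11, n_features_to_select=3 and step=2,
# formula1 gives 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5 and formula2 gives
# 1 + ceil((11 - 3) / 2.0) = 1 + 4 = 5, i.e. both count 5 subsets.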
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/indexes/test_base.py | 7 | 77312 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import pandas.util.testing as tm
from pandas.indexes.api import Index, MultiIndex
from .common import Base
from pandas.compat import (range, lrange, lzip, u,
zip, PY3, PY36)
import operator
import os
import numpy as np
from pandas import (period_range, date_range, Series,
Float64Index, Int64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex,
PeriodIndex)
from pandas.util.testing import assert_almost_equal
from pandas.compat.numpy import np_datetime64_compat
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas as pd
from pandas.lib import Timestamp
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),
strIndex=tm.makeStringIndex(100),
dateIndex=tm.makeDateIndex(100),
periodIndex=tm.makePeriodIndex(100),
tdIndex=tm.makeTimedeltaIndex(100),
intIndex=tm.makeIntIndex(100),
rangeIndex=tm.makeIntIndex(100),
floatIndex=tm.makeFloatIndex(100),
boolIndex=Index([True, False]),
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
['foo', 'bar', 'baz'], [1, 2, 3])))
self.setup_indices()
def create_index(self):
return Index(list('abcde'))
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assertIsInstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
super(TestIndex, self).test_copy_and_deepcopy()
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
tm.assert_index_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assertIsInstance(index, Index)
self.assertEqual(index.name, 'name')
tm.assert_numpy_array_equal(arr, index.values)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_construction_list_mixed_tuples(self):
# 10697
# if we are constructing from a mixed list of tuples, make sure that we
# are independent of the sorting order
idx1 = Index([('A', 1), 'B'])
self.assertIsInstance(idx1, Index)
self.assertNotIsInstance(idx1, MultiIndex)
idx2 = Index(['B', ('A', 1)])
self.assertIsInstance(idx2, Index)
self.assertNotIsInstance(idx2, MultiIndex)
def test_constructor_from_index_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
self.assertEqual(result.tz, idx.tz)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
self.assertEqual(result.tz, idx.tz)
def test_constructor_from_index_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
def test_constructor_from_index_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
def test_constructor_from_series_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
self.assertEqual(result.tz, idx.tz)
def test_constructor_from_series_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
def test_constructor_from_series_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
result = Index(s)
self.assert_index_equal(result, expected)
result = DatetimeIndex(s)
self.assert_index_equal(result, expected)
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990',
'4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990',
'4-1-1990', '5-1-1990'], freq='MS')
self.assert_index_equal(result, expected)
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',
'5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
expected.name = 'date'
self.assert_index_equal(result, expected)
self.assertEqual(df['date'].dtype, object)
exp = pd.Series(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',
'5-1-1990'], name='date')
self.assert_series_equal(df['date'], exp)
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result, 'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5), np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assert_index_equal(result, expected)
def test_index_ctor_infer_nan_nat(self):
# GH 13467
exp = pd.Float64Index([np.nan, np.nan])
self.assertEqual(exp.dtype, np.float64)
tm.assert_index_equal(Index([np.nan, np.nan]), exp)
tm.assert_index_equal(Index(np.array([np.nan, np.nan])), exp)
exp = pd.DatetimeIndex([pd.NaT, pd.NaT])
self.assertEqual(exp.dtype, 'datetime64[ns]')
tm.assert_index_equal(Index([pd.NaT, pd.NaT]), exp)
tm.assert_index_equal(Index(np.array([pd.NaT, pd.NaT])), exp)
exp = pd.DatetimeIndex([pd.NaT, pd.NaT])
self.assertEqual(exp.dtype, 'datetime64[ns]')
for data in [[pd.NaT, np.nan], [np.nan, pd.NaT],
[np.nan, np.datetime64('nat')],
[np.datetime64('nat'), np.nan]]:
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
exp = pd.TimedeltaIndex([pd.NaT, pd.NaT])
self.assertEqual(exp.dtype, 'timedelta64[ns]')
for data in [[np.nan, np.timedelta64('nat')],
[np.timedelta64('nat'), np.nan],
[pd.NaT, np.timedelta64('nat')],
[np.timedelta64('nat'), pd.NaT]]:
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
# mixed np.datetime64/timedelta64 nat results in object
data = [np.datetime64('nat'), np.timedelta64('nat')]
exp = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
data = [np.timedelta64('nat'), np.datetime64('nat')]
exp = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
tm.assert_index_equal(rs, xp)
tm.assertIsInstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assert_index_equal(result, idx)
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assert_index_equal(result, idx)
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assert_index_equal(result, idx)
def test_constructor_dtypes(self):
for idx in [Index(np.array([1, 2, 3], dtype=int)),
Index(np.array([1, 2, 3], dtype=int), dtype=int),
Index([1, 2, 3], dtype=int)]:
self.assertIsInstance(idx, Int64Index)
# these should coerce
for idx in [Index(np.array([1., 2., 3.], dtype=float), dtype=int),
Index([1., 2., 3.], dtype=int)]:
self.assertIsInstance(idx, Int64Index)
for idx in [Index(np.array([1., 2., 3.], dtype=float)),
Index(np.array([1, 2, 3], dtype=int), dtype=float),
Index(np.array([1., 2., 3.], dtype=float), dtype=float),
Index([1, 2, 3], dtype=float),
Index([1., 2., 3.], dtype=float)]:
self.assertIsInstance(idx, Float64Index)
for idx in [Index(np.array([True, False, True], dtype=bool)),
Index([True, False, True]),
Index(np.array([True, False, True], dtype=bool),
dtype=bool),
Index([True, False, True], dtype=bool)]:
self.assertIsInstance(idx, Index)
self.assertEqual(idx.dtype, object)
for idx in [Index(np.array([1, 2, 3], dtype=int), dtype='category'),
Index([1, 2, 3], dtype='category'),
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
dtype='category'),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)],
dtype='category')]:
self.assertIsInstance(idx, CategoricalIndex)
for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])]:
self.assertIsInstance(idx, DatetimeIndex)
for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
dtype=object),
Index([datetime(2011, 1, 1),
datetime(2011, 1, 2)], dtype=object)]:
self.assertNotIsInstance(idx, DatetimeIndex)
self.assertIsInstance(idx, Index)
self.assertEqual(idx.dtype, object)
for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64(
1, 'D')])), Index([timedelta(1), timedelta(1)])]:
self.assertIsInstance(idx, TimedeltaIndex)
for idx in [Index(np.array([np.timedelta64(1, 'D'),
np.timedelta64(1, 'D')]), dtype=object),
Index([timedelta(1), timedelta(1)], dtype=object)]:
self.assertNotIsInstance(idx, TimedeltaIndex)
self.assertIsInstance(idx, Index)
self.assertEqual(idx.dtype, object)
def test_constructor_dtypes_datetime(self):
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('2011-01-01', periods=5, tz=tz)
dtype = idx.dtype
# pass values without timezone, as DatetimeIndex localizes it
for values in [pd.date_range('2011-01-01', periods=5).values,
pd.date_range('2011-01-01', periods=5).asi8]:
for res in [pd.Index(values, tz=tz),
pd.Index(values, dtype=dtype),
pd.Index(list(values), tz=tz),
pd.Index(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
# check compat with DatetimeIndex
for res in [pd.DatetimeIndex(values, tz=tz),
pd.DatetimeIndex(values, dtype=dtype),
pd.DatetimeIndex(list(values), tz=tz),
pd.DatetimeIndex(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
def test_constructor_dtypes_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
dtype = idx.dtype
for values in [idx.values, idx.asi8]:
for res in [pd.Index(values, dtype=dtype),
pd.Index(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
# check compat with TimedeltaIndex
for res in [pd.TimedeltaIndex(values, dtype=dtype),
pd.TimedeltaIndex(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
for i in restricted:
ind = self.indices[i]
# with arguments
self.assertRaises(TypeError, lambda: ind.view('i8'))
# these are ok
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
# with arguments
ind.view('i8')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth, 's1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth, 's2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_equals_object(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
# test 0th element
self.assert_index_equal(Index(['a', 'b', 'c', 'd']),
result.insert(0, 'a'))
# test Nth element that follows Python list behavior
self.assert_index_equal(Index(['b', 'c', 'e', 'd']),
result.insert(-1, 'e'))
# test loc +/- neq (0, -1)
self.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))
# test empty
null_index = Index([])
self.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertEqual(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_pydatetime()
tm.assertIsInstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
self.assertFalse(isinstance(result, Index))
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# self.assertEqual(first_value,
# x['2013-01-01 00:00:00.000000050+0000'])
exp_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+0000',
'ns')
self.assertEqual(first_value, x[Timestamp(exp_ts)])
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assert_index_equal(result2, expected2)
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assert_index_equal(result3, expected3)
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A', 'B', 'A', 'C'])
idx2 = Index(['B', 'D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assert_index_equal(result, expected)
# preserve names
first = self.strIndex[5:20]
second = self.strIndex[:10]
first.name = 'A'
second.name = 'A'
intersect = first.intersection(second)
self.assertEqual(intersect.name, 'A')
second.name = 'B'
intersect = first.intersection(second)
self.assertIsNone(intersect.name)
first.name = None
second.name = 'B'
intersect = first.intersection(second)
self.assertIsNone(intersect.name)
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# preserve names
first = Index(list('ab'), name='A')
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index([], name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index([], name='A')
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index(list('ab'))
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index([])
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index(list('ab'))
second = Index([], name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index(list('ab'))
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index([])
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
first = Index([], name='A')
second = Index(list('ab'))
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
with tm.assert_produces_warning(RuntimeWarning):
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_add(self):
idx = self.strIndex
expected = Index(self.strIndex.values * 2)
self.assert_index_equal(idx + idx, expected)
self.assert_index_equal(idx + idx.tolist(), expected)
self.assert_index_equal(idx.tolist() + idx, expected)
# test add and radd
idx = Index(list('abc'))
expected = Index(['a1', 'b1', 'c1'])
self.assert_index_equal(idx + '1', expected)
expected = Index(['1a', '1b', '1c'])
self.assert_index_equal('1' + idx, expected)
def test_sub(self):
idx = self.strIndex
self.assertRaises(TypeError, lambda: idx - 'a')
self.assertRaises(TypeError, lambda: idx - idx)
self.assertRaises(TypeError, lambda: idx - idx.tolist())
self.assertRaises(TypeError, lambda: idx.tolist() - idx)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assert_index_equal(result, index)
# empty
result = index.append([])
self.assert_index_equal(result, index)
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
def test_symmetric_difference(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.symmetric_difference(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.symmetric_difference(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
idx1 = Index([1, np.nan, 2, 3])
idx2 = Index([0, 1, np.nan])
idx3 = Index([0, 1])
result = idx1.symmetric_difference(idx2)
expected = Index([0.0, 2.0, 3.0])
tm.assert_index_equal(result, expected)
result = idx1.symmetric_difference(idx3)
expected = Index([0.0, 2.0, 3.0, np.nan])
tm.assert_index_equal(result, expected)
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.symmetric_difference(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.symmetric_difference(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
self.assertFalse(self.catIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.catIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
# GH 14626
# windows has different precision on datetime.datetime.now (it doesn't
# include microseconds); the default Timestamp repr shows these but Index
# formatting does not, so we skip that case
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0 + 3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0 + 3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_logical_compat(self):
idx = self.create_index()
self.assertEqual(idx.all(), idx.values.all())
self.assertEqual(idx.any(), idx.values.any())
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
method(self.catIndex)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
def test_get_indexer_invalid(self):
# GH10411
idx = Index(np.arange(10))
with tm.assertRaisesRegexp(ValueError, 'tolerance argument'):
idx.get_indexer([1, 0], tolerance=1)
with tm.assertRaisesRegexp(ValueError, 'limit argument'):
idx.get_indexer([1, 0], limit=1)
def test_get_indexer_nearest(self):
idx = Index(np.arange(10))
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
dtype=np.intp))
actual = idx.get_indexer([0, 5, 9], method=method, tolerance=0)
tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
dtype=np.intp))
for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9],
[0, 2, 9]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
tolerance=1)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
for method, expected in zip(all_methods, [[0, -1, -1], [-1, 2, -1],
[0, 2, -1]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
tolerance=0.2)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
with tm.assertRaisesRegexp(ValueError, 'limit argument'):
idx.get_indexer([1, 0], method='nearest', limit=1)
def test_get_indexer_nearest_decreasing(self):
idx = Index(np.arange(10))[::-1]
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0],
dtype=np.intp))
for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1],
[9, 7, 0]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_strings(self):
idx = pd.Index(['b', 'c'])
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad')
expected = np.array([-1, 0, 1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(actual, expected)
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill')
expected = np.array([0, 0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(actual, expected)
with tm.assertRaises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with tm.assertRaises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
def test_get_loc(self):
idx = pd.Index([0, 1, 2])
all_methods = [None, 'pad', 'backfill', 'nearest']
for method in all_methods:
self.assertEqual(idx.get_loc(1, method=method), 1)
if method is not None:
self.assertEqual(idx.get_loc(1, method=method, tolerance=0), 1)
with tm.assertRaises(TypeError):
idx.get_loc([1, 2], method=method)
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc(1.1, method), loc)
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc(1.1, method, tolerance=1), loc)
for method in ['pad', 'backfill', 'nearest']:
with tm.assertRaises(KeyError):
idx.get_loc(1.1, method, tolerance=0.05)
with tm.assertRaisesRegexp(ValueError, 'must be numeric'):
idx.get_loc(1.1, 'nearest', tolerance='invalid')
with tm.assertRaisesRegexp(ValueError, 'tolerance .* valid if'):
idx.get_loc(1.1, tolerance=1)
idx = pd.Index(['a', 'c'])
with tm.assertRaises(TypeError):
idx.get_loc('a', method='nearest')
with tm.assertRaises(TypeError):
idx.get_loc('a', method='pad', tolerance='invalid')
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
# reversed
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
# float slicing
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))
n = len(idx)
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
# int slicing with floats
# GH 4892, these are all TypeErrors
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))
self.assertRaises(TypeError,
lambda: idx.slice_locs(5.0, 10.0), (3, n))
self.assertRaises(TypeError,
lambda: idx.slice_locs(4.5, 10.5), (3, 8))
idx2 = idx[::-1]
self.assertRaises(TypeError,
lambda: idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertRaises(TypeError,
lambda: idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([0, np.nan, np.nan, 1, 2])
self.assertEqual(idx.slice_locs(np.nan), (1, 5))
def test_slice_locs_negative_step(self):
idx = Index(list('bcdxy'))
SLC = pd.IndexSlice
def check_slice(in_slice, expected):
s_start, s_stop = idx.slice_locs(in_slice.start, in_slice.stop,
in_slice.step)
result = idx[s_start:s_stop:in_slice.step]
expected = pd.Index(list(expected))
self.assert_index_equal(result, expected)
for in_slice, expected in [
(SLC[::-1], 'yxdcb'), (SLC['b':'y':-1], ''),
(SLC['b'::-1], 'b'), (SLC[:'b':-1], 'yxdcb'),
(SLC[:'y':-1], 'y'), (SLC['y'::-1], 'yxdcb'),
(SLC['y'::-4], 'yb'),
# absent labels
(SLC[:'a':-1], 'yxdcb'), (SLC[:'a':-2], 'ydb'),
(SLC['z'::-1], 'yxdcb'), (SLC['z'::-3], 'yc'),
(SLC['m'::-1], 'dcb'), (SLC[:'m':-1], 'yx'),
(SLC['a':'a':-1], ''), (SLC['z':'z':-1], ''),
(SLC['m':'m':-1], '')
]:
check_slice(in_slice, expected)
def test_drop(self):
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
dropped = self.strIndex.drop(drop)
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assert_index_equal(dropped, expected)
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
self.assertRaises(ValueError, self.strIndex.drop, ['1', 'bar'])
# errors='ignore'
mixed = drop.tolist() + ['foo']
dropped = self.strIndex.drop(mixed, errors='ignore')
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')
expected = self.strIndex[lrange(n)]
self.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assert_index_equal(dropped, expected)
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assert_index_equal(dropped, expected)
# errors='ignore'
self.assertRaises(ValueError, ser.drop, [3, 4])
dropped = ser.drop(4, errors='ignore')
expected = Index([1, 2, 3])
self.assert_index_equal(dropped, expected)
dropped = ser.drop([3, 4, 5], errors='ignore')
expected = Index([1, 2])
self.assert_index_equal(dropped, expected)
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assert_index_equal(int_idx, expected)
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assert_index_equal(union_idx, expected)
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# set
result = idx.isin(set(values))
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', pd.NaT]).isin([pd.NaT]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
np.array([False, False]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([pd.NaT]),
np.array([False, False]))
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]),
np.array([False, True]))
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([pd.NaT]),
np.array([False, True]))
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, idx.isin(values, level=0))
tm.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
tm.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
tm.assert_numpy_array_equal(res, np.array(
[True, True, True, True], dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assert_index_equal(result, self.strIndex)
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_str_attribute(self):
# GH9068
methods = ['strip', 'rstrip', 'lstrip']
idx = Index([' jack', 'jill ', ' jesse ', 'frank'])
for method in methods:
expected = Index([getattr(str, method)(x) for x in idx.values])
tm.assert_index_equal(
getattr(Index.str, method)(idx.str), expected)
# create a few instances that are not able to use .str accessor
indices = [Index(range(5)), tm.makeDateIndex(10),
MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),
PeriodIndex(start='2000', end='2010', freq='A')]
for idx in indices:
with self.assertRaisesRegexp(AttributeError,
'only use .str accessor'):
idx.str.repeat(2)
idx = Index(['a b c', 'd e', 'f'])
expected = Index([['a', 'b', 'c'], ['d', 'e'], ['f']])
tm.assert_index_equal(idx.str.split(), expected)
tm.assert_index_equal(idx.str.split(expand=False), expected)
expected = MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),
('f', np.nan, np.nan)])
tm.assert_index_equal(idx.str.split(expand=True), expected)
# test boolean case, should return np.array instead of boolean Index
idx = Index(['a1', 'a2', 'b1', 'b2'])
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(idx.str.startswith('a'), expected)
self.assertIsInstance(idx.str.startswith('a'), np.ndarray)
s = Series(range(4), index=idx)
expected = Series(range(2), index=['a1', 'a2'])
tm.assert_series_equal(s[s.index.str.startswith('a')], expected)
def test_tab_completion(self):
# GH 9910
idx = Index(list('abcd'))
self.assertTrue('str' in dir(idx))
idx = Index(range(4))
self.assertTrue('str' not in dir(idx))
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0, 1]].identical(pd.Index(
[1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_take_fill_value(self):
# GH 12631
idx = pd.Index(list('ABC'), name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.Index(list('BAC'), name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(['B', 'A', np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Index(['B', 'A', 'C'], name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with tm.assertRaises(IndexError):
idx.take(np.array([1, -5]))
def test_reshape_raise(self):
msg = "reshaping is not supported"
idx = pd.Index([0, 1, 2])
tm.assertRaisesRegexp(NotImplementedError, msg,
idx.reshape, idx.shape)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex(
[pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
def test_groupby(self):
idx = Index(range(5))
groups = idx.groupby(np.array([1, 1, 2, 2, 2]))
exp = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(groups, exp)
def test_equals_op_multiindex(self):
# GH9785
# test comparisons of multiindex
from pandas.compat import StringIO
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
tm.assert_numpy_array_equal(df.index == df.index,
np.array([True, True]))
mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)])
tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True]))
mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)])
tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False]))
mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == mi3
index_a = Index(['foo', 'bar', 'baz'])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == index_a
tm.assert_numpy_array_equal(index_a == mi3,
np.array([False, False, False]))
def test_conversion_preserves_name(self):
# GH 10875
i = pd.Index(['01:02:03', '01:02:04'], name='label')
self.assertEqual(i.name, pd.to_datetime(i).name)
self.assertEqual(i.name, pd.to_timedelta(i).name)
def test_string_index_repr(self):
# py3/py2 repr can differ because of "u" prefix
# which also affects the displayed element size
# suppress flake8 warnings
if PY3:
coerce = lambda x: x
else:
coerce = unicode
# short
idx = pd.Index(['a', 'bb', 'ccc'])
if PY3:
expected = u"""Index(['a', 'bb', 'ccc'], dtype='object')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""
self.assertEqual(coerce(idx), expected)
# multiple lines
idx = pd.Index(['a', 'bb', 'ccc'] * 10)
if PY3:
expected = u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object')"""
self.assertEqual(coerce(idx), expected)
# truncated
idx = pd.Index(['a', 'bb', 'ccc'] * 100)
if PY3:
expected = u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object', length=300)"""
self.assertEqual(coerce(idx), expected)
# short
idx = pd.Index([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""
self.assertEqual(coerce(idx), expected)
# multiple lines
idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう'],\n"
u" dtype='object')")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")
self.assertEqual(coerce(idx), expected)
# truncated
idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう'],\n"
u" dtype='object', length=300)")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object', length=300)")
self.assertEqual(coerce(idx), expected)
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
# short
idx = pd.Index([u'あ', u'いい', u'ううう'])
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう'], "
u"dtype='object')")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう'], "
u"dtype='object')")
self.assertEqual(coerce(idx), expected)
# multiple lines
idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう'],\n"
u" dtype='object')""")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")
self.assertEqual(coerce(idx), expected)
# truncated
idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
u"'いい', 'ううう', 'あ', 'いい',\n"
u" 'ううう'],\n"
u" dtype='object', length=300)")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう'],\n"
u" dtype='object', length=300)")
self.assertEqual(coerce(idx), expected)
class TestMixedIntIndex(Base, tm.TestCase):
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are uncomparable in py3
# (GH 13514)
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))
self.setup_indices()
def create_index(self):
return self.mixedIndex
def test_order(self):
idx = self.create_index()
# 9816 deprecated
if PY36:
with tm.assertRaisesRegexp(TypeError, "'>' not supported "
"between instances of 'str' and 'int'"):
with tm.assert_produces_warning(FutureWarning):
idx.order()
elif PY3:
with tm.assertRaisesRegexp(TypeError, "unorderable types"):
with tm.assert_produces_warning(FutureWarning):
idx.order()
else:
with tm.assert_produces_warning(FutureWarning):
idx.order()
def test_argsort(self):
idx = self.create_index()
if PY36:
with tm.assertRaisesRegexp(TypeError, "'>' not supported "
"between instances of 'str' and 'int'"):
result = idx.argsort()
elif PY3:
with tm.assertRaisesRegexp(TypeError, "unorderable types"):
result = idx.argsort()
else:
result = idx.argsort()
expected = np.array(idx).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
idx = self.create_index()
if PY36:
with tm.assertRaisesRegexp(TypeError, "'>' not supported "
"between instances of 'str' and 'int'"):
result = np.argsort(idx)
elif PY3:
with tm.assertRaisesRegexp(TypeError, "unorderable types"):
result = np.argsort(idx)
else:
result = np.argsort(idx)
expected = idx.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
idx = self.create_index()
first = idx.__class__(idx, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
self.assertIsNot(first, second)
# Not using tm.assert_index_equal() since names differ:
self.assertTrue(idx.equals(first))
self.assertEqual(first.name, 'mario')
self.assertEqual(second.name, 'mario')
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
s3 = s1 * s2
else:
s3 = s1 * s2
self.assertEqual(s3.index.name, 'mario')
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
idx = pd.Index([1, 2], name='MyName')
idx1 = idx.copy()
self.assertTrue(idx.equals(idx1))
self.assertEqual(idx.name, 'MyName')
self.assertEqual(idx1.name, 'MyName')
idx2 = idx.copy(name='NewName')
self.assertTrue(idx.equals(idx2))
self.assertEqual(idx.name, 'MyName')
self.assertEqual(idx2.name, 'NewName')
idx3 = idx.copy(names=['NewName'])
self.assertTrue(idx.equals(idx3))
self.assertEqual(idx.name, 'MyName')
self.assertEqual(idx.names, ['MyName'])
self.assertEqual(idx3.name, 'NewName')
self.assertEqual(idx3.names, ['NewName'])
def test_union_base(self):
idx = self.create_index()
first = idx[3:]
second = idx[:5]
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
self.assert_index_equal(result, expected)
else:
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
self.assert_index_equal(result, expected)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
result = first.union(case)
self.assertTrue(tm.equalContents(result, idx))
else:
result = first.union(case)
self.assertTrue(tm.equalContents(result, idx))
def test_intersection_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:5]
second = idx[:3]
result = first.intersection(second)
expected = Index([0, 'a', 1])
self.assert_index_equal(result, expected)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
self.assertTrue(tm.equalContents(result, second))
def test_difference_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:4]
second = idx[3:]
result = first.difference(second)
expected = Index([0, 1, 'a'])
self.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:4]
second = idx[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, 'a', 'c'])
self.assert_index_equal(result, expected)
def test_logical_compat(self):
idx = self.create_index()
self.assertEqual(idx.all(), idx.values.all())
self.assertEqual(idx.any(), idx.values.any())
def test_dropna(self):
# GH 6194
for dtype in [None, object, 'category']:
idx = pd.Index([1, 2, 3], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
idx = pd.Index([1., 2., 3.], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.Index([1., 2., np.nan, 3.], dtype=dtype)
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.Index(['A', 'B', 'C'], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.Index(['A', np.nan, 'B', 'C'], dtype=dtype)
tm.assert_index_equal(nanidx.dropna(), idx)
tm.assert_index_equal(nanidx.dropna(how='any'), idx)
tm.assert_index_equal(nanidx.dropna(how='all'), idx)
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03', pd.NaT])
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'])
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.TimedeltaIndex([pd.NaT, '1 days', '2 days',
'3 days', pd.NaT])
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'],
freq='M')
tm.assert_index_equal(nanidx.dropna(), idx)
msg = "invalid how option: xxx"
with tm.assertRaisesRegexp(ValueError, msg):
pd.Index([1, 2, 3]).dropna(how='xxx')
def test_get_combined_index():
from pandas.core.index import _get_combined_index
result = _get_combined_index([])
tm.assert_index_equal(result, Index([]))
| apache-2.0 |
jigargandhi/UdemyMachineLearning | Machine Learning A-Z Template Folder/Part 4 - Clustering/Section 25 - Hierarchical Clustering/j_hc.py | 1 | 1785 | # -*- coding: utf-8 -*-
# Agglomerative = bottom-up approach; divisive = top-down approach
# Agglomerative clustering:
# Step 1 - Treat each data point as its own cluster
# Step 2 - Merge the two closest points into one cluster
# Step 3 - Merge the two closest clusters into one cluster
# Step 4 - Repeat step 3 until only one cluster is left
# Closeness of two clusters can be measured by
# a. Euclidean distance between centroids
# b. Distance between the closest points
# c. Distance between the farthest points
# d. Average distance between all pairs of points
# HC keeps the full merge history ("memory") in the dendrogram
# (see the toy sketch after the imports below)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
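# Illustrative sketch (not part of the original tutorial): a tiny 1-D dataset
# makes the agglomerative steps above visible. Each row of the linkage matrix
# records one merge as [cluster_i, cluster_j, distance, n_points] -- this merge
# history is exactly what the dendrogram draws.
import scipy.cluster.hierarchy as sch  # imported again below, which is harmless
toy = np.array([[1.0], [1.1], [5.0], [5.1], [9.0]])
toy_linkage = sch.linkage(toy, method='ward')
print(toy_linkage)  # 4 merges for 5 points; merge distances grow as clusters combine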
dataset= pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:,[3,4]].values
# using the dendrogram to find the optimal number of clusters
# 'ward' linkage merges the pair of clusters that minimizes the increase in within-cluster variance
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))
plt.title('dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean distance')
plt.show()
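# A flat clustering can also be read directly off the tree by cutting the
# linkage matrix, instead of (or in addition to) refitting with scikit-learn
# below. This is an illustrative sketch; choosing 5 clusters here is an
# assumption read from the dendrogram above.
linkage_matrix = sch.linkage(X, method='ward')
labels_from_cut = sch.fcluster(linkage_matrix, t=5, criterion='maxclust')
# labels_from_cut holds one cluster id in {1, ..., 5} per customer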
#fitting hierarchical clustering to the mall dataset
from sklearn.cluster import AgglomerativeClustering
cluster = AgglomerativeClustering(n_clusters = 5, affinity='euclidean', linkage ='ward')
y_hc = cluster.fit_predict(X)
# Only 2d visualization can be done here
plt.scatter(X[y_hc==0, 0],X[y_hc==0, 1], s = 100, color='red', label ='Cluster 1')
plt.scatter(X[y_hc==1, 0],X[y_hc==1, 1], s = 100, color='blue', label ='Cluster 2')
plt.scatter(X[y_hc==2, 0],X[y_hc==2, 1], s = 100, color='green', label ='Cluster 3')
plt.scatter(X[y_hc==3, 0],X[y_hc==3, 1], s = 100, color='cyan', label ='Cluster 4')
plt.scatter(X[y_hc==4, 0],X[y_hc==4, 1], s = 100, color='magenta', label ='Cluster 5')
# unlike k-means, agglomerative clustering exposes no cluster_centers_, so there are no centroids to plot
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.show()
| mit |
harisbal/pandas | pandas/tests/groupby/test_transform.py | 3 | 28075 | """ test with the .transform """
import pytest
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import Series, DataFrame, Timestamp, MultiIndex, concat, date_range
from pandas.core.dtypes.common import (
ensure_platform_int, is_timedelta64_dtype)
from pandas.compat import StringIO
from pandas._libs import groupby
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby.groupby import DataError
from pandas.core.config import option_context
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def test_transform():
data = Series(np.arange(9) // 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
# GH 8046
# make sure that we preserve the input order
df = DataFrame(
np.arange(6, dtype='int64').reshape(
3, 2), columns=["a", "b"], index=[0, 2, 1])
key = [0, 0, 1]
expected = df.sort_index().groupby(key).transform(
lambda x: x - x.mean()).groupby(key).mean()
result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
key).mean()
assert_frame_equal(result, expected)
def demean(arr):
return arr - arr.mean()
people = DataFrame(np.random.randn(5, 5),
columns=['a', 'b', 'c', 'd', 'e'],
index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
key = ['one', 'two', 'one', 'two', 'one']
result = people.groupby(key).transform(demean).groupby(key).mean()
expected = people.groupby(key).apply(demean).groupby(key).mean()
assert_frame_equal(result, expected)
# GH 8430
df = tm.makeTimeDataFrame()
g = df.groupby(pd.Grouper(freq='M'))
g.transform(lambda x: x - 1)
# GH 9700
df = DataFrame({'a': range(5, 10), 'b': range(5)})
result = df.groupby('a').transform(max)
expected = DataFrame({'b': range(5)})
tm.assert_frame_equal(result, expected)
def test_transform_fast():
df = DataFrame({'id': np.arange(100000) / 3,
'val': np.random.randn(100000)})
grp = df.groupby('id')['val']
values = np.repeat(grp.mean().values,
ensure_platform_int(grp.count().values))
expected = pd.Series(values, index=df.index, name='val')
result = grp.transform(np.mean)
assert_series_equal(result, expected)
result = grp.transform('mean')
assert_series_equal(result, expected)
# GH 12737
df = pd.DataFrame({'grouping': [0, 1, 1, 3], 'f': [1.1, 2.1, 3.1, 4.5],
'd': pd.date_range('2014-1-1', '2014-1-4'),
'i': [1, 2, 3, 4]},
columns=['grouping', 'f', 'i', 'd'])
result = df.groupby('grouping').transform('first')
dates = [pd.Timestamp('2014-1-1'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-4')]
expected = pd.DataFrame({'f': [1.1, 2.1, 2.1, 4.5],
'd': dates,
'i': [1, 2, 2, 4]},
columns=['f', 'i', 'd'])
assert_frame_equal(result, expected)
# selection
result = df.groupby('grouping')[['f', 'i']].transform('first')
expected = expected[['f', 'i']]
assert_frame_equal(result, expected)
# dup columns
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['g', 'a', 'a'])
result = df.groupby('g').transform('first')
expected = df.drop('g', axis=1)
assert_frame_equal(result, expected)
def test_transform_broadcast(tsframe, ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, ts.index)
for _, gp in grouped:
assert_fp_equal(result.reindex(gp.index), gp.mean())
grouped = tsframe.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
for _, gp in grouped:
agged = gp.mean()
res = result.reindex(gp.index)
for col in tsframe:
assert_fp_equal(res[col], agged[col])
# group columns
grouped = tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis=1)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
tm.assert_index_equal(result.columns, tsframe.columns)
for _, gp in grouped:
agged = gp.mean(1)
res = result.reindex(columns=gp.columns)
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis(tsframe):
# make sure that we are setting the axes
# correctly when on axis=0 or 1
# in the presence of a non-monotonic indexer
# GH12713
base = tsframe.iloc[0:5]
r = len(base.index)
c = len(base.columns)
tso = DataFrame(np.random.randn(r, c),
index=base.index,
columns=base.columns,
dtype='float64')
# monotonic
ts = tso
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
# non-monotonic
ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
def test_transform_dtype():
# GH 9807
# Check transform dtype output is preserved
df = DataFrame([[1, 3], [2, 3]])
result = df.groupby(1).transform('mean')
expected = DataFrame([[1.5], [1.5]])
assert_frame_equal(result, expected)
def test_transform_bug():
# GH 5712
# transforming on a datetime column
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
result = df.groupby('A')['B'].transform(
lambda x: x.rank(ascending=False))
expected = Series(np.arange(5, 0, step=-1), name='B')
assert_series_equal(result, expected)
def test_transform_numeric_to_boolean():
# GH 16875
# inconsistency in transforming boolean values
expected = pd.Series([True, True], name='A')
df = pd.DataFrame({'A': [1.1, 2.2], 'B': [1, 2]})
result = df.groupby('B').A.transform(lambda x: True)
assert_series_equal(result, expected)
df = pd.DataFrame({'A': [1, 2], 'B': [1, 2]})
result = df.groupby('B').A.transform(lambda x: True)
assert_series_equal(result, expected)
def test_transform_datetime_to_timedelta():
# GH 15429
# transforming a datetime to timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
expected = pd.Series([
Timestamp('20130101') - Timestamp('20130101')] * 5, name='A')
# this does date math without changing result type in transform
base_time = df['A'][0]
result = df.groupby('A')['A'].transform(
lambda x: x.max() - x.min() + base_time) - base_time
assert_series_equal(result, expected)
# this does date math and causes the transform to return timedelta
result = df.groupby('A')['A'].transform(lambda x: x.max() - x.min())
assert_series_equal(result, expected)
def test_transform_datetime_to_numeric():
# GH 10972
# convert dt to float
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.mean())
expected = Series([-0.5, 0.5], name='b')
assert_series_equal(result, expected)
# convert dt to int
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.min())
expected = Series([0, 1], name='b')
assert_series_equal(result, expected)
def test_transform_casting():
# 13046
data = """
idx A ID3 DATETIME
0 B-028 b76cd912ff "2014-10-08 13:43:27"
1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
2 B-076 1a682034f8 "2014-10-08 14:29:01"
3 B-023 b76cd912ff "2014-10-08 18:39:34"
4 B-023 f88g8d7sds "2014-10-08 18:40:18"
5 B-033 b76cd912ff "2014-10-08 18:44:30"
6 B-032 b76cd912ff "2014-10-08 18:46:00"
7 B-037 b76cd912ff "2014-10-08 18:52:15"
8 B-046 db959faf02 "2014-10-08 18:59:59"
9 B-053 b76cd912ff "2014-10-08 19:17:48"
10 B-065 b76cd912ff "2014-10-08 19:21:38"
"""
df = pd.read_csv(StringIO(data), sep=r'\s+',
index_col=[0], parse_dates=['DATETIME'])
result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff())
assert is_timedelta64_dtype(result.dtype)
result = df[['ID3', 'DATETIME']].groupby('ID3').transform(
lambda x: x.diff())
assert is_timedelta64_dtype(result.DATETIME.dtype)
def test_transform_multiple(ts):
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.transform(lambda x: x * 2)
grouped.transform(np.mean)
def test_dispatch_transform(tsframe):
df = tsframe[::5].reindex(tsframe.index)
grouped = df.groupby(lambda x: x.month)
filled = grouped.fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_transform_select_columns(df):
f = lambda x: x.mean()
result = df.groupby('A')['C', 'D'].transform(f)
selection = df[['C', 'D']]
expected = selection.groupby(df['A']).transform(f)
assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(df):
# this also tests orderings in transform between
# series/frame to make sure it's consistent
expected = {}
grouped = df.groupby('A')
expected['C'] = grouped['C'].transform(np.mean)
expected['D'] = grouped['D'].transform(np.mean)
expected = DataFrame(expected)
result = df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
def test_transform_function_aliases(df):
result = df.groupby('A').transform('mean')
expected = df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
result = df.groupby('A')['C'].transform('mean')
expected = df.groupby('A')['C'].transform(np.mean)
assert_series_equal(result, expected)
def test_series_fast_transform_date():
# GH 13191
df = pd.DataFrame({'grouping': [np.nan, 1, 1, 3],
'd': pd.date_range('2014-1-1', '2014-1-4')})
result = df.groupby('grouping')['d'].transform('first')
dates = [pd.NaT, pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-4')]
expected = pd.Series(dates, name='d')
assert_series_equal(result, expected)
def test_transform_length():
# GH 9697
df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
expected = pd.Series([3.0] * 4)
def nsum(x):
return np.nansum(x)
results = [df.groupby('col1').transform(sum)['col2'],
df.groupby('col1')['col2'].transform(sum),
df.groupby('col1').transform(nsum)['col2'],
df.groupby('col1')['col2'].transform(nsum)]
for result in results:
assert_series_equal(result, expected, check_names=False)
def test_transform_coercion():
# 14457
# when we are transforming be sure to not coerce
# via assignment
df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))
g = df.groupby('A')
expected = g.transform(np.mean)
result = g.transform(lambda x: np.mean(x))
assert_frame_equal(result, expected)
def test_groupby_transform_with_int():
# GH 3740, make sure that we upcast on item-by-item transform where needed
# floats
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
C=Series(
[1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=Series(
[-1, 0, 1, -1, 0, 1], dtype='float64')))
assert_frame_equal(result, expected)
# int case
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
C=[1, 2, 3, 1, 2, 3], D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
assert_frame_equal(result, expected)
# int that needs float conversion
s = Series([2, 3, 4, 10, 5, -1])
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
s1 = s.iloc[0:3]
s1 = (s1 - s1.mean()) / s1.std()
s2 = s.iloc[3:6]
s2 = (s2 - s2.mean()) / s2.std()
expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
assert_frame_equal(result, expected)
# int downcasting
result = df.groupby('A').transform(lambda x: x * 2 / 2)
expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
assert_frame_equal(result, expected)
def test_groupby_transform_with_nan_group():
# GH 9941
df = pd.DataFrame({'a': range(10),
'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
result = df.groupby(df.b)['a'].transform(max)
expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.],
name='a')
assert_series_equal(result, expected)
def test_transform_mixed_type():
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
group['g'] = group['d'] * 2
return group[:1]
grouped = df.groupby('c')
result = grouped.apply(f)
assert result['d'].dtype == np.float64
# this is by definition a mutating operation!
with option_context('mode.chained_assignment', None):
for key, group in grouped:
res = f(group)
assert_frame_equal(res, result.loc[key])
def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
"""
Check a group transform that executes a cumulative function.
Parameters
----------
pd_op : callable
The pandas cumulative function.
np_op : callable
The analogous one in NumPy.
dtype : type
The specified dtype of the data.
"""
is_datetimelike = False
data = np.array([[1], [2], [3], [4]], dtype=dtype)
ans = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.int64)
pd_op(ans, data, labels, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
check_dtype=False)
def test_cython_group_transform_cumsum(any_real_dtype):
# see gh-4095
dtype = np.dtype(any_real_dtype).type
pd_op, np_op = groupby.group_cumsum, np.cumsum
_check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_cumprod():
# see gh-4095
dtype = np.float64
pd_op, np_op = groupby.group_cumprod_float64, np.cumprod
_check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_algos():
# see gh-4095
is_datetimelike = False
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumprod_float64(actual, data, labels, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumsum(actual, data, labels, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
# timedelta
is_datetimelike = True
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
actual = np.zeros_like(data, dtype='int64')
groupby.group_cumsum(actual, data.view('int64'), labels,
is_datetimelike)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
tm.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
@pytest.mark.parametrize(
"op, args, targop",
[('cumprod', (), lambda x: x.cumprod()),
('cumsum', (), lambda x: x.cumsum()),
('shift', (-1, ), lambda x: x.shift(-1)),
('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_series(op, args, targop):
# GH 4095
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
# series
for data in [s, s_missing]:
# print(data.head())
expected = data.groupby(labels).transform(targop)
tm.assert_series_equal(
expected,
data.groupby(labels).transform(op, *args))
tm.assert_series_equal(expected, getattr(
data.groupby(labels), op)(*args))
@pytest.mark.parametrize("op", ['cumprod', 'cumsum'])
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize('input, exp', [
# When everything is NaN
({'key': ['b'] * 10, 'value': np.nan},
pd.Series([np.nan] * 10, name='value')),
# When there is a single NaN
({'key': ['b'] * 10 + ['a'] * 2,
'value': [3] * 3 + [np.nan] + [3] * 8},
{('cumprod', False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
('cumprod', True): [3.0, 9.0, 27.0, np.nan, 81., 243., 729.,
2187., 6561., 19683., 3.0, 9.0],
('cumsum', False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
('cumsum', True): [3.0, 6.0, 9.0, np.nan, 12., 15., 18.,
21., 24., 27., 3.0, 6.0]})])
def test_groupby_cum_skipna(op, skipna, input, exp):
df = pd.DataFrame(input)
result = df.groupby('key')['value'].transform(op, skipna=skipna)
if isinstance(exp, dict):
expected = exp[(op, skipna)]
else:
expected = exp
expected = pd.Series(expected, name='value')
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"op, args, targop",
[('cumprod', (), lambda x: x.cumprod()),
('cumsum', (), lambda x: x.cumsum()),
('shift', (-1, ), lambda x: x.shift(-1)),
('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_frame(op, args, targop):
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
strings = list('qwertyuiopasdfghjklz')
strings_missing = strings[:]
strings_missing[5] = np.nan
df = DataFrame({'float': s,
'float_missing': s_missing,
'int': [1, 1, 1, 1, 2] * 200,
'datetime': pd.date_range('1990-1-1', periods=1000),
'timedelta': pd.timedelta_range(1, freq='s',
periods=1000),
'string': strings * 50,
'string_missing': strings_missing * 50},
columns=['float', 'float_missing', 'int', 'datetime',
'timedelta', 'string', 'string_missing'])
df['cat'] = df['string'].astype('category')
df2 = df.copy()
df2.index = pd.MultiIndex.from_product([range(100), range(10)])
# DataFrame - Single and MultiIndex,
# group by values, index level, columns
for df in [df, df2]:
for gb_target in [dict(by=labels), dict(level=0), dict(by='string')
]: # dict(by='string_missing')]:
# dict(by=['int','string'])]:
gb = df.groupby(**gb_target)
# whitelisted methods set the selection before applying
# a bit of a hack to make sure the cythonized shift
# is equivalent to pre 0.17.1 behavior
if op == 'shift':
gb._set_group_selection()
if op != 'shift' and 'int' not in gb_target:
# numeric apply fastpath promotes dtype so have
# to apply separately and concat
i = gb[['int']].apply(targop)
f = gb[['float', 'float_missing']].apply(targop)
expected = pd.concat([f, i], axis=1)
else:
expected = gb.apply(targop)
expected = expected.sort_index(axis=1)
tm.assert_frame_equal(expected,
gb.transform(op, *args).sort_index(
axis=1))
tm.assert_frame_equal(
expected,
getattr(gb, op)(*args).sort_index(axis=1))
# individual columns
for c in df:
if c not in ['float', 'int', 'float_missing'
] and op != 'shift':
pytest.raises(DataError, gb[c].transform, op)
pytest.raises(DataError, getattr(gb[c], op))
else:
expected = gb[c].apply(targop)
expected.name = c
tm.assert_series_equal(expected,
gb[c].transform(op, *args))
tm.assert_series_equal(expected,
getattr(gb[c], op)(*args))
def test_transform_with_non_scalar_group():
# GH 10165
cols = pd.MultiIndex.from_tuples([
('syn', 'A'), ('mis', 'A'), ('non', 'A'),
('syn', 'C'), ('mis', 'C'), ('non', 'C'),
('syn', 'T'), ('mis', 'T'), ('non', 'T'),
('syn', 'G'), ('mis', 'G'), ('non', 'G')])
df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
columns=cols,
index=['A', 'C', 'G', 'T'])
tm.assert_raises_regex(ValueError, 'transform must return '
'a scalar value for each '
'group.*',
df.groupby(axis=1, level=1).transform,
lambda z: z.div(z.sum(axis=1), axis=0))
@pytest.mark.parametrize('cols,exp,comp_func', [
('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal),
(['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}),
tm.assert_frame_equal)
])
@pytest.mark.parametrize('agg_func', [
'count', 'rank', 'size'])
def test_transform_numeric_ret(cols, exp, comp_func, agg_func):
if agg_func == 'size' and isinstance(cols, list):
pytest.xfail("'size' transformation not supported with "
"NDFrameGroupy")
# GH 19200
df = pd.DataFrame(
{'a': pd.date_range('2018-01-01', periods=3),
'b': range(3),
'c': range(7, 10)})
result = df.groupby('b')[cols].transform(agg_func)
if agg_func == 'rank':
exp = exp.astype('float')
comp_func(result, exp)
@pytest.mark.parametrize("mix_groupings", [True, False])
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize("val1,val2", [
('foo', 'bar'), (1, 2), (1., 2.)])
@pytest.mark.parametrize("fill_method,limit,exp_vals", [
("ffill", None,
[np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']),
("ffill", 1,
[np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]),
("bfill", None,
['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]),
("bfill", 1,
[np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan])
])
def test_group_fill_methods(mix_groupings, as_series, val1, val2,
fill_method, limit, exp_vals):
vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
_exp_vals = list(exp_vals)
# Overwrite placeholder values
for index, exp_val in enumerate(_exp_vals):
if exp_val == 'val1':
_exp_vals[index] = val1
elif exp_val == 'val2':
_exp_vals[index] = val2
# Need to modify values and expectations depending on the
# Series / DataFrame that we ultimately want to generate
if mix_groupings: # ['a', 'b', 'a', 'b', ...]
keys = ['a', 'b'] * len(vals)
def interweave(list_obj):
temp = list()
for x in list_obj:
temp.extend([x, x])
return temp
_exp_vals = interweave(_exp_vals)
vals = interweave(vals)
else: # ['a', 'a', 'a', ... 'b', 'b', 'b']
keys = ['a'] * len(vals) + ['b'] * len(vals)
_exp_vals = _exp_vals * 2
vals = vals * 2
df = DataFrame({'key': keys, 'val': vals})
if as_series:
result = getattr(
df.groupby('key')['val'], fill_method)(limit=limit)
exp = Series(_exp_vals, name='val')
assert_series_equal(result, exp)
else:
result = getattr(df.groupby('key'), fill_method)(limit=limit)
exp = DataFrame({'key': keys, 'val': _exp_vals})
assert_frame_equal(result, exp)
@pytest.mark.parametrize("fill_method", ['ffill', 'bfill'])
def test_pad_stable_sorting(fill_method):
# GH 21207
x = [0] * 20
y = [np.nan] * 10 + [1] * 10
if fill_method == 'bfill':
y = y[::-1]
df = pd.DataFrame({'x': x, 'y': y})
expected = df.copy()
result = getattr(df.groupby('x'), fill_method)()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("periods,fill_method,limit", [
(1, 'ffill', None), (1, 'ffill', 1),
(1, 'bfill', None), (1, 'bfill', 1),
(-1, 'ffill', None), (-1, 'ffill', 1),
(-1, 'bfill', None), (-1, 'bfill', 1)])
def test_pct_change(test_series, periods, fill_method, limit):
vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
exp_vals = Series(vals).pct_change(periods=periods,
fill_method=fill_method,
limit=limit).tolist()
df = DataFrame({'key': ['a'] * len(vals) + ['b'] * len(vals),
'vals': vals * 2})
grp = df.groupby('key')
def get_result(grp_obj):
return grp_obj.pct_change(periods=periods,
fill_method=fill_method,
limit=limit)
if test_series:
exp = pd.Series(exp_vals * 2)
exp.name = 'vals'
grp = grp['vals']
result = get_result(grp)
tm.assert_series_equal(result, exp)
else:
exp = DataFrame({'vals': exp_vals * 2})
result = get_result(grp)
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("func", [np.any, np.all])
def test_any_all_np_func(func):
# GH 20653
df = pd.DataFrame([['foo', True],
[np.nan, True],
['foo', True]], columns=['key', 'val'])
exp = pd.Series([True, np.nan, True], name='val')
res = df.groupby('key')['val'].transform(func)
tm.assert_series_equal(res, exp)
| bsd-3-clause |
zonca/petsc4py | demo/ode/orego.py | 1 | 3701 | # Oregonator: stiff 3-variable oscillatory ODE system from chemical reactions,
# problem OREGO in Hairer&Wanner volume 2
# See also http://www.scholarpedia.org/article/Oregonator
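#
# For reference, the (scaled) Oregonator system integrated below, as encoded
# in Orego.evalFunction, is
#   x0' = 77.27*(x1 + x0*(1 - 8.375e-6*x0 - x1))
#   x1' = (x2 - (1 + x0)*x1)/77.27
#   x2' = 0.161*(x0 - x2)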
import sys, petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc
class Orego(object):
n = 3
comm = PETSc.COMM_SELF
def evalSolution(self, t, x):
assert t == 0.0, "only for t=0.0"
x.setArray([1, 2, 3])
def evalFunction(self, ts, t, x, xdot, f):
f.setArray([xdot[0] - 77.27*(x[1] + x[0]*(1 - 8.375e-6*x[0] - x[1])),
xdot[1] - 1/77.27*(x[2] - (1 + x[0])*x[1]),
xdot[2] - 0.161*(x[0] - x[2])])
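# Note: for an implicit ODE F(t, x, xdot) = 0, PETSc TS asks the IJacobian
# callback for J = dF/dx + a*dF/dxdot, where `a` is the shift supplied by the
# time integrator; this is why `a` appears on the diagonal of the matrix
# assembled below.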
def evalJacobian(self, ts, t, x, xdot, a, A, B):
B[:,:] = [[a - 77.27*((1 - 8.375e-6*x[0] - x[1]) - 8.375e-6*x[0]), -77.27*(1 - x[0]), 0],
[1/77.27*x[1], a + 1/77.27*(1 + x[0]), -1/77.27],
[-0.161, 0, a + 0.161]]
B.assemble()
if A != B: A.assemble()
return True # same nonzero pattern
OptDB = PETSc.Options()
ode = Orego()
J = PETSc.Mat().createDense([ode.n, ode.n], comm=ode.comm)
x = PETSc.Vec().createSeq(ode.n, comm=ode.comm)
f = x.duplicate()
ts = PETSc.TS().create(comm=ode.comm)
ts.setType(ts.Type.ROSW) # Rosenbrock-W. ARKIMEX is a nonlinearly implicit alternative.
ts.setIFunction(ode.evalFunction, f)
ts.setIJacobian(ode.evalJacobian, J)
history = []
def monitor(ts, i, t, x):
xx = x[:].tolist()
history.append((i, t, xx))
ts.setMonitor(monitor)
ts.setTime(0.0)
ts.setTimeStep(0.1)
ts.setMaxTime(360)
ts.setMaxSteps(2000)
ts.setMaxSNESFailures(-1) # allow an unlimited number of failures (step will be rejected and retried)
# Set a different tolerance on each variable. Can use a scalar or a vector for either or both atol and rtol.
vatol = x.duplicate(array=[1e-2, 1e-1, 1e-4])
ts.setTolerances(atol=vatol,rtol=1e-3) # adaptive controller attempts to match this tolerance
snes = ts.getSNES() # Nonlinear solver
snes.setTolerances(max_it=10) # Stop nonlinear solve after 10 iterations (TS will retry with shorter step)
ksp = snes.getKSP() # Linear solver
ksp.setType(ksp.Type.PREONLY) # Just use the preconditioner without a Krylov method
pc = ksp.getPC() # Preconditioner
pc.setType(pc.Type.LU) # Use a direct solve
ts.setFromOptions() # Apply run-time options, e.g. -ts_adapt_monitor -ts_type arkimex -snes_converged_reason
ode.evalSolution(0.0, x)
ts.solve(x)
print('steps %d (%d rejected, %d SNES fails), nonlinear its %d, linear its %d'
% (ts.getStepNumber(), ts.getStepRejections(), ts.getSNESFailures(),
ts.getSNESIterations(), ts.getKSPIterations()))
if OptDB.getBool('plot_history', True):
try:
from matplotlib import pylab
from matplotlib import rc
except ImportError:
print("matplotlib not available")
raise SystemExit
import numpy as np
ii = np.asarray([v[0] for v in history])
tt = np.asarray([v[1] for v in history])
xx = np.asarray([v[2] for v in history])
rc('text', usetex=True)
pylab.suptitle('Oregonator: TS \\texttt{%s}' % ts.getType())
pylab.subplot(2,2,4)
pylab.subplots_adjust(wspace=0.3)
pylab.semilogy(ii[:-1], np.diff(tt))
pylab.xlabel('step number')
pylab.ylabel('timestep')
for i in range(0,3):
pylab.subplot(2,2,i+1)
pylab.semilogy(tt, xx[:,i], "rgb"[i])
pylab.xlabel('time')
pylab.ylabel('$x_%d$' % i)
# pylab.savefig('orego-history.png')
pylab.show()
| bsd-2-clause |
LiaoPan/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
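# The helper below implements the scoring loop of stability selection: each of
# the `n_resampling` rounds draws a random subsample of the rows (a fraction
# `sample_fraction` of them) and randomly down-weights features (every feature
# is either left unchanged or shrunk by `scaling`, which acts as a randomized
# re-weighting of the penalty), then records which features the estimator
# selects. Averaging the boolean selection masks over all rounds yields
# per-feature selection frequencies in [0, 1].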
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the training data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the training data and
computing a LogisticRegression on each resampling. In short, the features
selected more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
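# _lasso_stability_path fits one Lasso (LARS) path on a random subsample with
# randomly re-weighted features and returns its alphas rescaled by alpha_max
# together with the coefficients; lasso_stability_path then interpolates every
# path onto a common alpha/alpha_max grid and averages the indicators of
# nonzero coefficients, giving the per-feature selection frequency at each
# grid point.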
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
kedz/cuttsum | trec2015/sbin/article-extractor-results-viewer.py | 1 | 1518 | import cuttsum.events
import cuttsum.corpora
from cuttsum.pipeline import ArticlesResource
import pandas as pd
import datetime
def main():
results = []
res = ArticlesResource()
for ext in ["gold", "goose"]:
for event in cuttsum.events.get_2013_events():
if event.query_id.startswith("TS13"):
corpus = cuttsum.corpora.EnglishAndUnknown2013()
else:
raise Exception()
min_hour = datetime.datetime(datetime.MAXYEAR, 1, 1)
max_hour = datetime.datetime(datetime.MINYEAR, 1, 1)
total = 0
for hour, path, si in res.streamitem_iter(event, corpus, ext):
if hour < min_hour:
min_hour = hour
if hour > max_hour:
max_hour = hour
total += 1
if total == 0:
continue
results.append({"event": event.fs_name(),
"event start": event.list_event_hours()[0],
"event stop": event.list_event_hours()[-1],
"article start": min_hour,
"article stop": max_hour,
"total": total,
"annotator": ext})
df = pd.DataFrame(results,
columns=["event", "annotator", "event start", "event stop",
"article start", "article stop", "total", "annotator"])
print df
if __name__ == u"__main__":
main()
| apache-2.0 |
ejulio/blog-posts | cross-validation-testando-o-desempenho-de-um-classificador/cross-validation.py | 1 | 2761 | import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multiclass import OneVsOneClassifier
from sklearn.svm import SVC
from sklearn import cross_validation
from sklearn.metrics import confusion_matrix
import argparse
# data
categories = ['alt.atheism', 'soc.religion.christian',
'comp.graphics', 'sci.med']
dataset = fetch_20newsgroups(subset = 'train', categories = categories,
shuffle = True, random_state = 42)
# classifier: one vs one SVM
classifier = OneVsOneClassifier(SVC(kernel = 'linear', random_state = 84))
# features: tokenizer & tf-idf
count_vector = CountVectorizer()
tfidf = TfidfTransformer()
def fit_classifier(train_data, train_labels):
# count_vector = CountVectorizer(stop_words = 'english', ngram_range = (1, 1))
train_counts = count_vector.fit_transform(train_data)
train_tfidf = tfidf.fit_transform(train_counts)
classifier.fit(train_tfidf, train_labels)
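# Note: predict() below reuses the vocabulary and IDF weights fitted on the
# training data (transform, not fit_transform), so no information from the
# test documents leaks into the feature extraction.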
def predict(test_data):
test_counts = count_vector.transform(test_data)
test_tfidf = tfidf.transform(test_counts)
return classifier.predict(test_tfidf)
def run_confusion_matrix():
print("confusion matrix")
# split the data into train and test
train_data, test_data, train_labels, test_labels = cross_validation.train_test_split(
dataset.data, dataset.target, test_size = 0.1, random_state = 10)
fit_classifier(train_data, train_labels)
predicted = predict(test_data)
# 0 = alt.atheism, 1 = comp.graphics, 2 = sci.med, 3 = soc.religion.christian
print(confusion_matrix(test_labels, predicted, labels = [0, 1, 2, 3]))
def run_cross_validation():
print("cross validation")
test_counts = count_vector.fit_transform(dataset.data)
test_tfidf = tfidf.fit_transform(test_counts)
scores = cross_validation.cross_val_score(classifier, test_tfidf,
dataset.target, cv = 5)
print(scores)
print("Accuracy: {} +/- {}".format(scores.mean(), scores.std() * 2))
def run_example(text):
print("Classifying {}".format(text))
fit_classifier(dataset.data, dataset.target)
category = predict([text])
print("Classified as {}".format(dataset.target_names[category]))
ap = argparse.ArgumentParser()
ap.add_argument("-cm", "--confusion-matrix", type = bool,
help = "Show confusion matrix example")
ap.add_argument("-cv", "--cross-validation", type = bool,
help = "Run k-fold cross validation")
ap.add_argument("-e", "--example",
help = "Classify the given text")
args = vars(ap.parse_args())
if args.get("confusion_matrix"):
run_confusion_matrix()
elif args.get("cross_validation"):
run_cross_validation()
elif args.get("example") != None:
run_example(args["example"])
else:
print("I don't know what to do!")
| mit |
mblondel/scikit-learn | sklearn/utils/validation.py | 7 | 22125 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse is None:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
sparse_type = spmatrix.format
if dtype is None:
dtype = spmatrix.dtype
if sparse_type in accept_sparse:
# correct type
if dtype == spmatrix.dtype:
# correct dtype
if copy:
spmatrix = spmatrix.copy()
else:
# convert dtype
spmatrix = spmatrix.astype(dtype)
else:
# create new
spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
if hasattr(spmatrix, "data"):
spmatrix.data = np.array(spmatrix.data, copy=False, order=order)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
ensure_min_samples=1, ensure_min_features=1):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2nd numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
if sp.issparse(array):
if dtype == "numeric":
dtype = None
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
if dtype == "numeric":
if hasattr(array, "dtype") and array.dtype.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
array = np.array(array, dtype=dtype, order=order, copy=copy)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
return array
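# A minimal usage sketch (the call below is illustrative, not part of this
# module): inside an estimator's ``fit`` one would typically write
#     X = check_array(X, accept_sparse='csr', dtype=np.float64)
# to validate shape, dtype and finiteness, and to convert lists or disallowed
# sparse formats as documented above.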
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
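# --- Editor's sketch (illustrative, not part of the original file) ---
# check_X_y validates X and y together: X goes through check_array, y is
# raveled to 1-d and checked for finiteness and consistent length.
# Example values are made up.
# >>> Xc, yc = check_X_y([[0., 1.], [2., 3.], [4., 5.]], [0, 1, 0])
# >>> Xc.shape, yc.shape
# ((3, 2), (3,))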
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
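# --- Editor's sketch (illustrative, not part of the original file) ---
# column_or_1d accepts 1-d input or an (n, 1) column vector (raveled, with a
# DataConversionWarning when warn=True); any other shape raises ValueError.
# >>> import numpy as np
# >>> column_or_1d(np.array([[1], [2], [3]])).shape
# (3,)
# >>> column_or_1d(np.ones((2, 2)))  # raises ValueError: bad input shape (2, 2)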
def warn_if_not_float(X, estimator='This algorithm'):
"""Warning utility function to check that data type is floating point.
Returns True if a warning was raised (i.e. the input is not float) and
False otherwise, for easier input validation.
"""
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
if X.dtype.kind != 'f':
warnings.warn("%s assumes floating point values as input, "
"got %s" % (estimator, X.dtype))
return True
return False
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
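# --- Editor's sketch (illustrative, not part of the original file) ---
# check_random_state normalizes the three accepted seed forms:
# >>> import numpy as np
# >>> rs = check_random_state(0)                    # int -> new RandomState
# >>> check_random_state(rs) is rs                  # RandomState passed through
# True
# >>> check_random_state(None) is np.random.mtrand._rand
# True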
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
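# --- Editor's sketch (illustrative, not part of the original file) ---
# A slightly asymmetric dense matrix is symmetrized by averaging with its
# transpose; by default this only emits a warning, not an exception.
# >>> import numpy as np
# >>> check_symmetric(np.array([[0., 1.], [2., 0.]]), raise_warning=False)
# array([[ 0. ,  1.5],
#        [ 1.5,  0. ]])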
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
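# --- Editor's sketch (illustrative, not part of the original file) ---
# check_is_fitted raises NotFittedError until the estimator exposes the
# requested fitted attribute(s); LinearSVC is used here only as an example.
# >>> from sklearn.svm import LinearSVC
# >>> est = LinearSVC()
# >>> check_is_fitted(est, "coef_")            # raises NotFittedError
# >>> _ = est.fit([[0., 0.], [1., 1.]], [0, 1])
# >>> check_is_fitted(est, "coef_")            # passes once coef_ exists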
| bsd-3-clause |
AutonomyLab/husky | opencv-utilities/image-tools.py | 1 | 10355 | #! /usr/bin/env python
import sys, cv, cv2, os
import numpy as np
import subprocess, signal
import math
import atexit
import cPickle as pickle
from sklearn.cluster import DBSCAN
from sklearn import metrics, preprocessing
import pymeanshift as pms
from optparse import OptionParser
import time
parser = OptionParser()
parser.add_option("-i", "--input", dest="input_dir", help="directory with frames")
parser.add_option("-s", "--start", dest="start_frame", default="0", help="frame to start on")
parser.add_option("-m", "--mode", dest="mode", default="1", help="image processing mode")
parser.add_option("-r", "--framerate", dest="framerate", default="30", help="playback rate")
parser.add_option("-l", "--list", dest="list", action="store_true", default=False, help="list modes?")
parser.add_option("-c", "--crop-husky", dest="crop", action="store_true", default=False, help="crop out the image header from the Husky?")
parser.add_option("-p", "--playback", dest="playback", action="store_true", default=False, help="just play back the frames, don't process them")
parser.add_option("--save", dest="save", action="store", default=None, help="directory to save the rendered frames to")
parser.add_option("--headless", dest="headless", action="store_true", default=False, help="processing only: show no vizualizations")
(options, args) = parser.parse_args()
if not options.headless:
cv2.namedWindow("display", cv2.cv.CV_WINDOW_NORMAL)
mog = cv2.BackgroundSubtractorMOG()
def original(img):
return img
def grayscale(img):
return cv2.cvtColor(img, cv.CV_BGR2GRAY)
def sobelx(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
gradient = cv2.Sobel(img_grey, ddepth=cv.CV_64F, dx=1, dy=0, ksize=5)
return np.uint8(np.absolute(gradient))
def sobely(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
gradient = cv2.Sobel(img_grey, ddepth=cv.CV_64F, dx=0, dy=1, ksize=5)
return np.uint8(np.absolute(gradient))
def sobelboth(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
gradient = cv2.Sobel(img_grey, ddepth=cv.CV_64F, dx=1, dy=1, ksize=5)
return np.uint8(np.absolute(gradient))
def laplacian(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
lapl = cv2.Laplacian(img_grey, cv2.CV_64F)
return cv2.convertScaleAbs(lapl)
def canny(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
return cv2.Canny(img_grey, 100, 200)
def gaussian(img):
return cv2.GaussianBlur(img, (5,5), 1)
def bilateral(img):
return cv2.bilateralFilter(img, 9, 75, 75)
def segmented(img):
(segmented_image, labels_image, number_regions) = pms.segment(img, spatial_radius=6, range_radius=4.5, min_density=50)
return segmented_image
def segmented_downsampled(img):
downsampled = cv2.resize(img, (0,0), fx=0.25, fy=0.25)
(segmented_image, labels_image, number_regions) = pms.segment(downsampled, spatial_radius=6, range_radius=4.5, min_density=50)
return segmented_image
def scharrx(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
return cv2.Scharr(img_grey, ddepth=cv.CV_64F, dx=1, dy=0)
def scharry(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
return cv2.Scharr(img_grey, ddepth=cv.CV_64F, dx=0, dy=1)
def scharrboth(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
return cv2.Scharr(img_grey, ddepth=cv.CV_64F, dx=1, dy=1)
def opencv_segmentation(img):
return cv2.pyrMeanShiftFiltering(img, sp=12, sr=9)
def grabcut(img):
mask = np.zeros(img.shape[:2], dtype=np.uint8)
fg = np.zeros((1,65), np.float64)
bg = np.zeros((1,65), np.float64)
return cv2.grabCut(img, mask, None, bg, fg, 10)
def hough_circles(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
circles = cv2.HoughCircles(img_grey, cv2.cv.CV_HOUGH_GRADIENT, 1, 10, param1=100, param2=30, minRadius=5, maxRadius=20)
if circles is None:
circles = []
for circle_list in circles:
for circle in circle_list:
cv2.circle(img, (int(circle[0]), int(circle[1])), int(circle[2]), (255,0,0))
return img
def hough_lines(img):
edge = cv2.cvtColor(img, cv.CV_BGR2GRAY)
lines = cv2.HoughLines(edge, 1, np.pi/180, 130)
edge = cv2.cvtColor(edge, cv.CV_GRAY2RGB)
if lines is None:
lines = []
for line_list in lines:
for rho,theta in line_list:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b)) # int() truncates the decimal part, so 3.8 --> 3
y1 = int(y0 + 1000*(a)) # to round instead, apply np.around() first: 3.8 --> 4.0
x2 = int(x0 - 1000*(-b)) # i.e. int(np.around(x)) gives the nearest integer
y2 = int(y0 - 1000*(a))
cv2.line(edge,(x1,y1),(x2,y2),(255,0,0))
return edge
def hough_circles_edge(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
img_grey = cv2.Canny(img_grey, 100, 200)
circles = cv2.HoughCircles(img_grey, cv2.cv.CV_HOUGH_GRADIENT, 1, 10, param1=100, param2=30, minRadius=5, maxRadius=20)
if circles is None:
circles = []
for circle_list in circles:
for circle in circle_list:
cv2.circle(img, (int(circle[0]), int(circle[1])), int(circle[2]), (255,0,0))
return img
def hough_lines_edge(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
edge = cv2.Canny(img_grey, 100, 200)
lines = cv2.HoughLines(edge, 1, np.pi/180, 130)
edge = cv2.cvtColor(edge, cv.CV_GRAY2RGB)
if lines is None:
lines = []
for line_list in lines:
for rho,theta in line_list:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b)) # int() truncates the decimal part, so 3.8 --> 3
y1 = int(y0 + 1000*(a)) # to round instead, apply np.around() first: 3.8 --> 4.0
x2 = int(x0 - 1000*(-b)) # i.e. int(np.around(x)) gives the nearest integer
y2 = int(y0 - 1000*(a))
cv2.line(edge,(x1,y1),(x2,y2),(255,0,0))
return edge
def harris_corners(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
dst = cv2.cornerHarris(img_grey, 2, 3, 0.04)
dst = cv2.dilate(dst,None)
img[dst>0.01*dst.max()]=[0,0,255]
return img
def harris_corners_edge(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
img_grey = cv2.Canny(img_grey, 100, 200)
dst = cv2.cornerHarris(img_grey, 2, 3, 0.04)
dst = cv2.dilate(dst,None)
img[dst>0.01*dst.max()]=[0,0,255]
return img
def background_subtraction(img):
return mog.apply(img)
def histogram_equalization(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
return cv2.equalizeHist(img_grey)
def contours(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
edge = cv2.Canny(img_grey, 100, 200)
(contours, hierarchy) = cv2.findContours(edge, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
color = np.random.randint(0,255,(3)).tolist()
cv2.drawContours(img, [cnt], 0, color, 2)
return img
def moments(img):
img_grey = cv2.cvtColor(img, cv.CV_BGR2GRAY)
edge = cv2.Canny(img_grey, 100, 200)
(contours, hierarchy) = cv2.findContours(edge, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
moments = cv2.moments(cnt)
if moments['m00'] != 0:
cx = int(moments['m10']/moments['m00'])
cy = int(moments['m01']/moments['m00'])
moment_area = moments['m00']
contour_area = cv2.contourArea(cnt)
cv2.drawContours(img, [cnt], 0, (0,255,0), 1)
cv2.circle(img, (cx, cy), 5, (0, 0, 255), -1)
return img
options.mode = "m%s" % options.mode
modes = dict(
m0=("original", original),
m1=("grayscale", grayscale),
m2=("sobel gradient x", sobelx),
m3=("sobel gradient y", sobely),
m4=("sobel gradient x and y", sobelboth),
m5=("laplacian", laplacian),
m6=("canny edges", canny),
m7=("gaussian blur", gaussian),
m8=("bilateral blur/filter", bilateral),
m9=("mean shift segmentation", segmented),
m10=("downsampled segmentation", segmented_downsampled),
m11=("scharr gradient x", scharrx),
m12=("scharr gradient y", scharry),
m13=("opencv mean shift (segmentation?)", opencv_segmentation),
m14=("grabcut (segmentation?)", grabcut),
m15=("hough circle detection", hough_circles),
m16=("hough line detection", hough_lines),
m17=("hough circles (edge image)", hough_circles_edge),
m18=("hough lines (edge image)", hough_lines_edge),
m19=("harris corner detection", harris_corners),
m20=("harris corners (edge image)", harris_corners_edge),
m21=("background subtraction (MOG)", background_subtraction),
m22=("histogram equalization", histogram_equalization),
m23=("contours", contours),
m24=("moments", moments)
)
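# --- Editor's sketch (illustrative, not part of the original file) ---
# Each entry of `modes` maps "m<number>" to a (label, function) pair, so a
# frame is processed by looking the mode up and calling the stored function:
# >>> label, func = modes["m6"]  # canny edges
# >>> label
# 'canny edges'
# >>> edges = func(img)  # img being a BGR frame read with cv2.imread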
if options.list:
print "list of modes:"
keys = modes.keys()
keys.sort()
for key in keys:
print "%s: %s" % (key[1:], modes[key][0])
sys.exit(0)
framerate = int(options.framerate)
frame = int(options.start_frame)
try:
while True:
framefile = "%s%sframe%04d.jpg" % (options.input_dir, os.sep, frame)
print framefile
if not os.path.isfile(framefile):
break
img = cv2.imread(framefile)
if options.crop:
img = img[20:, :]
start_time = time.time()
if options.playback:
if not options.headless:
cv2.imshow("display", img)
else:
img = modes[options.mode][1](img)
if not options.headless:
cv2.imshow("display", img)
if options.save != None:
outfile = "%s%sframe%04d.jpg" % (options.save, os.sep, frame)
cv2.imwrite(outfile, img)
end_time = time.time()
print "Frame took %s seconds to process" % (end_time-start_time)
cv2.waitKey(int(1.0/framerate*1000))
frame += 1
except IOError, e:
pass # DONE
| gpl-3.0 |
ekumenlabs/terminus | setup.py | 1 | 1109 | """
Copyright (C) 2017 Open Source Robotics Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import setup
setup(name='terminus',
version='0.1',
description='City generator for Gazebo',
url='https://github.com/ekumenlabs/terminus',
license='MIT',
packages=['terminus'],
install_requires=[
'jinja2',
'shapely',
'numpy',
'imposm.parser',
'scipy',
'matplotlib',
'Pillow',
'PyYAML',
'mock',
'python-slugify',
'sortedcontainers'
],
zip_safe=False)
| apache-2.0 |
ZenDevelopmentSystems/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/core/common.py | 2 | 18644 | """
Misc tools for implementing data structures
"""
import sys
import warnings
from datetime import datetime, timedelta
from functools import partial
import inspect
import collections
import numpy as np
from pandas._libs import lib, tslib
from pandas import compat
from pandas.compat import long, zip, iteritems
from pandas.core.config import get_option
from pandas.core.dtypes.generic import ABCSeries, ABCIndex
from pandas.core.dtypes.common import _NS_DTYPE
from pandas.core.dtypes.inference import _iterable_not_string
from pandas.core.dtypes.missing import isna, isnull, notnull # noqa
from pandas.api import types
from pandas.core.dtypes import common
# compat
from pandas.errors import ( # noqa
PerformanceWarning, UnsupportedFunctionCall, UnsortedIndexError)
# back-compat of public API
# deprecate these functions
m = sys.modules['pandas.core.common']
for t in [t for t in dir(types) if not t.startswith('_')]:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"import from the public API: "
"pandas.api.types.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(types, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
# back-compat for non-public functions
# deprecate these functions
for t in ['is_datetime_arraylike',
'is_datetime_or_timedelta_dtype',
'is_datetimelike',
'is_datetimelike_v_numeric',
'is_datetimelike_v_object',
'is_datetimetz',
'is_int_or_datetime_dtype',
'is_period_arraylike',
'is_string_like',
'is_string_like_dtype']:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"These are no longer public API functions, "
"but can be imported from "
"pandas.api.types.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(common, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
# deprecate array_equivalent
def array_equivalent(*args, **kwargs):
warnings.warn("'pandas.core.common.array_equivalent' is deprecated and "
"is no longer public API", DeprecationWarning, stacklevel=2)
from pandas.core.dtypes import missing
return missing.array_equivalent(*args, **kwargs)
class SettingWithCopyError(ValueError):
pass
class SettingWithCopyWarning(Warning):
pass
class AbstractMethodError(NotImplementedError):
"""Raise this error instead of NotImplementedError for abstract methods
while keeping compatibility with Python 2 and Python 3.
"""
def __init__(self, class_instance):
self.class_instance = class_instance
def __str__(self):
msg = "This method must be defined in the concrete class of {name}"
return (msg.format(name=self.class_instance.__class__.__name__))
def flatten(l):
"""Flatten an arbitrarily nested sequence.
Parameters
----------
l : sequence
The non string sequence to flatten
Notes
-----
This doesn't consider string sequences.
Returns
-------
flattened : generator
"""
for el in l:
if _iterable_not_string(el):
for s in flatten(el):
yield s
else:
yield el
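# --- Editor's sketch (illustrative, not part of the original file) ---
# flatten() walks arbitrarily nested sequences but leaves strings intact:
# >>> list(flatten([1, (2, 3), ["a", [4]]]))
# [1, 2, 3, 'a', 4]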
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
def _maybe_match_name(a, b):
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
if a_has and b_has:
if a.name == b.name:
return a.name
else:
return None
elif a_has:
return a.name
elif b_has:
return b.name
return None
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
def _maybe_box(indexer, values, obj, key):
# if we have multiples coming back, box em
if isinstance(values, np.ndarray):
return obj[indexer.get_loc(key)]
# return the value
return values
def _maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
if isinstance(value, (np.datetime64, datetime)):
value = tslib.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslib.Timedelta(value)
return value
_values_from_object = lib.values_from_object
def is_bool_indexer(key):
if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)):
if key.dtype == np.object_:
key = np.asarray(_values_from_object(key))
if not lib.is_bool_array(key):
if isna(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
except TypeError: # pragma: no cover
return False
return False
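# --- Editor's sketch (illustrative, not part of the original file) ---
# A boolean ndarray or list of booleans qualifies as a boolean indexer;
# anything else returns False (object arrays containing NA raise).
# >>> import numpy as np
# >>> is_bool_indexer(np.array([True, False]))
# True
# >>> is_bool_indexer([True, False, True])
# True
# >>> is_bool_indexer([0, 1, 2])
# False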
def _default_index(n):
from pandas.core.index import RangeIndex
return RangeIndex(0, n, name=None)
def _mut_exclusive(**kwargs):
item1, item2 = kwargs.items()
label1, val1 = item1
label2, val2 = item2
if val1 is not None and val2 is not None:
msg = 'mutually exclusive arguments: {label1!r} and {label2!r}'
raise TypeError(msg.format(label1=label1, label2=label2))
elif val1 is not None:
return val1
else:
return val2
def _not_none(*args):
"""Returns a generator consisting of the arguments that are not None"""
return (arg for arg in args if arg is not None)
def _any_none(*args):
"""Returns a boolean indicating if any argument is None"""
for arg in args:
if arg is None:
return True
return False
def _all_none(*args):
"""Returns a boolean indicating if all arguments are None"""
for arg in args:
if arg is not None:
return False
return True
def _any_not_none(*args):
"""Returns a boolean indicating if any argument is not None"""
for arg in args:
if arg is not None:
return True
return False
def _all_not_none(*args):
"""Returns a boolean indicating if all arguments are not None"""
for arg in args:
if arg is None:
return False
return True
def _count_not_none(*args):
"""Returns the count of arguments that are not None"""
return sum(x is not None for x in args)
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def iterpairs(seq):
"""
Parameters
----------
seq : sequence
Returns
-------
iterator returning overlapping pairs of elements
Examples
--------
>>> list(iterpairs([1, 2, 3, 4]))
[(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
next(seq_it_next)
return zip(seq_it, seq_it_next)
def split_ranges(mask):
""" Generates tuples of ranges which cover all True value in mask
>>> list(split_ranges([1,0,0,1,0]))
[(0, 1), (3, 4)]
"""
ranges = [(0, len(mask))]
for pos, val in enumerate(mask):
if not val: # this pos should be omitted, split off the prefix range
r = ranges.pop()
if pos > r[0]: # yield non-zero range
yield (r[0], pos)
if pos + 1 < len(mask): # save the rest for processing
ranges.append((pos + 1, len(mask)))
if ranges:
yield ranges[-1]
def _long_prod(vals):
result = long(1)
for x in vals:
result *= x
return result
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
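# --- Editor's sketch (illustrative, not part of the original file) ---
# Unlike itertools.groupby, this dict-based groupby needs no pre-sorting:
# >>> g = groupby(["apple", "ant", "bee", "axe"], key=lambda w: w[0])
# >>> sorted(g["a"])
# ['ant', 'apple', 'axe']
# >>> g["b"]
# ['bee']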
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, compat.string_types):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
try:
result = np.empty(len(values), dtype=object)
result[:] = values
except ValueError:
# we have a list-of-list
result[:] = [tuple(x) for x in values]
return result
def _index_labels_to_array(labels):
if isinstance(labels, (compat.string_types, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels)
return labels
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
def is_null_slice(obj):
""" we have a null slice """
return (isinstance(obj, slice) and obj.start is None and
obj.stop is None and obj.step is None)
def is_true_slices(l):
"""
Find non-trivial slices in "l": return a list of booleans with same length.
"""
return [isinstance(k, slice) and not is_null_slice(k) for k in l]
def is_full_slice(obj, l):
""" we have a full length slice """
return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and
obj.step is None)
def _get_callable_name(obj):
# typical case has name
if hasattr(obj, '__name__'):
return getattr(obj, '__name__')
# some objects don't; could recurse
if isinstance(obj, partial):
return _get_callable_name(obj.func)
# fall back to class name
if hasattr(obj, '__call__'):
return obj.__class__.__name__
# everything failed (probably because the argument
# wasn't actually callable); we return None
# instead of the empty string in this case to allow
# distinguishing between no name and a name of ''
return None
def _apply_if_callable(maybe_callable, obj, **kwargs):
"""
Evaluate possibly callable input using obj and kwargs if it is callable,
otherwise return as it is
Parameters
----------
maybe_callable : possibly a callable
obj : NDFrame
**kwargs
"""
if callable(maybe_callable):
return maybe_callable(obj, **kwargs)
return maybe_callable
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
return new_vals.view(_NS_DTYPE)
if arr1.dtype == _NS_DTYPE:
arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
if arr2.dtype == _NS_DTYPE:
arr2 = tslib.ints_to_pydatetime(arr2.view('i8'))
return np.where(mask, arr1, arr2)
def _dict_compat(d):
"""
Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dict
Parameters
----------
d: dict like object
Returns
-------
dict
"""
return dict((_maybe_box_datetimelike(key), value)
for key, value in iteritems(d))
def standardize_mapping(into):
"""
Helper function to standardize a supplied mapping.
.. versionadded:: 0.21.0
Parameters
----------
into : instance or subclass of collections.Mapping
Must be a class, an initialized collections.defaultdict,
or an instance of a collections.Mapping subclass.
Returns
-------
mapping : a collections.Mapping subclass or other constructor
a callable object that can accept an iterator to create
the desired Mapping.
See Also
--------
DataFrame.to_dict
Series.to_dict
"""
if not inspect.isclass(into):
if isinstance(into, collections.defaultdict):
return partial(
collections.defaultdict, into.default_factory)
into = type(into)
if not issubclass(into, collections.Mapping):
raise TypeError('unsupported type: {into}'.format(into=into))
elif into == collections.defaultdict:
raise TypeError(
'to_dict() only accepts initialized defaultdicts')
return into
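# --- Editor's sketch (illustrative, not part of the original file) ---
# standardize_mapping turns the `into` argument of to_dict() into a
# constructor: a Mapping subclass is returned as-is, while an initialized
# defaultdict is wrapped so its default_factory is preserved.
# >>> import collections
# >>> standardize_mapping(dict) is dict
# True
# >>> make = standardize_mapping(collections.defaultdict(list))
# >>> make()
# defaultdict(<class 'list'>, {})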
def sentinel_factory():
class Sentinel(object):
pass
return Sentinel()
# ----------------------------------------------------------------------
# Detect our environment
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
get_option('mode.sim_interactive'))
try:
return __IPYTHON__ or check_main() # noqa
except:
return check_main()
def in_qtconsole():
"""
check if we're inside an IPython qtconsole
.. deprecated:: 0.14.1
This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'qtconsole' in front_end.lower():
return True
except:
return False
return False
def in_ipnb():
"""
check if we're inside an IPython Notebook
.. deprecated:: 0.14.1
This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'notebook' in front_end.lower():
return True
except:
return False
return False
def in_ipython_frontend():
"""
check if we're inside an IPython zmq frontend
"""
try:
ip = get_ipython() # noqa
return 'zmq' in str(type(ip)).lower()
except:
pass
return False
def _random_state(state=None):
"""
Helper function for processing random_state arguments.
Parameters
----------
state : int, np.random.RandomState, None.
If receives an int, passes to np.random.RandomState() as seed.
If receives an np.random.RandomState object, just returns object.
If receives `None`, returns np.random.
If receives anything else, raises an informative ValueError.
Default None.
Returns
-------
np.random.RandomState
"""
if types.is_integer(state):
return np.random.RandomState(state)
elif isinstance(state, np.random.RandomState):
return state
elif state is None:
return np.random
else:
raise ValueError("random_state must be an integer, a numpy "
"RandomState, or None")
def _get_distinct_objs(objs):
"""
Return a list with distinct elements of "objs" (different ids).
Preserves order.
"""
ids = set()
res = []
for obj in objs:
if not id(obj) in ids:
ids.add(id(obj))
res.append(obj)
return res
def _pipe(obj, func, *args, **kwargs):
"""
Apply a function ``func`` to object ``obj`` either by passing obj as the
first argument to the function or, in the case that the func is a tuple,
interpret the first element of the tuple as a function and pass the obj to
that function as a keyword argument whose key is the value of the second
element of the tuple.
Parameters
----------
func : callable or tuple of (callable, string)
Function to apply to this object or, alternatively, a
``(callable, data_keyword)`` tuple where ``data_keyword`` is a
string indicating the keyword of `callable`` that expects the
object.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : dict, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
msg = '%s is both the pipe target and a keyword argument' % target
raise ValueError(msg)
kwargs[target] = obj
return func(*args, **kwargs)
else:
return func(obj, *args, **kwargs)
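# --- Editor's sketch (illustrative, not part of the original file) ---
# _pipe either calls func(obj, ...) directly, or, given a (callable, keyword)
# tuple, passes obj through that keyword instead; `f` below is made up.
# >>> f = lambda data, reverse=False: sorted(data, reverse=reverse)
# >>> _pipe([3, 1, 2], f)
# [1, 2, 3]
# >>> _pipe([3, 1, 2], (f, "data"), reverse=True)
# [3, 2, 1]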
| apache-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/tests/test_bbox_tight.py | 3 | 3629 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import numpy as np
from matplotlib import rcParams
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.patches as mpatches
from matplotlib.ticker import FuncFormatter
@image_comparison(baseline_images=['bbox_inches_tight'], remove_text=True,
savefig_kwarg=dict(bbox_inches='tight'), tol=15)
def test_bbox_inches_tight():
#: Test that a figure saved using bbox_inches='tight' is clipped correctly
data = [[ 66386, 174296, 75131, 577908, 32015],
[ 58230, 381139, 78045, 99308, 160454],
[ 89135, 80552, 152558, 497981, 603535],
[ 78415, 81858, 150656, 193263, 69638],
[139361, 331509, 343164, 781380, 52269]]
colLabels = rowLabels = [''] * 5
rows = len(data)
ind = np.arange(len(colLabels)) + 0.3 # the x locations for the groups
cellText = []
width = 0.4 # the width of the bars
yoff = np.array([0.0] * len(colLabels))
# the bottom values for stacked bar chart
fig, ax = plt.subplots(1, 1)
for row in xrange(rows):
plt.bar(ind, data[row], width, bottom=yoff)
yoff = yoff + data[row]
cellText.append([''])
plt.xticks([])
plt.legend([''] * 5, loc=(1.2, 0.2))
# Add a table at the bottom of the axes
cellText.reverse()
the_table = plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, loc='bottom')
@image_comparison(baseline_images=['bbox_inches_tight_suptile_legend'],
remove_text=False, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_suptile_legend():
plt.plot(list(xrange(10)), label='a straight line')
plt.legend(bbox_to_anchor=(0.9, 1), loc=2, )
plt.title('Axis title')
plt.suptitle('Figure title')
# put an extra long y tick on to see that the bbox is accounted for
def y_formatter(y, pos):
if int(y) == 4:
return 'The number 4'
else:
return str(y)
plt.gca().yaxis.set_major_formatter(FuncFormatter(y_formatter))
plt.xlabel('X axis')
@image_comparison(baseline_images=['bbox_inches_tight_clipping'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_clipping():
# tests bbox clipping on scatter points, and path clipping on a patch
# to generate an appropriately tight bbox
plt.scatter(list(xrange(10)), list(xrange(10)))
ax = plt.gca()
ax.set_xlim([0, 5])
ax.set_ylim([0, 5])
# make a massive rectangle and clip it with a path
patch = mpatches.Rectangle([-50, -50], 100, 100,
transform=ax.transData,
facecolor='blue', alpha=0.5)
path = mpath.Path.unit_regular_star(5).deepcopy()
path.vertices *= 0.25
patch.set_clip_path(path, transform=ax.transAxes)
plt.gcf().artists.append(patch)
@image_comparison(baseline_images=['bbox_inches_tight_raster'],
remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})
def test_bbox_inches_tight_raster():
"""Test rasterization with tight_layout"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1.0, 2.0], rasterized=True)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
masfaraud/genmechanics | genmechanics/dynamic_positions.py | 1 | 58477 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import os
import webbrowser
import math
import random
import numpy as npy
import cma
from matplotlib.colors import hsv_to_rgb
import matplotlib.pyplot as plt
from matplotlib.patches import Arrow
import networkx as nx
# from jinja2 import Environment, PackageLoader, select_autoescape
from dessia_common.core import DessiaObject
#from numpy import zeros
from scipy.optimize import minimize
import volmdlr as vm
from genmechanics.core import Part, Mechanism
from genmechanics.templates import babylon_template
class Parameter(DessiaObject):
def __init__(self, lower_bound, upper_bound, periodicity=None):
DessiaObject.__init__(self,
lower_bound=lower_bound,
upper_bound=upper_bound,
periodicity=periodicity)
def random_value(self):
return random.uniform(self.lower_bound, self.upper_bound)
def are_values_equal(self, value1, value2, tol=1e-3):
if self.periodicity is not None:
value1 = value1 % self.periodicity
value2 = value2 % self.periodicity
return math.isclose(value1, value2, abs_tol=tol)
def optimizer_bounds(self):
if self.periodicity is not None:
return (self.lower_bound-0.5*self.periodicity,
self.upper_bound+0.5*self.periodicity)
else:
return (self.lower_bound, self.upper_bound)
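# --- Editor's sketch (illustrative, not part of the original file) ---
# A periodic Parameter compares values modulo its periodicity, which is how
# revolute-linkage angles are treated; optimizer_bounds() widens the range
# by half a period on each side.
# >>> import math
# >>> p = Parameter(0., 2*math.pi, periodicity=2*math.pi)
# >>> p.are_values_equal(0.1, 0.1 + 2*math.pi)
# True
# >>> lo, hi = p.optimizer_bounds()   # roughly (-pi, 3*pi)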
class Linkage(DessiaObject):
_eq_is_data_eq = False
_non_serializable_attributes = ['part1_position_function',
'part2_position_function',
'part1_basis_function',
'part2_basis_function']
def __init__(self,
part1, part1_position_function, part1_basis_function,
part2, part2_position_function, part2_basis_function,
positions_require_kinematic_parameters,
basis_require_kinematic_parameters,
kinematic_parameters,
name=''):
"""
"""
DessiaObject.__init__(self,
part1=part1,
part1_position_function=part1_position_function,
part1_basis_function=part1_basis_function,
part2=part2,
part2_position_function=part2_position_function,
part2_basis_function=part2_basis_function,
positions_require_kinematic_parameters=positions_require_kinematic_parameters,
basis_require_kinematic_parameters=basis_require_kinematic_parameters,
kinematic_parameters=kinematic_parameters,
number_kinematic_parameters=len(kinematic_parameters),
name=name)
def equivalence_hash(self):
h = 0
if hasattr(self, 'part1_position'):
h += hash(self.part1_position)
if hasattr(self, 'part2_position'):
h += hash(self.part2_position)
if hasattr(self, 'part1_basis'):
h += hash(self.part1_basis)
if hasattr(self, 'part2_basis'):
h += hash(self.part2_basis)
return h
def is_equivalent(self, other_linkage):
if self.__class__ != other_linkage.__class__:
return False
if hasattr(self, 'part1_position'):
if self.part1_position != other_linkage.part1_position:
return False
if hasattr(self, 'part2_position'):
if self.part2_position != other_linkage.part2_position:
return False
if hasattr(self, 'part1_basis'):
if self.part1_basis != other_linkage.part1_basis:
return False
if hasattr(self, 'part2_basis'):
if self.part2_basis != other_linkage.part2_basis:
return False
return True
def frame(self, linkage_parameters_values, side):
if side:
part1_frame = self.part1_basis_function(linkage_parameters_values)\
.to_frame(self.part1_position_function(linkage_parameters_values))
part2_frame = -self.part2_basis_function(linkage_parameters_values)\
.to_frame(-self.part2_position_function(linkage_parameters_values))
return part1_frame + part2_frame
else:
part1_frame = -self.part1_basis_function(linkage_parameters_values)\
.to_frame(-self.part1_position_function(linkage_parameters_values))
part2_frame = self.part2_basis_function(linkage_parameters_values)\
.to_frame(self.part2_position_function(linkage_parameters_values))
return part2_frame + part1_frame
def babylonjs(self, initial_linkage_parameters,
part1_parent=None, part2_parent=None):
part1_position = self.part1_position_function(initial_linkage_parameters)
part2_position = self.part2_position_function(initial_linkage_parameters)
s = ''
if part1_parent is not None:
s += 'var linkage_part1_mesh = BABYLON.MeshBuilder.CreateSphere("default_linkage part1", {diameter: 0.02}, scene);\n'
s += 'linkage_part1_mesh.position = new BABYLON.Vector3({}, {}, {});\n'.format(*part1_position.vector)
s += 'linkage_part1_mesh.parent = {};\n'.format(part1_parent)
if part2_parent:
s += 'var linkage_part2_mesh = BABYLON.MeshBuilder.CreateSphere("default_linkage part2", {diameter: 0.02}, scene);\n'
s += 'linkage_part2_mesh.position = new BABYLON.Vector3({}, {}, {});\n'.format(*part2_position.vector)
s += 'linkage_part2_mesh.parent = {};\n'.format(part2_parent)
return s
class RevoluteLinkage(Linkage):
holonomic = True
def __init__(self,
part1, part1_position, part1_basis,
part2, part2_position, part2_basis, name='RevoluteLinkage'):
"""
:param part2_basis: a basis defining orientation of linkage on part2
"""
def part1_basis_f(q):
return part1_basis.rotation(part1_basis.u, q[0], copy=True)
def part2_basis_f(q):
return part2_basis
DessiaObject.__init__(self,
part1_position=part1_position,
part2_position=part2_position,
part1_basis=part1_basis,
part2_basis=part2_basis)
Linkage.__init__(self,
part1, lambda q: part1_position, part1_basis_f,
part2, lambda q: part2_position, part2_basis_f,
False, True,
[Parameter(0., 2*math.pi, 2*math.pi)], name=name)
def babylonjs(self, initial_linkage_parameters,
part1_parent=None, part2_parent=None):
s = ''
if part1_parent is not None:
s += 'var path1 = [new BABYLON.Vector3({}, {}, {}), new BABYLON.Vector3({}, {}, {})];\n'.format(*(self.part1_position-0.03*self.part1_basis.u),
*(self.part1_position+0.03*self.part1_basis.u))
s += 'var linkage_part1_mesh = BABYLON.MeshBuilder.CreateTube("revolute part1", {path: path1, radius: 0.01, sideOrientation:BABYLON.Mesh.DOUBLESIDE}, scene);\n'
s += 'linkage_part1_mesh.enableEdgesRendering();\n'
s += 'linkage_part1_mesh.edgesWidth = 0.4;\n'
s += 'linkage_part1_mesh.edgesColor = new BABYLON.Color4(0, 0, 0, 1);\n'
s += 'linkage_part1_mesh.parent = {};\n'.format(part1_parent)
if part2_parent is not None:
s += 'var path2 = [new BABYLON.Vector3({}, {}, {}), new BABYLON.Vector3({}, {}, {})];\n'.format(*(self.part2_position-0.03*self.part2_basis.u),
*(self.part2_position+0.03*self.part2_basis.u))
s += 'var linkage_part2_mesh = BABYLON.MeshBuilder.CreateTube("revolute part2", {path: path2, radius: 0.015, sideOrientation:BABYLON.Mesh.DOUBLESIDE}, scene);\n'
s += 'linkage_part2_mesh.enableEdgesRendering();\n'
s += 'linkage_part2_mesh.edgesWidth = 0.4;\n'
s += 'linkage_part2_mesh.edgesColor = new BABYLON.Color4(0, 0, 0, 1);\n'
s += 'linkage_part2_mesh.parent = {};\n'.format(part2_parent)
return s
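# --- Editor's sketch (illustrative, not part of the original file) ---
# Hypothetical construction of a RevoluteLinkage between two parts. The part
# names, points and basis below are invented, and the exact volmdlr /
# genmechanics constructor signatures (vm.Point3D, vm.XYZ, Part) are assumed
# to match the version this module targets.
# >>> import volmdlr as vm
# >>> from genmechanics.core import Part
# >>> ground, crank = Part('ground'), Part('crank')
# >>> pivot = RevoluteLinkage(ground, vm.Point3D((0, 0, 0)), vm.XYZ,
# ...                         crank, vm.Point3D((0, 0, 0)), vm.XYZ,
# ...                         name='crank pivot')
# >>> pivot.number_kinematic_parameters
# 1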
class SlidingRevoluteLinkage(Linkage):
holonomic = True
def __init__(self,
part1, part1_position, part1_basis,
part2, part2_position, part2_basis, name='SlidingRevoluteLinkage'):
"""
:param part2_basis: a basis defining orientation of linkage on part2
The first kinematic parameter is the translation, the second the rotation
"""
def part1_position_f(q):
return part1_position + q[0]*part1_basis.u
def part2_position_f(q):
return part2_position
def part1_basis_f(q):
return part1_basis.Rotation(part1_basis.u, q[1], copy=True)
DessiaObject.__init__(self,
part1_position=part1_position,
part2_position=part2_position,
part1_basis=part1_basis,
part2_basis=part2_basis)
Linkage.__init__(self,
part1, part1_position_f, part1_basis_f,
part2, part2_position_f, lambda q: part2_basis,
True, True,
[Parameter(0., 2*math.pi, 2*math.pi),
Parameter(-1., 1., None)], name)
def babylonjs(self, initial_linkage_parameters,
part1_parent=None, part2_parent=None):
# part1_position = self.part1_position_function(initial_linkage_parameters)
# part1_basis = self.part1_position(initial_linkage_parameters)
# part2_position = self.part2_position_function(initial_linkage_parameters)
# part2_basis = self.part2_position(initial_linkage_parameters)
s = ''
if part1_parent is not None:
s += 'var path1 = [new BABYLON.Vector3({}, {}, {}), new BABYLON.Vector3({}, {}, {})];\n'.format(*(self.part1_position-0.1*self.part1_basis.u),
*(self.part1_position+0.1*self.part1_basis.u))
s += 'var linkage_part1_mesh = BABYLON.MeshBuilder.CreateTube("revolute part1", {path: path1, radius: 0.01, sideOrientation:BABYLON.Mesh.DOUBLESIDE}, scene);\n'
s += 'linkage_part1_mesh.enableEdgesRendering();\n'
s += 'linkage_part1_mesh.edgesWidth = 0.4;\n'
s += 'linkage_part1_mesh.edgesColor = new BABYLON.Color4(0, 0, 0, 1);\n'
s += 'linkage_part1_mesh.parent = {};\n'.format(part1_parent)
if part2_parent is not None:
s += 'var path2 = [new BABYLON.Vector3({}, {}, {}), new BABYLON.Vector3({}, {}, {})];\n'.format(*(self.part2_position-0.03*self.part2_basis.u),
*(self.part2_position+0.03*self.part2_basis.u))
s += 'var linkage_part2_mesh = BABYLON.MeshBuilder.CreateTube("revolute part2", {path: path2, radius: 0.015, sideOrientation:BABYLON.Mesh.DOUBLESIDE}, scene);\n'
s += 'linkage_part2_mesh.enableEdgesRendering();\n'
s += 'linkage_part2_mesh.edgesWidth = 0.4;\n'
s += 'linkage_part2_mesh.edgesColor = new BABYLON.Color4(0, 0, 0, 1);\n'
s += 'linkage_part2_mesh.parent = {};\n'.format(part2_parent)
return s
class PrismaticLinkage(Linkage):
holonomic = True
def __init__(self,
part1, part1_position, part1_basis,
part2, part2_position, part2_basis, name='PrismaticLinkage'):
"""
:param part2_basis: a basis defining orientation of linkage on part2
"""
def part1_position_f(q):
return part1_position + q[0]*part1_basis.u
def part2_position_f(q):
return part2_position
DessiaObject.__init__(self,
part1_position=part1_position,
part2_position=part2_position,
part1_basis=part1_basis,
part2_basis=part2_basis)
Linkage.__init__(self,
part1, part1_position_f, lambda q: part1_basis,
part2, part2_position_f, lambda q: part2_basis,
True, False,
[Parameter(-1, 1, None)], name)
def babylonjs(self, initial_linkage_parameters, part1_parent=None, part2_parent=None):
bp1 = self.part1_basis_function(initial_linkage_parameters)
bp2 = self.part2_basis_function(initial_linkage_parameters)
s = ''
if part1_parent is not None:
s += 'var linkage_part1_mesh = BABYLON.MeshBuilder.CreateBox("prismatic part1", {depth:0.015, height:0.015, width:0.25}, scene);\n'
s += 'linkage_part1_mesh.parent = {};\n'.format(part1_parent)
s += 'linkage_part1_mesh.position = new BABYLON.Vector3({}, {}, {});\n'.format(*self.part1_position_function(initial_linkage_parameters))
s += 'linkage_part1_mesh.rotation = BABYLON.Vector3.RotationFromAxis(new BABYLON.Vector3({}, {}, {}),new BABYLON.Vector3({}, {}, {}), new BABYLON.Vector3({}, {}, {}));\n'.format(*bp1.u, *bp1.v, *bp1.w)
s += 'linkage_part1_mesh.enableEdgesRendering();\n'
s += 'linkage_part1_mesh.edgesWidth = 0.3;\n'
s += 'linkage_part1_mesh.edgesColor = new BABYLON.Color4(0, 0, 0, 1);\n'
if part2_parent is not None:
s += 'var linkage_part2_mesh = BABYLON.MeshBuilder.CreateBox("prismatic part2", {depth:0.03, height:0.03, width:0.06}, scene);\n'
s += 'linkage_part2_mesh.parent = {};\n'.format(part2_parent)
s += 'linkage_part2_mesh.position = new BABYLON.Vector3({}, {}, {});\n'.format(*self.part2_position_function(initial_linkage_parameters))
s += 'linkage_part2_mesh.rotation = BABYLON.Vector3.RotationFromAxis(new BABYLON.Vector3({}, {}, {}),new BABYLON.Vector3({}, {}, {}), new BABYLON.Vector3({}, {}, {}));\n'.format(*bp2.u, *bp2.v, *bp2.w)
s += 'linkage_part2_mesh.enableEdgesRendering();\n'
s += 'linkage_part2_mesh.edgesWidth = 0.3;\n'
s += 'linkage_part2_mesh.edgesColor = new BABYLON.Color4(0, 0, 0, 1);\n'
return s
class LimitedBallLinkage(Linkage):
holonomic = True
def __init__(self,
part1, part1_position, part1_basis,
part2, part2_position, part2_basis,
name='LimitedBallLinkage'):
"""
Allowed movements are:
- a rotation around part1 basis u
- a rotation around part1 basis v
"""
def part1_basis_f(q):
return part1_basis.Rotation(part1_basis.u, q[0], copy=True)\
.Rotation(part1_basis.v, q[1], copy=True)
def part2_basis_f(q):
return part2_basis
DessiaObject.__init__(self,
part1_position=part1_position,
part2_position=part2_position,
part1_basis=part1_basis,
part2_basis=part2_basis)
Linkage.__init__(self,
part1, lambda q: part1_position, part1_basis_f,
part2, lambda q: part2_position, part2_basis_f,
False, True,
[Parameter(0., 2*math.pi, 2*math.pi),
Parameter(0., 2*math.pi, 2*math.pi)],
name)
class BallLinkage(Linkage):
holonomic = True
def __init__(self,
part1, part1_position, part1_basis,
part2, part2_position, part2_basis,
name='BallLinkage'):
"""
"""
DessiaObject.__init__(self,
part1_position=part1_position,
part2_position=part2_position,
part1_basis=part1_basis,
part2_basis=part2_basis)
part1_basis_f, part2_basis_f = self.basis_functions()
Linkage.__init__(self,
part1, lambda q: part1_position, part1_basis_f,
part2, lambda q: part2_position, part2_basis_f,
False, True,
[Parameter(0., 2*math.pi, 2*math.pi),
Parameter(0., 2*math.pi, 2*math.pi),
Parameter(0., 2*math.pi, 2*math.pi)], name)
def update_part1_point(self, new_position):
self.part1_position = new_position
self.part1_position_function = lambda q: new_position
def update_part2_point(self, new_position):
self.part2_position = new_position
self.part2_position_function = lambda q: new_position
def basis_functions(self):
def part1_basis_f(q):
return self.part1_basis.Rotation(self.part1_basis.u, q[0], copy=True)\
.Rotation(self.part1_basis.v, q[1], copy=True)\
.Rotation(self.part1_basis.w, q[2], copy=True)
def part2_basis_f(q):
return self.part2_basis
return part1_basis_f, part2_basis_f
class NoConfigurationFoundError(Exception):
pass
class MovingMechanism(Mechanism):
def __init__(self, linkages, ground, name=''):
Mechanism.__init__(self,
linkages,
ground,
{},
None,
None,
name=name)
# self.parts_setting_path = {}
self._settings_path = {}
# Settings
self.settings_graph()
n_kp = 0
self.kinematic_parameters_mapping = {}
for linkage in self.linkages_kinematic_setting:
for i in range(linkage.number_kinematic_parameters):
self.kinematic_parameters_mapping[linkage, i] = n_kp + i
n_kp += linkage.number_kinematic_parameters
def settings_graph(self):
graph = self.holonomic_graph.copy()
self.opened_linkages = []
graph_cycles = nx.cycle_basis(graph)
while len(graph_cycles) != 0:
# Deleting first cycle of graph
ground_distance = [(l, len(nx.shortest_path(graph, l, self.ground)))\
for l in graph_cycles[0]\
if l in self.linkages\
and not l in self.opened_linkages\
and not l.positions_require_kinematic_parameters
]
linkage_to_delete = max(ground_distance, key=lambda x:x[1])[0]
self.opened_linkages.append(linkage_to_delete)
graph.remove_node(linkage_to_delete)
graph_cycles = nx.cycle_basis(graph)
self.linkages_kinematic_setting = [l for l in self.linkages if l not in self.opened_linkages]
self.settings_graph = graph
def plot_settings_graph(self):
s="""<html>
<head>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/vis/4.20.0/vis.min.js"></script>
<link href="https://cdnjs.cloudflare.com/ajax/libs/vis/4.20.0/vis.min.css" rel="stylesheet" type="text/css" />
<style type="text/css">
#mynetwork {
border: 1px solid lightgray;
}
</style>
</head>
<body>
<div id="mynetwork"></div>
<script type="text/javascript">
var nodes = new vis.DataSet([\n"""
index={}
for ipart,part in enumerate(self.parts+[self.ground]):
index[part]=ipart
s+="{{id: {}, label: '{}'}},\n".format(ipart,part.name)
# s+=']);\n'
n=len(self.parts)+1
# index[self.ground]=n
# n+=1
for il,linkage in enumerate(self.linkages_kinematic_setting):
index[linkage] = n+il
s+="{{id: {}, label: '{}'}},\n".format(n+il,linkage.name)
s+=']);\n'
s+="var edges = new vis.DataSet(["
for linkage in self.linkages_kinematic_setting:
s+='{{from: {}, to: {}}},\n'.format(index[linkage],index[linkage.part1])
s+='{{from: {}, to: {}}},\n'.format(index[linkage],index[linkage.part2])
s+=']);'
s+="""
// create a network
var container = document.getElementById('mynetwork');
// provide the data in the vis format
var data = {
nodes: nodes,
edges: edges
};
var options = {};
// initialize your network!
var network = new vis.Network(container, data, options);
</script>
</body>
</html>"""
with open('gm_graph_viz.html','w') as file:
file.write(s)
webbrowser.open('file://' + os.path.realpath('gm_graph_viz.html'))
def settings_path(self, part1, part2):
if (part1, part2) in self._settings_path:
return self._settings_path[part1, part2]
elif (part2, part1) in self._settings_path:
path = [(p2, linkage, not linkage_side, p1) for (p1, linkage, linkage_side, p2) in self._settings_path[part2, part1][::-1]]
self._settings_path[part1, part2] = path
return self._settings_path[part1, part2]
else:
path = []
try:
raw_path = list(nx.shortest_path(self.settings_graph, part1, part2))
except nx.NetworkXNoPath:
self.plot_settings_graph()
raise nx.NetworkXError('No path between {} and {}'.format(part1.name, part2.name))
for path_part1, linkage, path_part2 in zip(raw_path[:-2:2], raw_path[1::2], raw_path[2::2]+[part2]):
path.append((path_part1, linkage, linkage.part1==path_part1, path_part2))
self._settings_path[part1, part2] = path
return path
def part_global_frame(self, part, kinematic_parameters_values):
frame = vm.OXYZ
for part1, linkage, linkage_side, part2 in self.settings_path(self.ground,
part):
linkage_parameters_values = self.extract_linkage_parameters_values(linkage,
kinematic_parameters_values)
linkage_frame = linkage.frame(linkage_parameters_values, side=linkage_side)
frame = frame + linkage_frame
return frame
def part_relative_frame(self, part, reference_part, kinematic_parameters_values):
frame = vm.OXYZ
for part1, linkage, linkage_side, part2 in self.settings_path(reference_part, part):
linkage_parameters_values = self.extract_linkage_parameters_values(linkage, kinematic_parameters_values)
linkage_frame = linkage.frame(linkage_parameters_values, side=linkage_side)
frame = frame + linkage_frame
return frame
def linkage_global_position(self, linkage, global_parameter_values):
if linkage.positions_require_kinematic_parameters:
ql = self.extract_linkage_parameters_values(linkage,
global_parameter_values)
else:
ql = []
part1_frame = self.part_global_frame(linkage.part1, global_parameter_values)
return part1_frame.old_coordinates(linkage.part1_position_function(ql))
def extract_linkage_parameters_values(self, linkage, global_parameter_values):
linkage_parameters = [global_parameter_values[self.kinematic_parameters_mapping[linkage, i]]\
for i in range(linkage.number_kinematic_parameters)]
return linkage_parameters
def global_to_linkages_parameter_values(self, global_parameter_values):
linkages_parameter_values = {}
for linkage in self.linkages_kinematic_setting:
linkages_parameter_values[linkage] = self.extract_linkage_parameters_values(linkage, global_parameter_values)
return linkages_parameter_values
def opened_linkage_gap(self, linkage, global_parameter_values):
if linkage.positions_require_kinematic_parameters:
ql = self.extract_linkage_parameters_values(linkage, global_parameter_values)
else:
ql = []
position1 = self.part_global_frame(linkage.part1, global_parameter_values).old_coordinates(linkage.part1_position_function(ql))
position2 = self.part_global_frame(linkage.part2, global_parameter_values).old_coordinates(linkage.part2_position_function(ql))
return position2 - position1
def opened_linkage_misalignment(self, linkage, global_parameter_values):
ql = self.extract_linkage_parameters_values(linkage, global_parameter_values)
basis1 = self.part_global_frame(linkage.part1, global_parameter_values).Basis()
basis2 = self.part_global_frame(linkage.part2, global_parameter_values).Basis()
basis = basis2 - basis1 - linkage.basis(ql)
return basis
def opened_linkages_residue(self, q):
residue = 0.
for linkage in self.opened_linkages:
residue += self.opened_linkage_gap(linkage, q).norm()
return residue
def reduced_x_to_full_x(self, xr, basis_vector, free_parameters_dofs):
x = basis_vector[:]
for qrv, i in zip(xr, free_parameters_dofs):
x[i] = qrv
return x
def full_x_to_reduced_x(self, x, free_parameters_dofs):
return [x[i] for i in free_parameters_dofs]
def geometric_closing_residue_function(self, basis_vector,
free_parameters_dofs):
def residue_function(xr):
x = self.reduced_x_to_full_x(xr, basis_vector, free_parameters_dofs)
return self.opened_linkages_residue(x)
return residue_function
def _optimization_settings(self, imposed_parameters):
# Free parameter identification
free_parameters_dofs = []
free_parameters = []
n_free_parameters = 0
n_parameters = len(self.kinematic_parameters_mapping.items())
basis_vector = [0] * n_parameters
for i in range(n_parameters):
if i in imposed_parameters:
basis_vector[i] = imposed_parameters[i]
else:
free_parameters_dofs.append(i)
n_free_parameters += 1
bounds = []
for (linkage, iparameter), idof in self.kinematic_parameters_mapping.items():
if idof in free_parameters_dofs:
parameter = linkage.kinematic_parameters[iparameter]
free_parameters.append(parameter)
bounds.append(parameter.optimizer_bounds())
bounds_cma = [[], []]
for bmin, bmax in bounds:
bounds_cma[0].append(bmin)
bounds_cma[1].append(bmax)
return basis_vector, free_parameters_dofs, free_parameters, n_free_parameters, bounds, bounds_cma
def find_configurations(self, imposed_parameters,
number_max_configurations,
number_starts=10, tol=1e-5,
starting_point=None):
# initial_imposed_parameters = {k: v[0] for k,v in steps_imposed_parameters.items()}
basis_vector, free_parameters_dofs, free_parameters, n_free_parameters, bounds, bounds_cma\
= self._optimization_settings(imposed_parameters)
geometric_closing_residue = self.geometric_closing_residue_function(basis_vector,
free_parameters_dofs)
# Starting n times
starting_points = []
for istart in range(number_starts):
if starting_point is None:
xr0 = [0]*n_free_parameters
for i, parameter in enumerate(free_parameters):
xr0[i] = parameter.random_value()
else:
xr0 = [starting_point[i] for i in free_parameters_dofs]
# result = minimize(geometric_closing_residue, xr0, bounds=bounds,
# tol=0.1*tol)
# fopt = result.fun
# if fopt < tol:
# xr_opt = result.x
# else:
xr_opt, fopt = cma.fmin(geometric_closing_residue, xr0, 0.2,
options={'bounds':bounds_cma,
'ftarget': tol,
'verbose': -9,
'maxiter': 2000})[0:2]
if fopt <= tol:
found_x = False
for x in starting_points:
equal = True
for parameter, xi1, xi2 in zip(free_parameters, x, xr_opt):
if not parameter.are_values_equal(xi1, xi2):
equal = False
break
if equal:
found_x = True
if not found_x:
xopt = self.reduced_x_to_full_x(xr_opt, basis_vector, free_parameters_dofs)
starting_points.append(xopt[:])
yield xopt
if len(starting_points) >= number_max_configurations:
break
        print('Found {} configurations'.format(len(starting_points)))
        if not starting_points:
            raise NoConfigurationFoundError
def solve_from_initial_configuration(self, initial_parameter_values,
steps_imposed_parameters,
number_step_retries=5,
max_failed_steps=3,
tol=1e-4):
"""
returns a MechanismConfigurations object. The initial point deduced from initial_parameter_values
is the first step of the MechanismConfigurations object.
"""
x0 = initial_parameter_values
step_imposed_parameters = {k: v[0] for k, v in steps_imposed_parameters.items()}
basis_vector, free_parameters_dofs, free_parameters, n_free_parameters, bounds, bounds_cma\
= self._optimization_settings(step_imposed_parameters)
xr0 = self.full_x_to_reduced_x(x0, free_parameters_dofs)
n_steps = len(list(steps_imposed_parameters.values())[0])
linkage_steps_parameters = [self.global_to_linkages_parameter_values(x0)]
number_failed_steps = 0
failed_step = False
for istep in range(n_steps):
step_imposed_parameters = {k: v[istep] for k, v in steps_imposed_parameters.items()}
# basis vector needs update at each time step!
basis_vector, free_parameters_dofs, free_parameters, n_free_parameters, bounds, bounds_cma\
= self._optimization_settings(step_imposed_parameters)
geometric_closing_residue = self.geometric_closing_residue_function(basis_vector,
free_parameters_dofs)
if n_free_parameters > 0:
step_converged = False
n_tries_step = 1
while (not step_converged) and (n_tries_step<= number_step_retries):
result = minimize(geometric_closing_residue,
npy.array(xr0)+0.01*(npy.random.random(n_free_parameters)-0.5),
tol=0.1*tol, bounds=bounds)
xr_opt = result.x
fopt = result.fun
if fopt > tol:
xr_opt, fopt = cma.fmin(geometric_closing_residue, xr0, 0.1,
options={'bounds':bounds_cma,
# 'tolfun':0.5*tol,
'verbose':-9,
'ftarget': tol,
'maxiter': 500})[0:2]
n_tries_step += 1
step_converged = fopt < tol
if step_converged:
xr0 = xr_opt[:]
x = self.reduced_x_to_full_x(xr_opt, basis_vector, free_parameters_dofs)
# qs.append(x[:])
linkage_steps_parameters.append(self.global_to_linkages_parameter_values(x))
else:
print('@istep {}: residue: {}'.format(istep, fopt))
number_failed_steps += 1
if number_failed_steps >= max_failed_steps:
print('Failed {} steps, stopping configuration computation'.format(max_failed_steps))
failed_step = True
break
else:
linkage_steps_parameters.append(self.global_to_linkages_parameter_values(basis_vector))
# qs.append(basis_vector)
if not failed_step:
return MechanismConfigurations(self,
steps_imposed_parameters,
linkage_steps_parameters)
# def solve_configurations(steps_imposed_parameters,
# number_max_configurations)
#
# for configuration in self.find_initial_configurations(steps_imposed_parameters,
# number_max_configurations,
# number_starts=10, tol=1e-5):
#
# yield self.solve_from_initial_configuration(self, initial_parameter_values,
# steps_imposed_parameters,
# number_step_retries=5,
# max_failed_steps=3,
# tol=1e-4)
def istep_from_value_on_list(list_, value):
for ipoint, (point1, point2) in enumerate(zip(list_[:-1],
list_[1:])):
interval = sorted((point1, point2))
if (interval[0] <= value) and (value <= interval[1]):
alpha = (value-point1)/(point2-point1)
if alpha < 0 or alpha > 1:
raise ValueError
return ipoint + alpha
values = [p for p in list_]
min_values = min(values)
max_values = max(values)
raise ValueError('Specified value not found in list_: {} not in [{}, {}]'.format(value, min_values, max_values))
def istep_from_value_on_trajectory(trajectory, value, axis):
for ipoint, (point1, point2) in enumerate(zip(trajectory[:-1],
trajectory[1:])):
interval = sorted((point1[axis], point2[axis]))
if (interval[0] <= value) and (value <= interval[1]):
alpha = (value-point1[axis])/(point2[axis]-point1[axis])
if alpha < 0 or alpha > 1:
raise ValueError
return ipoint + alpha
values = [p[axis] for p in trajectory]
min_values = min(values)
max_values = max(values)
raise ValueError('Specified value not found in trajectory: {} not in [{}, {}]'.format(value, min_values, max_values))
def point_from_istep_on_trajectory(trajectory, istep):
istep1 = int(istep)
if istep1 == istep:
# No interpolation needed
return trajectory[int(istep)]
else:
alpha = istep - istep1
point1 = trajectory[istep1]
point2 = trajectory[istep1+1]
return (1-alpha)*point1+(alpha)*point2
def trajectory_point_from_value(trajectory, value, axis):
for ipoint, (point1, point2) in enumerate(zip(trajectory[:-1],
trajectory[1:])):
interval = sorted((point1[axis], point2[axis]))
if (interval[0] <= value) and (value < interval[1]):
alpha = (value - point1[axis])/(point2[axis] - point1[axis])
return (1-alpha)*point1 + alpha*point2
return None
def trajectory_derivative(trajectory, istep, delta_istep):
istep1 = istep-0.5*delta_istep
istep2 = istep+0.5*delta_istep
if istep1 < 0:
istep1 = 0
istep2 = delta_istep
if istep2 > len(trajectory)-1:
istep2 = len(trajectory)-1
istep1 = istep2 - delta_istep
if istep1 < 0:
raise ValueError('Delta istep is too large!')
point1 = point_from_istep_on_trajectory(trajectory, istep1)
point2 = point_from_istep_on_trajectory(trajectory, istep2)
return (point2-point1)
class MechanismConfigurations(DessiaObject):
def __init__(self,
mechanism,
steps_imposed_parameters,
linkage_steps_parameters):
# number_steps = len(linkage_steps_parameters)
# Deducing steps from mechanism
self.steps = []
for linkage_param in linkage_steps_parameters:
step = [0]*len(mechanism.linkages_kinematic_setting)
for (linkage, linkage_dof_number), global_dof_number in mechanism.kinematic_parameters_mapping.items():
step[global_dof_number] = linkage_param[linkage][linkage_dof_number]
self.steps.append(step)
number_steps = len(self.steps)
DessiaObject.__init__(self,
mechanism=mechanism,
steps_imposed_parameters=steps_imposed_parameters,
linkage_steps_parameters=linkage_steps_parameters,
number_steps=number_steps)
if not self.is_valid():
raise ValueError
self.trajectories = {}
def is_valid(self):
return True
def opened_linkages_residue(self):
residues = []
for step in self.steps:
residues.append(self.mechanism.opened_linkages_residue(step))
return residues
def interpolate_step(self, istep):
"""
:param istep: can be a float
"""
istep1 = int(istep)
alpha = istep - istep1
if alpha == 0.:
return self.steps[istep1]
return [(1-alpha)*s1+alpha*s2 for s1, s2 in zip(self.steps[istep1],
self.steps[istep1+1])]
def plot_kinematic_parameters(self,
linkage1, kinematic_parameter1,
linkage2, kinematic_parameter2
):
x = []
y = []
# dof1 = self.mechanism.kinematic_parameters_mapping[linkage1, kinematic_parameter1]
# dof2 = self.mechanism.kinematic_parameters_mapping[linkage2, kinematic_parameter2]
for step in self.linkage_steps_parameters:
x.append(step[linkage1][kinematic_parameter1])
y.append(step[linkage2][kinematic_parameter2])
fig, ax = plt.subplots()
ax.plot(x, y, marker='o')
ax.set_xlabel('Parameter {} of linkage {}'.format(kinematic_parameter1+1, linkage1.name))
ax.set_ylabel('Parameter {} of linkage {}'.format(kinematic_parameter2+1, linkage2.name))
ax.grid()
return fig, ax
def trajectory(self, point, part, reference_part):
if (point, part, reference_part) in self.trajectories:
return self.trajectories[point, part, reference_part]
trajectory = []
for step in self.steps:
frame1 = self.mechanism.part_global_frame(part, step)
frame2 = self.mechanism.part_global_frame(reference_part, step)
frame = frame1 - frame2
trajectory.append(frame.OldCoordinates(point))
self.trajectories[point, part, reference_part] = trajectory
return trajectory
def plot2D_trajectory(self, point, part, reference_part,
x=vm.X3D, y=vm.Y3D, equal_aspect=True):
xt = []
yt = []
for traj_point in self.trajectory(point, part, reference_part):
xp, yp = traj_point.PlaneProjection2D(x, y)
xt.append(xp)
yt.append(yp)
fig, ax = plt.subplots()
ax.plot(xt, yt, marker='o')
ax.grid()
ax.set_xlabel(str(x))
ax.set_ylabel(str(y))
ax.set_title('Trajectory of point {} on part {} relatively to part {}'.format(str(point), part.name, reference_part.name))
if equal_aspect:
ax.set_aspect('equal')
return fig, ax
def plot_trajectory(self, point, part, reference_part, equal_aspect=True):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xt = []
yt = []
zt = []
        for traj_point in self.trajectory(point, part, reference_part):
            xp, yp, zp = traj_point
xt.append(xp)
yt.append(yp)
zt.append(zp)
# fig, ax = plt.subplots()
ax.plot(xt, yt, zt, marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('Trajectory of point {} on part {} relatively to part {}'.format(str(point), part.name, reference_part.name))
if equal_aspect:
ax.set_aspect('equal')
# fig.canvas.set_window_title('Trajectory')
return fig, ax
def part_local_point_global_speed(self, part, point, istep):
"""
"""
if (istep < 0) or (istep > self.number_steps-1):
raise ValueError('istep {} outside of bounds max:{}'.format(istep,
self.number_steps-1))
elif istep < 0.5:
# Backward extrapolation from speeds 1 and 2
frame1 = self.mechanism.part_global_frame(part, self.steps[0])
frame2 = self.mechanism.part_global_frame(part, self.steps[1])
frame3 = self.mechanism.part_global_frame(part, self.steps[2])
p1 = frame1.old_coordinates(point)
p2 = frame2.old_coordinates(point)
p3 = frame3.old_coordinates(point)
v1 = p2 - p1
v2 = p3 - p2
alpha = istep - 0.5
return (1-alpha)*v1 + alpha*v2
elif istep > self.number_steps-1.5:
# forward extrapolation from speeds n-1 and n
i1 = int(istep-0.5)
frame1 = self.mechanism.part_global_frame(part, self.steps[-3])
frame2 = self.mechanism.part_global_frame(part, self.steps[-2])
frame3 = self.mechanism.part_global_frame(part, self.steps[-1])
p1 = frame1.old_coordinates(point)
p2 = frame2.old_coordinates(point)
p3 = frame3.old_coordinates(point)
v1 = p2 - p1
v2 = p3 - p2
alpha = istep - (self.number_steps - 2.5)
return (1-alpha)*v1 + alpha*v2
else:
int_istep = int(istep)
if int_istep+0.5 == istep:
# Using exact derivative
frame1 = self.mechanism.part_global_frame(part, self.steps[int_istep])
frame2 = self.mechanism.part_global_frame(part, self.steps[int_istep+1])
p1 = frame1.old_coordinates(point)
p2 = frame2.old_coordinates(point)
return p2 - p1
else:
# interpolation in between
i1 = int(istep-0.5)
frame1 = self.mechanism.part_global_frame(part, self.steps[i1])
frame2 = self.mechanism.part_global_frame(part, self.steps[i1+1])
frame3 = self.mechanism.part_global_frame(part, self.steps[i1+2])
p1 = frame1.old_coordinates(point)
p2 = frame2.old_coordinates(point)
p3 = frame3.old_coordinates(point)
v1 = p2 - p1
v2 = p3 - p2
alpha = istep - i1 - 0.5
return (1-alpha)*v1 + alpha*v2
def part_global_rotation_vector(self, part, istep):
step = self.interpolate_step(istep)
frame = self.mechanism.part_global_frame(part, step)
point1 = vm.O3D
point1_speed = self.part_local_point_global_speed(part, point1, istep)
for point2 in [vm.X3D, vm.Y3D, vm.Z3D]:
point2_speed = self.part_local_point_global_speed(part, point2, istep)
delta_speeds = point2_speed - point1_speed
if not math.isclose(delta_speeds.norm(), 0, abs_tol=1e-8):
break
p21 = frame.old_coordinates(point2) - frame.old_coordinates(point1)
R = delta_speeds.cross(p21)#/d_p21_2
return R
def part_instant_rotation_global_axis_point(self, part, istep):
w = self.part_global_rotation_vector(part, istep)
w2 = w.dot(w)
if math.isclose(w2, 0, abs_tol=1e-8):
return None
step = self.interpolate_step(istep)
frame = self.mechanism.part_global_frame(part, step)
for point in [vm.O3D, 0.1*vm.X3D, 0.1*vm.Y3D, 0.1*vm.Z3D]:
vp = self.part_local_point_global_speed(part, point, istep)
if not math.isclose(vp.norm(), 0, abs_tol=1e-6):
return frame.old_coordinates(point) - w.cross(vp)/w2
raise ValueError
def plot2D(self, x=vm.X3D, y=vm.Y3D, isteps=None, plot_frames=False,
plot_rotation_axis=False):
fig, ax = plt.subplots()
# Linkage colors
np = len(self.mechanism.parts)
colors = {p: hsv_to_rgb((ip / np, 0.78, 0.87)) for ip, p in enumerate(self.mechanism.parts)}
colors[self.mechanism.ground] = (0,0,0)
# i: to_hex(
# ) for i in range(nlines)}
        if isteps is None:
steps = self.steps[:]
else:
steps = [self.steps[i] for i in isteps]
# # Fetching wireframes lines
# wireframes = {}
# for part in self.mechanism.parts:
# # Fetching local points
# part_points = []
# for linkage in self.mechanism.part_linkages:
for istep, step in enumerate(steps):
linkage_positions = {}
part_frames = {}
for linkage in self.mechanism.linkages:
# flp1.origin.PlaneProjection2D(x, y).MPLPlot(ax=ax)
if linkage.positions_require_kinematic_parameters:
ql = self.mechanism.extract_linkage_parameters_values(linkage,
step)
else:
ql = []
part1_frame = self.mechanism.part_global_frame(linkage.part1,
step)
part_frames[linkage.part1] = part1_frame
#
linkage_position1 = part1_frame.old_coordinates(linkage.part1_position_function(ql))
linkage_position1_2D = linkage_position1.PlaneProjection2D(x, y)
part2_frame = self.mechanism.part_global_frame(linkage.part2,
step)
part_frames[linkage.part2] = part2_frame
#
linkage_position2 = part2_frame.old_coordinates(linkage.part2_position_function(ql))
                linkage_position2_2D = linkage_position2.PlaneProjection2D(x, y)
if linkage_position1 != linkage_position2:
ax.text(*linkage_position1_2D, linkage.name+' position1')
ax.text(*linkage_position2_2D, linkage.name+' position2')
error = linkage_position2_2D - linkage_position1_2D
ax.add_patch(Arrow(*linkage_position1_2D,
*error, 0.05))
else:
if istep == 0:
ax.text(*linkage_position1_2D, linkage.name)
linkage_positions[linkage, linkage.part1] = linkage_position1
linkage_positions[linkage, linkage.part2] = linkage_position2
part_linkages = self.mechanism.part_linkages()
del part_linkages[self.mechanism.ground]
for ipart, (part, linkages) in enumerate(part_linkages.items()):
# middle_point = vm.o2D
# for linkage in linkages:
# middle_point += linkage_positions[linkage, part]
# for point in part.interest_points:
# middle_point += point
# middle_point /= (len(linkages) + len(part.interest_points))
# xm, ym = middle_point.vector
points = []
for linkage in linkages:
points.append(linkage_positions[linkage, part])
points.extend([part_frames[part].old_coordinates(p) for p in part.interest_points])
xm, ym = vm.Point3D.mean_point(points).PlaneProjection2D(x, y).vector
if istep == 0:
ax.text(xm, ym, part.name + ' step 0',
ha="center", va="center",
bbox=dict(boxstyle="square",
ec=colors[part],
fc=(1., 1, 1),
))
else:
if ipart == 0:
ax.text(xm, ym, 'step {}'.format(istep),
ha="center", va="center",
bbox=dict(boxstyle="square",
ec=colors[part],
fc=(1., 1, 1),
))
# for linkage in linkages:
# x1, y1 = linkage_positions[linkage, part]
# ax.plot([x1, xm], [y1, ym], color=colors[part])
for line in Part.wireframe_lines(points):
line.MPLPlot2D(x, y, ax, color=colors[part], width=5)
part_frame = self.mechanism.part_global_frame(part, step)
for point in part.interest_points:
x1, y1 = part_frame.old_coordinates(point).plane_projection2d(x, y)
ax.plot([x1, xm], [y1, ym], color=colors[part])
if plot_frames:
part_frame = self.mechanism.part_global_frame(part, step)
part_frame.plot2d(x=x, y=y, ax=ax)
if plot_rotation_axis:
axis = self.part_global_rotation_vector(part, istep)
point = self.part_instant_rotation_global_axis_point(part, istep)
if point is not None:
axis.normalize()
line = vm.edges.Line3D(point-axis, point+axis)
line.plane_projection2d(x, y).MPLPlot(ax=ax, color=colors[part], dashed=True)
ax.set_aspect('equal')
ax.set_xlabel(str(x))
ax.set_ylabel(str(y))
ax.margins(.1)
def babylonjs(self, page='gm_babylonjs', plot_frames=False,
plot_trajectories=True, plot_instant_rotation_axis=False,
use_cdn=False):
page+='.html'
np = len(self.mechanism.parts)
colors = {p: hsv_to_rgb((ip / np, 0.78, 0.87)) for ip, p in enumerate(self.mechanism.parts)}
part_points = {p: [] for p in self.mechanism.parts}
part_points[self.mechanism.ground] = []
# part_frames = {}
for part, linkages in self.mechanism.part_linkages().items():
for linkage in linkages:
if linkage.positions_require_kinematic_parameters:
ql = self.linkage_steps_parameters[0][linkage]
else:
ql = []
if part == linkage.part1:
linkage_position = linkage.part1_position_function(ql)
else:
linkage_position = linkage.part2_position_function(ql)
part_points[part].append(linkage_position)
for point in part.interest_points:
part_points[part].append(point)
meshes_string = 'var parts_parent = [];\n'
for part in self.mechanism.parts:
meshes_string += 'var part_children = [];\n'
lines = part.wireframe_lines(part_points[part])
meshes_string += lines[0].babylon_script(name='part_parent', color=colors[part])
meshes_string += 'parts_parent.push(part_parent);\n'
for l in lines[1:]:
meshes_string += l.Babylon(color=colors[part], parent='part_parent')
# meshes_string += 'part_meshes.push(line);\n'
# # Adding interest points
# for point in part.interest_points:
# meshes_string += 'var point = BABYLON.MeshBuilder.CreateSphere("interest_point", {diameter: 0.01}, scene);\n'
# meshes_string += 'point.position = new BABYLON.Vector3({}, {}, {});'.format(*point.vector)
# meshes_string += 'part_meshes.push(point);'
if plot_frames:
meshes_string += vm.OXYZ.babylonjs(parent='part_parent', size=0.1)
# if plot_instant_rotation_axis:
# rotation_axis = self.par
if plot_instant_rotation_axis:
for part in self.mechanism.parts:
line = vm.edges.LineSegment3D(-0.5*vm.X3D, 0.5*vm.X3D)
meshes_string += line.babylon_script(name='rotation_axis', color=colors[part], type_='dashed')
meshes_string += 'parts_parent.push(rotation_axis);\n'
linkages_string = ''
for linkage in self.mechanism.linkages:
if linkage not in self.mechanism.opened_linkages:
ql = self.linkage_steps_parameters[0][linkage]
else:
ql = []
if linkage.part1 in self.mechanism.parts:
part1_parent = 'parts_parent[{}]'.format(self.mechanism.parts.index(linkage.part1))
else:
part1_parent = None
if linkage.part2 in self.mechanism.parts:
part2_parent = 'parts_parent[{}]'.format(self.mechanism.parts.index(linkage.part2))
else:
part2_parent = None
linkages_string += linkage.babylonjs(ql,
part1_parent=part1_parent,
part2_parent=part2_parent)
# Computing positions and orientations
positions = []
orientations = []
linkage_positions = []
# n_steps = len(self.steps)
for istep, step in enumerate(self.steps):
step_positions = []
step_orientations = []
step_linkage_positions = []
# step_linkage_positions = []
for part in self.mechanism.parts:
frame = round(self.mechanism.part_global_frame(part,
step))
step_positions.append(list(frame.origin))
step_orientations.append([list(frame.u),
list(frame.v),
list(frame.w)])
if plot_instant_rotation_axis:
for part in self.mechanism.parts:
axis_point = self.part_instant_rotation_global_axis_point(part,
istep)
if axis_point is None:
u = vm.X3D.copy()
v = vm.Y3D.copy()
w = vm.Z3D.copy()
axis_point = vm.Point3D(100, 100, 100)
else:
u = self.part_global_rotation_vector(part, istep)
u.normalize()
v = u.random_unit_normal_vector()
w = u.cross(v)
step_positions.append(list(axis_point))
step_orientations.append([list(u),
list(v),
list(w)])
for linkage in self.mechanism.linkages:
step_linkage_positions.append(list(self.mechanism.linkage_global_position(linkage, step)))
positions.append(step_positions)
orientations.append(step_orientations)
linkage_positions.append(step_linkage_positions)
trajectories = []
if plot_trajectories:
for trajectory in self.trajectories.values():
trajectories.append([list(p) for p in trajectory])
script = babylon_template.substitute(center=(0, 0, 0),
name=self.name,
length=2*0.5,
meshes_string=meshes_string,
linkages_string=linkages_string,
positions=positions,
orientations=orientations,
linkage_positions=linkage_positions,
trajectories=trajectories)
with open(page,'w') as file:
file.write(script)
webbrowser.open('file://' + os.path.realpath(page)) | gpl-3.0 |
PLStenger/plstenger.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
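# For instance (hypothetical data, not shipped with the repository), a single-row
# talks.tsv parsed by the cell below would be equivalent to:
#
# pd.DataFrame([{"title": "My example talk", "type": "Talk",
#                "url_slug": "example-talk", "venue": "Some Conference",
#                "date": "2014-03-01", "location": "Denver, CO",
#                "talk_url": "", "description": "A short description."}])
#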
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
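# For a row with title "My example talk", url_slug "example-talk" and date 2014-03-01
# (hypothetical values), the generated file 2014-03-01-example-talk.md would start with
# YAML front matter roughly like:
#   ---
#   title: "My example talk"
#   collection: talks
#   type: "Talk"
#   permalink: /talks/2014-03-01-example-talk
#   venue: "Some Conference"
#   date: 2014-03-01
#   location: "Denver, CO"
#   ---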
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
rex-xxx/mt6572_x201 | cts/suite/audio_quality/test_description/processing/check_spectrum.py | 5 | 5840 | #!/usr/bin/python
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from consts import *
import numpy as np
import scipy as sp
import scipy.fftpack as fft
import matplotlib.pyplot as plt
import sys
sys.path.append(sys.path[0])
import calc_delay
# check if amplitude ratio of DUT / Host signal
# lies in the given error boundary
# input: host record
# device record,
# sampling rate
# low frequency in Hz,
# high frequency in Hz,
# allowed error in negative side for pass in %,
# allowed error in positive side for pass
# output: min value in negative side, normalized to 1.0
# max value in positive side
# calculated amplitude ratio in magnitude (DUT / Host)
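# example (illustrative): with margainLow = margainHigh = 1.0 (i.e. 1 %), the
# normalized ratio must stay within (0.99, 1.01) over [fLow, fHigh] to pass,
# since pass requires positiveMax < 1 + margainHigh/100 and
# (1.0 - negativeMin) < margainLow/100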
def do_check_spectrum(hostData, DUTData, samplingRate, fLow, fHigh, margainLow, margainHigh):
# reduce FFT resolution to have averaging effects
N = 512 if (len(hostData) > 512) else len(hostData)
iLow = N * fLow / samplingRate + 1 # 1 for DC
if iLow > (N / 2 - 1):
iLow = (N / 2 - 1)
iHigh = N * fHigh / samplingRate + 1 # 1 for DC
if iHigh > (N / 2 + 1):
iHigh = N / 2 + 1
print fLow, iLow, fHigh, iHigh, samplingRate
Phh, freqs = plt.psd(hostData, NFFT=N, Fs=samplingRate, Fc=0, detrend=plt.mlab.detrend_none,\
window=plt.mlab.window_hanning, noverlap=0, pad_to=None, sides='onesided',\
scale_by_freq=False)
Pdd, freqs = plt.psd(DUTData, NFFT=N, Fs=samplingRate, Fc=0, detrend=plt.mlab.detrend_none,\
window=plt.mlab.window_hanning, noverlap=0, pad_to=None, sides='onesided',\
scale_by_freq=False)
print len(Phh), len(Pdd)
print "Phh", abs(Phh[iLow:iHigh])
print "Pdd", abs(Pdd[iLow:iHigh])
amplitudeRatio = np.sqrt(abs(Pdd[iLow:iHigh]/Phh[iLow:iHigh]))
ratioMean = np.mean(amplitudeRatio)
amplitudeRatio = amplitudeRatio / ratioMean
print "Normialized ratio", amplitudeRatio
print "ratio mean for normalization", ratioMean
positiveMax = abs(max(amplitudeRatio))
negativeMin = abs(min(amplitudeRatio))
passFail = True if (positiveMax < (margainHigh / 100.0 + 1.0)) and\
((1.0 - negativeMin) < margainLow / 100.0) else False
RatioResult = np.zeros(len(amplitudeRatio), dtype=np.int16)
for i in range(len(amplitudeRatio)):
RatioResult[i] = amplitudeRatio[i] * 1024 # make fixed point
print "positiveMax", positiveMax, "negativeMin", negativeMin
return (passFail, negativeMin, positiveMax, RatioResult)
def toMono(stereoData):
n = len(stereoData)/2
monoData = np.zeros(n)
for i in range(n):
monoData[i] = stereoData[2 * i]
return monoData
def check_spectrum(inputData, inputTypes):
output = []
outputData = []
outputTypes = []
# basic sanity check
inputError = False
if (inputTypes[0] != TYPE_MONO) and (inputTypes[0] != TYPE_STEREO):
inputError = True
if (inputTypes[1] != TYPE_MONO) and (inputTypes[1] != TYPE_STEREO):
inputError = True
if (inputTypes[2] != TYPE_I64):
inputError = True
if (inputTypes[3] != TYPE_I64):
inputError = True
if (inputTypes[4] != TYPE_I64):
inputError = True
if (inputTypes[5] != TYPE_DOUBLE):
inputError = True
if (inputTypes[6] != TYPE_DOUBLE):
inputError = True
if inputError:
print "input error"
output.append(RESULT_ERROR)
output.append(outputData)
output.append(outputTypes)
return output
hostData = inputData[0]
if inputTypes[0] == TYPE_STEREO:
hostData = toMono(hostData)
dutData = inputData[1]
if inputTypes[1] == TYPE_STEREO:
dutData = toMono(dutData)
samplingRate = inputData[2]
fLow = inputData[3]
fHigh = inputData[4]
margainLow = inputData[5]
margainHigh = inputData[6]
delay = 0
N = 0
hostData_ = hostData
dutData_ = dutData
if len(hostData) > len(dutData):
delay = calc_delay.calc_delay(hostData, dutData)
N = len(dutData)
hostData_ = hostData[delay:delay+N]
if len(hostData) < len(dutData):
delay = calc_delay.calc_delay(dutData, hostData)
N = len(hostData)
dutData_ = dutData[delay:delay+N]
print "delay ", delay, "deviceRecording samples ", N
(passFail, minError, maxError, TF) = do_check_spectrum(hostData_, dutData_,\
samplingRate, fLow, fHigh, margainLow, margainHigh)
if passFail:
output.append(RESULT_PASS)
else:
output.append(RESULT_OK)
outputData.append(minError)
outputTypes.append(TYPE_DOUBLE)
outputData.append(maxError)
outputTypes.append(TYPE_DOUBLE)
outputData.append(TF)
outputTypes.append(TYPE_MONO)
output.append(outputData)
output.append(outputTypes)
return output
# test code
if __name__=="__main__":
sys.path.append(sys.path[0])
mod = __import__("gen_random")
peakAmpl = 10000
durationInMSec = 1000
samplingRate = 44100
fLow = 500
fHigh = 15000
data = getattr(mod, "do_gen_random")(peakAmpl, durationInMSec, samplingRate, fHigh,\
stereo=False)
print len(data)
(passFail, minVal, maxVal, ampRatio) = do_check_spectrum(data, data, samplingRate, fLow, fHigh,\
1.0, 1.0)
plt.plot(ampRatio)
plt.show()
| gpl-2.0 |
shikhardb/scikit-learn | doc/sphinxext/gen_rst.py | 142 | 40026 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobi['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
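# The replacements below assume pygments renders a dotted name such as np.mean as
# <span class="n">np</span><span class="o">.</span><span class="n">mean</span>;
# name_html rebuilds exactly that string so the whole name can be wrapped in a link.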
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
SheffieldML/TVB | plot_classification_comparison.py | 1 | 4006 | # Copyright (c) 2014, James Hensman, Max Zwiessele
# Distributed under the terms of the GNU General public License, see LICENSE.txt
import numpy as np
import matplotlib
#matplotlib.use('pdf')
import pylab as pb
#pb.ion(); pb.close('all')
import os
from scipy import stats
dirname = 'raw_results_classification2'
fnames = [e for e in os.listdir(dirname) if e[-11:] == 'raw_results']
data = [np.loadtxt(os.path.join(dirname,fn)) for fn in fnames]
means = np.vstack([stats.nanmean(e, 0) for e in data])
stds = np.vstack([stats.nanstd(e, 0) for e in data])
x = np.arange(len(data))
width=0.35
def do_plots(i1, i2, lab1="", lab2=""):
pb.figure(figsize=(10,4))
error_kw = {'elinewidth':1.2, 'ecolor':'k', 'capsize':5, 'mew':1.2}
pb.bar(x, means[:,i1], yerr=2*stds[:,i1], width=width, color='b', label=lab1, error_kw=error_kw)
pb.bar(x+width, means[:,i2], yerr=2*stds[:,i2], color='r', width=width, label=lab2, error_kw=error_kw)
pb.xticks(x+width,[fn.split('raw')[0] for fn in fnames], rotation=45)
for xx, m, s in zip(x, means[:,i1], 2*stds[:,i1]):
pb.text(xx+0.5*width, 1.0*(m+s), '%.3f'%m,ha='center', va='bottom', fontsize='small')
for xx, m, s in zip(x+width, means[:,i2], 2*stds[:,i2]):
pb.text(xx+0.5*width, 1.0*(m+s), '%.3f'%m,ha='center', va='bottom', fontsize='small')
#pb.legend(loc=0)
pb.ylim(0,1.05*np.max(means[:,[1,5]].flatten() + 2*stds[:,[1,5]].flatten()))
pb.ylabel(r'$-\log\, p(y_\star)$')
pb.subplots_adjust(bottom=0.2)
pb.legend(loc=0)
#do_plots(1,5, "TVB", "EP")
#pb.savefig('nlps.pdf')
#
#do_plots(7,3, "TVB (EP params)", "EP (TVB params)")
#pb.savefig('cross_compare.pdf')
#
#do_plots(1,7, "TVB (TVB params)", "TVB (EP params)")
#pb.savefig('TVB_param_compare.pdf')
#
#do_plots(0,4, "TVB", "EP")
#pb.title('hold out error')
#pb.savefig('errors.pdf')
def whiskers(i1, i2, lab1="", lab2=""):
width = 0.35
l1 = pb.boxplot([d[:, i1] for d in data] , positions=np.arange(len(data))-1.03*width/2., widths=width)
l2 = pb.boxplot([d[:, i2] for d in data] , positions=np.arange(len(data))+1.03*width/2., widths=width)
pb.xticks(np.arange(len(data)),[fn.split('raw')[0].replace('_',' ') for fn in fnames], rotation=45)
pb.xlim(-1.2*width, len(data)-1+1.2*width)
for key, lines in l1.iteritems():
pb.setp(lines, lw=1)
if key == "boxes":
pb.setp(lines, color='b', lw=1.4)
if key == 'whiskers':
pb.setp(lines, color='b')
if key == 'fliers':
pb.setp(lines, color='b')
if key == 'medians':
pb.setp(lines, color='k', lw=1.4)
for key, lines in l2.iteritems():
pb.setp(lines, lw=1.2)
if key == "boxes":
pb.setp(lines, color='g', lw=1.4)
if key == 'whiskers':
pb.setp(lines, color='g')
if key == 'fliers':
pb.setp(lines, color='g')
if key == 'medians':
pb.setp(lines, color='k', lw=1.4)
#pb.setp(l2['boxes'], color='g')
#pb.setp(l1['medians'], color='b')
#pb.setp(l2['medians'], color='g')
#pb.setp(l1['whiskers'], color='b')
#pb.setp(l2['whiskers'], color='g')
#os.makedirs('classification_plots')
import matplotlib as mpl; mpl.rcParams['text.usetex'] = False
pb.close('all')
pb.figure('holdout', figsize=(8,3))
pb.ylabel(u'Fraction error')
whiskers(0,4, "TVB", "EP")
pb.tight_layout()
pb.savefig('/home/maxz/Documents/publications/TVB/aistats2014/classification_plots/holdout.pgf')
pb.close('all')
pb.figure('crossparameters', figsize=(8,3))
pb.ylabel(u'Fraction error')
whiskers(6,2, "TVB", "EP")
pb.tight_layout()
pb.savefig('/home/maxz/Documents/publications/TVB/aistats2014/classification_plots/crossparams.pgf')
pb.figure('negprob', figsize=(8,3))
whiskers(1,5, "TVB", "EP")
pb.ylabel(u'$-\log{p}(\mathbf{y}^{\star})$')
pb.tight_layout()
pb.savefig('/home/maxz/Documents/publications/TVB/aistats2014/classification_plots/negprob.pgf')
mpl.rcParams['text.usetex'] = True
| gpl-3.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
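# For example, _min_int(0, 100) returns int8, while _min_int(0, 40000) returns
# int32 because 40000 exceeds the int16 maximum of 32767.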
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
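# In the flattened array, consecutive elements of a diagonal are M + 1 apart;
# `i` is the flat offset of the first element of the k-th diagonal, and the
# row slice below stops the fill before the diagonal runs off the array.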
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
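# `tmp` is a view on `v` whose columns are ordered by increasing power; columns
# 1 onward are first filled with x and then turned into x, x**2, ... by the
# running product along each row.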
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print(H[::-1]) # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
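Examples
--------
A small illustration, reusing the 4x4 array from the `tril_indices` example:
>>> a = np.arange(16).reshape(4, 4)
>>> il = np.tril_indices_from(a)
>>> a[il]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])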
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
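Examples
--------
A small illustration, reusing the 4x4 array from the `triu_indices` example:
>>> a = np.arange(16).reshape(4, 4)
>>> iu = np.triu_indices_from(a)
>>> a[iu]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])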
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| apache-2.0 |
mrahim/adni_rs_fmri_analysis | base_connectivity.py | 1 | 10358 | # -*- coding: utf-8 -*-
"""
Created on Wed May 27 15:24:36 2015
@author: [email protected]
"""
###############################################################################
# Connectivity
###############################################################################
import os
import numpy as np
from scipy import stats, linalg
from sklearn.covariance import GraphLassoCV, LedoitWolf, OAS, \
ShrunkCovariance
from sklearn.datasets.base import Bunch
from sklearn.base import BaseEstimator, TransformerMixin
from nilearn.input_data import NiftiLabelsMasker, NiftiMapsMasker
import nibabel as nib
from joblib import Parallel, delayed
from nilearn.datasets import fetch_msdl_atlas
from fetch_data import set_cache_base_dir
from embedding import CovEmbedding, vec_to_sym
from nilearn.image import index_img
CACHE_DIR = set_cache_base_dir()
def atlas_rois_to_coords(atlas_name, rois):
"""Returns coords of atlas ROIs
"""
affine = nib.load(atlas_name).get_affine()
data = nib.load(atlas_name).get_data()
centroids = []
if len(data.shape) == 4:
for i in range(data.shape[-1]):
voxels = np.where(data[..., i] > 0)
dvoxels = data[..., i]
dvoxels = dvoxels[voxels]
voxels = np.asarray(voxels).T
centroid = np.average(voxels, axis=0, weights=dvoxels)
centroid = np.append(centroid, 1)
centroid = np.dot(affine, centroid)[:-1]
centroids.append(centroid)
else:
vals = np.unique(data)
for i in range(len(vals)):
centroid = np.mean(np.where(data == i), axis=1)
centroid = np.append(centroid, 1)
centroid = np.dot(affine, centroid)[:-1]
centroids.append(centroid)
centroids = np.asarray(centroids)[rois]
return centroids
def fetch_dmn_atlas(atlas_name, atlas):
""" Returns a bunch containing the DMN rois
and their coordinates
"""
if atlas_name == 'msdl':
rois = np.arange(3, 7)
rois_names = ['L-DMN', 'M-DMN', 'F-DMN', 'R-DMN']
elif atlas_name == 'mayo':
rois = np.concatenate((range(39, 43), range(47, 51),
range(52, 56), range(62, 68)))
rois_names = ['adDMN_L', 'adDMN_R', 'avDMN_L', 'avDMN_R', 'dDMN_L_Lat',
'dDMN_L_Med', 'dDMN_R_Lat', 'dDMN_R_Med', 'pDMN_L_Lat',
'pDMN_L_Med', 'pDMN_R_Lat', 'pDMN_R_Med', 'tDMN_L',
'tDMN_R', 'vDMN_L_Lat', 'vDMN_L_Med', 'vDMN_R_Lat',
'vDMN_R_Med']
elif atlas_name == 'canica':
rois = np.concatenate((range(20, 23), [36]))
rois_names = ['DMN']*4
n_rois = len(rois)
centroids = atlas_rois_to_coords(atlas, rois)
return Bunch(n_rois=n_rois, rois=rois, rois_names=rois_names,
rois_centroids=centroids)
def nii_shape(img):
""" Returns the img shape
"""
if isinstance(img, nib.Nifti1Image):
return img.shape
else:
return nib.load(img).shape
def fetch_atlas(atlas_name, rois=False):
"""Retruns selected atlas path
"""
if atlas_name == 'msdl':
atlas = fetch_msdl_atlas()['maps']
elif atlas_name == 'harvard_oxford':
atlas = os.path.join(CACHE_DIR, 'atlas',
'HarvardOxford-cortl-maxprob-thr0-2mm.nii.gz')
elif atlas_name == 'juelich':
atlas = os.path.join(CACHE_DIR, 'atlas',
'Juelich-maxprob-thr0-2mm.nii.gz')
elif atlas_name == 'mayo':
atlas = os.path.join(CACHE_DIR, 'atlas', 'atlas_68_rois.nii.gz')
elif atlas_name == 'canica':
atlas = os.path.join(CACHE_DIR, 'atlas', 'atlas_canica_61_rois.nii.gz')
elif atlas_name == 'canica141':
atlas = os.path.join(CACHE_DIR, 'atlas', 'atlas_canica_141_rois.nii.gz')
elif atlas_name == 'tvmsdl':
atlas = os.path.join(CACHE_DIR, 'atlas', 'atlas_tv_msdl.nii.gz')
dmn = None
if (atlas_name in ['msdl', 'mayo', 'canica']) and rois:
dmn = fetch_dmn_atlas(atlas_name, atlas)
atlas_img = index_img(atlas, dmn['rois'])
atlas = os.path.join(CACHE_DIR, 'atlas', 'atlas_dmn.nii.gz')
atlas_img.to_filename(atlas)
return atlas, dmn
def partial_corr(C):
"""
Returns the sample linear partial correlation coefficients
between pairs of variables in C, controlling
for the remaining variables in C.
Parameters
----------
C : array-like, shape (n, p)
Array with the different variables.
Each column of C is taken as a variable
Returns
-------
P : array-like, shape (p, p)
P[i, j] contains the partial correlation
of C[:, i] and C[:, j] controlling
for the remaining variables in C.
"""
C = np.asarray(C)
p = C.shape[1]
P_corr = np.zeros((p, p), dtype=np.float)
for i in range(p):
P_corr[i, i] = 1
for j in range(i+1, p):
idx = np.ones(p, dtype=np.bool)
idx[i] = False
idx[j] = False
beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
res_j = C[:, j] - C[:, idx].dot(beta_i)
res_i = C[:, i] - C[:, idx].dot(beta_j)
corr = stats.pearsonr(res_i, res_j)[0]
P_corr[i, j] = corr
P_corr[j, i] = corr
return P_corr
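# Illustrative usage sketch (hypothetical data): for a matrix with observations
# in rows and variables in columns, partial_corr returns a symmetric (p, p)
# matrix with ones on the diagonal.
#
#   >>> rng = np.random.RandomState(0)   # hypothetical example data
#   >>> C = rng.randn(100, 4)
#   >>> P = partial_corr(C)
#   >>> P.shape
#   (4, 4)
#   >>> np.allclose(P, P.T)
#   True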
def do_mask_img(masker, func, confound=None):
""" Masking functional acquisitions
"""
c = None
if confound is not None:
c = np.loadtxt(confound)
return masker.transform(func, c)
def compute_connectivity_subject(conn, masker, func, confound=None):
""" Returns connectivity of one fMRI for a given atlas
"""
ts = do_mask_img(masker, func, confound)
if conn == 'gl':
fc = GraphLassoCV(max_iter=1000)
elif conn == 'lw':
fc = LedoitWolf()
elif conn == 'oas':
fc = OAS()
elif conn == 'scov':
fc = ShrunkCovariance()
if conn == 'corr' or conn == 'pcorr':
fc = Bunch(covariance_=0, precision_=0)
fc.covariance_ = np.corrcoef(ts, rowvar=False)  # ROIs are the columns of ts
fc.precision_ = partial_corr(ts)
else:
fc.fit(ts)
ind = np.tril_indices(ts.shape[1], k=-1)
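# k=-1 keeps only the strictly lower triangle, so covariance and precision are
# each returned as a flat vector of n_rois * (n_rois - 1) / 2 values.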
return fc.covariance_[ind], fc.precision_[ind]
class Connectivity(BaseEstimator, TransformerMixin):
""" Connectivity Estimator
computes the functional connectivity of a list of 4D niimgs,
according to ROIs defined on an atlas.
First, the timeseries on ROIs are extracted.
Then, the connectivity is computed for each pair of ROIs.
The result is the vectorized lower triangle of each symmetric connectivity matrix.
Parameters
----------
atlas_name : atlas name (msdl, harvard_oxford, juelich, mayo, canica, canica141, tvmsdl)
metric : metric name (gl, lw, oas, scov, corr, pcorr, correlation, partial correlation, tangent)
mask : mask filepath
detrend : masker param
low_pass : masker param
high_pass : masker param
t_r : masker param
smoothing_fwhm : masker param
resampling_target : masker param
memory : masker param
memory_level : masker param
n_jobs : number of parallel jobs
Attributes
----------
fc_ : functional connectivity (covariance and precision)
"""
def __init__(self, atlas_name, metric, mask, rois=False, detrend=True,
low_pass=.1, high_pass=.01, t_r=3.,
resampling_target='data', smoothing_fwhm=6.,
memory='', memory_level=2, n_jobs=1):
self.fc_ = None
self.atlas, self.rois = fetch_atlas(atlas_name, rois)
self.metric = metric
self.mask = mask
self.n_jobs = n_jobs
if len(nii_shape(self.atlas)) == 4:
self.masker = NiftiMapsMasker(maps_img=self.atlas,
mask_img=self.mask,
detrend=detrend,
low_pass=low_pass,
high_pass=high_pass,
t_r=t_r,
resampling_target=resampling_target,
smoothing_fwhm=smoothing_fwhm,
memory=memory,
memory_level=memory_level,
verbose=5)
else:
self.masker = NiftiLabelsMasker(labels_img=self.atlas,
mask_img=self.mask,
detrend=detrend,
low_pass=low_pass,
high_pass=high_pass,
t_r=t_r,
resampling_target=resampling_target,
smoothing_fwhm=smoothing_fwhm,
memory=memory,
memory_level=memory_level,
verbose=5)
def fit(self, imgs, confounds=None):
""" compute connectivities
"""
self.masker.fit()
if self.metric in ('correlation', 'partial correlation', 'tangent'):
if confounds is None:
ts = Parallel(n_jobs=self.n_jobs, verbose=5)(delayed(
do_mask_img)(self.masker, func) for func in imgs)
else:
ts = Parallel(n_jobs=self.n_jobs, verbose=5)(delayed(
do_mask_img)(self.masker, func, confound)
for func, confound in zip(imgs, confounds))
cov_embedding = CovEmbedding(kind=self.metric)
p_ = np.asarray(vec_to_sym(cov_embedding.fit_transform(ts)))
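# vec_to_sym yields one symmetric (n_rois, n_rois) matrix per subject; only the
# strictly lower triangle is kept so that each subject is described by a flat
# vector of n_rois * (n_rois - 1) / 2 connectivity values.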
ind = np.tril_indices(p_.shape[1], k=-1)
self.fc_ = np.asarray([p_[i, ...][ind] for i in range(p_.shape[0])])
else:
p_ = Parallel(n_jobs=self.n_jobs, verbose=5)(delayed(
compute_connectivity_subject)(self.metric,
self.masker, func, confound)
for func, confound in zip(imgs, confounds))
self.fc_ = np.asarray(p_)[:, 0, :]
return self.fc_
| gpl-2.0 |
DinoCow/airflow | tests/providers/amazon/aws/transfers/test_hive_to_dynamodb.py | 7 | 4590 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import json
import unittest
from unittest import mock
import pandas as pd
import airflow.providers.amazon.aws.transfers.hive_to_dynamodb
from airflow.models.dag import DAG
from airflow.providers.amazon.aws.hooks.dynamodb import AwsDynamoDBHook
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class TestHiveToDynamoDBOperator(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.sql = 'SELECT 1'
self.hook = AwsDynamoDBHook(aws_conn_id='aws_default', region_name='us-east-1')
@staticmethod
def process_data(data, *args, **kwargs):
return json.loads(data.to_json(orient='records'))
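# process_data converts the DataFrame returned by Hive into a plain list of
# dicts, e.g. a frame with one row ('1', 'sid') and columns ['id', 'name']
# becomes [{'id': '1', 'name': 'sid'}].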
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default')
self.assertIsNotNone(hook.get_conn())
@mock.patch(
'airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']),
)
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_records_with_schema(self, mock_get_pandas_df):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{'AttributeName': 'id', 'KeyType': 'HASH'},
],
AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
ProvisionedThroughput={'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10},
)
operator = airflow.providers.amazon.aws.transfers.hive_to_dynamodb.HiveToDynamoDBOperator(
sql=self.sql,
table_name="test_airflow",
task_id='hive_to_dynamodb_check',
table_keys=['id'],
dag=self.dag,
)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
@mock.patch(
'airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']),
)
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_pre_process_records_with_schema(self, mock_get_pandas_df):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{'AttributeName': 'id', 'KeyType': 'HASH'},
],
AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
ProvisionedThroughput={'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10},
)
operator = airflow.providers.amazon.aws.transfers.hive_to_dynamodb.HiveToDynamoDBOperator(
sql=self.sql,
table_name='test_airflow',
task_id='hive_to_dynamodb_check',
table_keys=['id'],
pre_process=self.process_data,
dag=self.dag,
)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
| apache-2.0 |
witgo/spark | python/pyspark/sql/group.py | 23 | 10681 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.pandas.group_ops import PandasGroupedOpsMixin
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
__all__ = ["GroupedData"]
def dfapi(f):
def _api(self):
name = f.__name__
jdf = getattr(self._jgd, name)()
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
def df_varargs_api(f):
def _api(self, *cols):
name = f.__name__
jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
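# Both decorators above turn a documented stub into a thin wrapper: the call is
# forwarded to the JVM GroupedData method of the same name and the resulting
# Java DataFrame is wrapped back into a Python DataFrame.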
class GroupedData(PandasGroupedOpsMixin):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. versionadded:: 1.3
"""
def __init__(self, jgd, df):
self._jgd = jgd
self._df = df
self.sql_ctx = df.sql_ctx
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. versionadded:: 1.3.0
Parameters
----------
exprs : dict
a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
Notes
-----
Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
Examples
--------
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name='Alice', count(1)=1), Row(name='Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name='Alice', min(age)=2), Row(name='Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name='Alice', min_udf(age)=2), Row(name='Bob', min_udf(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
@dfapi
def count(self):
"""Counts the number of records for each group.
.. versionadded:: 1.3.0
Examples
--------
>>> sorted(df.groupBy(df.age).count().collect())
[Row(age=2, count=1), Row(age=5, count=1)]
"""
@df_varargs_api
def mean(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().mean('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
def avg(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().avg('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
def max(self, *cols):
"""Computes the max value for each numeric columns for each group.
.. versionadded:: 1.3.0
Examples
--------
>>> df.groupBy().max('age').collect()
[Row(max(age)=5)]
>>> df3.groupBy().max('age', 'height').collect()
[Row(max(age)=5, max(height)=85)]
"""
@df_varargs_api
def min(self, *cols):
"""Computes the min value for each numeric column for each group.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().min('age').collect()
[Row(min(age)=2)]
>>> df3.groupBy().min('age', 'height').collect()
[Row(min(age)=2, min(height)=80)]
"""
@df_varargs_api
def sum(self, *cols):
"""Compute the sum for each numeric columns for each group.
.. versionadded:: 1.3.0
Parameters
----------
cols : str
column names. Non-numeric columns are ignored.
Examples
--------
>>> df.groupBy().sum('age').collect()
[Row(sum(age)=7)]
>>> df3.groupBy().sum('age', 'height').collect()
[Row(sum(age)=7, sum(height)=165)]
"""
def pivot(self, pivot_col, values=None):
"""
Pivots a column of the current :class:`DataFrame` and perform the specified aggregation.
There are two versions of pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
.. versionadded:: 1.6.0
Parameters
----------
pivot_col : str
Name of the column to pivot.
values :
List of values that will be translated to columns in the output DataFrame.
Examples
--------
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
>>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self._df)
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.group
globs = pyspark.sql.group.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.group tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
Row(name='Bob', age=5, height=85)]).toDF()
globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
Row(course="Java", year=2012, earnings=20000),
Row(course="dotNET", year=2012, earnings=5000),
Row(course="dotNET", year=2013, earnings=48000),
Row(course="Java", year=2013, earnings=30000)]).toDF()
globs['df5'] = sc.parallelize([
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.group, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/extension/base/ops.py | 2 | 5999 | import operator
import pytest
import pandas as pd
from pandas.core import ops
from .base import BaseExtensionTests
class BaseOpsUtil(BaseExtensionTests):
def get_op_from_name(self, op_name):
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
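# For example, "__add__" resolves to operator.add, while a reflected name such
# as "__radd__" has no operator.radd, so the except branch builds a lambda that
# swaps the operands and applies operator.add instead.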
def check_opname(self, s, op_name, other, exc=Exception):
op = self.get_op_from_name(op_name)
self._check_op(s, op, other, op_name, exc)
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
result = op(s, other)
expected = s.combine(other, op)
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def _check_divmod_op(self, s, op, other, exc=Exception):
# divmod has multiple return values, so check separately
if exc is None:
result_div, result_mod = op(s, other)
if op is divmod:
expected_div, expected_mod = s // other, s % other
else:
expected_div, expected_mod = other // s, other % s
self.assert_series_equal(result_div, expected_div)
self.assert_series_equal(result_mod, expected_mod)
else:
with pytest.raises(exc):
divmod(s, other)
class BaseArithmeticOpsTests(BaseOpsUtil):
"""Various Series and DataFrame arithmetic ops methods.
Subclasses supporting various ops should set the class variables
to indicate that they support ops of that kind
* series_scalar_exc = TypeError
* frame_scalar_exc = TypeError
* series_array_exc = TypeError
* divmod_exc = TypeError
"""
series_scalar_exc = TypeError
frame_scalar_exc = TypeError
series_array_exc = TypeError
divmod_exc = TypeError
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
op_name = all_arithmetic_operators
s = pd.Series(data)
self.check_opname(s, op_name, s.iloc[0], exc=self.series_scalar_exc)
@pytest.mark.xfail(run=False, reason="_reduce needs implementation")
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op_name = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self.check_opname(df, op_name, data[0], exc=self.frame_scalar_exc)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op_name = all_arithmetic_operators
s = pd.Series(data)
self.check_opname(
s, op_name, pd.Series([s.iloc[0]] * len(s)), exc=self.series_array_exc
)
def test_divmod(self, data):
s = pd.Series(data)
self._check_divmod_op(s, divmod, 1, exc=self.divmod_exc)
self._check_divmod_op(1, ops.rdivmod, s, exc=self.divmod_exc)
def test_divmod_series_array(self, data, data_for_twos):
s = pd.Series(data)
self._check_divmod_op(s, divmod, data)
other = data_for_twos
self._check_divmod_op(other, ops.rdivmod, s)
other = pd.Series(other)
self._check_divmod_op(other, ops.rdivmod, s)
def test_add_series_with_extension_array(self, data):
s = pd.Series(data)
result = s + data
expected = pd.Series(data + data)
self.assert_series_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op_name = all_arithmetic_operators
with pytest.raises(AttributeError):
getattr(data, op_name)
def test_direct_arith_with_series_returns_not_implemented(self, data):
# EAs should return NotImplemented for ops with Series.
# Pandas takes care of unboxing the series and calling the EA's op.
other = pd.Series(data)
if hasattr(data, "__add__"):
result = data.__add__(other)
assert result is NotImplemented
else:
raise pytest.skip(
"{} does not implement add".format(data.__class__.__name__)
)
class BaseComparisonOpsTests(BaseOpsUtil):
"""Various Series and DataFrame comparison ops methods."""
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
if op_name == "__eq__":
assert getattr(data, op_name)(other) is NotImplemented
assert not op(s, other).all()
elif op_name == "__ne__":
assert getattr(data, op_name)(other) is NotImplemented
assert op(s, other).all()
else:
# array
assert getattr(data, op_name)(other) is NotImplemented
# series
s = pd.Series(data)
with pytest.raises(TypeError):
op(s, other)
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
self._compare_other(s, data, op_name, 0)
def test_compare_array(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
other = pd.Series([data[0]] * len(data))
self._compare_other(s, data, op_name, other)
def test_direct_arith_with_series_returns_not_implemented(self, data):
# EAs should return NotImplemented for ops with Series.
# Pandas takes care of unboxing the series and calling the EA's op.
other = pd.Series(data)
if hasattr(data, "__eq__"):
result = data.__eq__(other)
assert result is NotImplemented
else:
raise pytest.skip(
"{} does not implement __eq__".format(data.__class__.__name__)
)
| apache-2.0 |
robin-lai/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilties closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
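###############################################################################
# Illustrative sketch of what calibration_curve computes, using hand-made
# predictions (the toy arrays below are arbitrary and unrelated to the
# classifiers above; they are chosen only to make the binning obvious).
y_true_toy = np.array([0, 0, 0, 1, 1, 1, 1, 1])
y_prob_toy = np.array([0.1, 0.2, 0.3, 0.6, 0.7, 0.8, 0.9, 0.95])
frac_pos_toy, mean_pred_toy = calibration_curve(y_true_toy, y_prob_toy,
                                                n_bins=2)
# With two bins, the low-probability bin contains only negatives and the
# high-probability bin only positives, so frac_pos_toy is [0., 1.] while
# mean_pred_toy is the average predicted probability inside each bin.
print(frac_pos_toy, mean_pred_toy)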
| bsd-3-clause |
liangz0707/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
cl4rke/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
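# Illustrative sketch: what the character analyzer defined above produces for
# a short string. These overlapping 1- to 3-grams are the 'fingerprints'
# mentioned in the module docstring (the word 'and' is an arbitrary example).
analyzer = vectorizer.build_analyzer()
print(analyzer(u'and'))  # 1- to 3-grams: a, n, d, an, nd, and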
| bsd-3-clause |
CVML/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
xyguo/scikit-learn | sklearn/feature_extraction/text.py | 15 | 50250 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be of type string or byte.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
    binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
    dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
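    Examples
    --------
    A minimal usage sketch; the corpus and ``n_features`` value below are
    arbitrary illustration choices:
    >>> from sklearn.feature_extraction.text import HashingVectorizer
    >>> corpus = ['the quick brown fox', 'jumped over the lazy dog']
    >>> X = HashingVectorizer(n_features=2 ** 8).transform(corpus)
    >>> X.shape
    (2, 256)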
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be of type string or byte.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
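    Examples
    --------
    A minimal usage sketch on a toy corpus (outputs shown as printed under
    Python 3):
    >>> from sklearn.feature_extraction.text import CountVectorizer
    >>> corpus = ['the cat sat on the mat', 'the dog sat']
    >>> vectorizer = CountVectorizer()
    >>> X = vectorizer.fit_transform(corpus)
    >>> vectorizer.get_feature_names()
    ['cat', 'dog', 'mat', 'on', 'sat', 'the']
    >>> X.shape
    (2, 6)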
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are nonzero in more documents than high or in
        fewer documents than low, modifying the vocabulary, and restricting
        it to at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
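    Examples
    --------
    A small numeric sketch of the smoothed idf (the document counts below are
    made up): with ``smooth_idf=True`` a term found in ``df`` of ``n``
    documents is weighted by ``log((1 + n) / (1 + df)) + 1``, so a term that
    occurs in every document keeps a weight of 1 instead of being zeroed out.
    >>> from math import log
    >>> n_docs = 4.
    >>> round(log((1 + n_docs) / (1 + 1.)) + 1, 3)      # term in 1 of 4 docs
    1.916
    >>> round(log((1 + n_docs) / (1 + n_docs)) + 1, 3)  # term in all 4 docs
    1.0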
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be of type string or byte.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents, count the occurrences of tokens and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
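Examples
--------
A minimal usage sketch (the corpus below is a toy example; the learned
vocabulary and weights depend entirely on the input documents):
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> corpus = ['the cat sat on the mat', 'the dog sat on the log']
>>> vectorizer = TfidfVectorizer(stop_words='english')
>>> X = vectorizer.fit_transform(corpus)
>>> n_docs, n_terms = X.shape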
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
eranchetz/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/dates.py | 54 | 33991 | #!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing
on the shoulders of python :mod:`datetime`, the add-on modules
:mod:`pytz` and :mod:`dateutil`. :class:`datetime` objects are
converted to floating point numbers which represent the number of days
since 0001-01-01 UTC. The helper functions :func:`date2num`,
:func:`num2date` and :func:`drange` are used to facilitate easy
conversion to and from :mod:`datetime` and numeric ranges.
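A small conversion example (the dates chosen here are arbitrary)::
    import datetime
    d1 = datetime.datetime(2000, 3, 1)
    d2 = datetime.datetime(2000, 3, 5)
    xs = drange(d1, d2, datetime.timedelta(hours=6))  # array of float days
    dt = num2date(xs[0])                              # back to a datetime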
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pytz.sourceforge.net>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <http://labix.org/python-dateutil>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g. MO, TU
* :class:`MonthLocator`: locate months, e.g. 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://moin.conectiva.com.br/DateUtil>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
Date formatters
---------------
Here are all the date formatters:
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
import re, time, math, datetime
import pytz
# compatibility for 2008c and older versions
try:
import pytz.zoneinfo
except ImportError:
pytz.zoneinfo = pytz.tzinfo
pytz.zoneinfo.UTC = pytz.UTC
import matplotlib
import numpy as np
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
from pytz import timezone
from dateutil.rrule import rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, \
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY
from dateutil.relativedelta import relativedelta
import dateutil.parser
__all__ = ( 'date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'DateLocator', 'RRuleLocator',
'YearLocator', 'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'rrule', 'MO', 'TU', 'WE', 'TH', 'FR',
'SA', 'SU', 'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
UTC = pytz.timezone('UTC')
def _get_rc_timezone():
s = matplotlib.rcParams['timezone']
return pytz.timezone(s)
HOURS_PER_DAY = 24.
MINUTES_PER_DAY = 60.*HOURS_PER_DAY
SECONDS_PER_DAY = 60.*MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6*SECONDS_PER_DAY
SEC_PER_MIN = 60
SEC_PER_HOUR = 3600
SEC_PER_DAY = SEC_PER_HOUR * 24
SEC_PER_WEEK = SEC_PER_DAY * 7
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour/HOURS_PER_DAY + dt.minute/MINUTES_PER_DAY +
dt.second/SECONDS_PER_DAY + dt.microsecond/MUSECONDS_PER_DAY
)
return base
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
"""
if tz is None: tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24*remainder, 1)
minute, remainder = divmod(60*remainder, 1)
second, remainder = divmod(60*remainder, 1)
microsecond = int(1e6*remainder)
if microsecond<10: microsecond=0 # compensate for rounding errors
dt = datetime.datetime(
dt.year, dt.month, dt.day, int(hour), int(minute), int(second),
microsecond, tzinfo=UTC).astimezone(tz)
if microsecond>999990: # compensate for rounding errors
dt += datetime.timedelta(microseconds=1e6-microsecond)
return dt
class strpdate2num:
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
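A short example (the format string and date shown are arbitrary)::
    parsefunc = strpdate2num('%Y-%m-%d')
    x = parsefunc('2004-07-05')   # a date2num float, usable with plot_date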
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
def datestr2num(d):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`. *d* can be a single string or a
sequence of strings.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d)
return date2num(dt)
else:
return date2num([dateutil.parser.parse(s) for s in d])
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC.
"""
if not cbook.iterable(d): return _to_ordinalf(d)
else: return np.asarray([_to_ordinalf(val) for val in d])
def julian2num(j):
'Convert a Julian date (or sequence) to a matplotlib date (or sequence).'
if cbook.iterable(j): j = np.asarray(j)
return j + 1721425.5
def num2julian(n):
'Convert a matplotlib date (or sequence) to a Julian date (or sequence).'
if cbook.iterable(n): n = np.asarray(n)
return n - 1721425.5
def num2date(x, tz=None):
"""
*x* is a float value which gives number of days (fraction part
represents hours, minutes, seconds) since 0001-01-01 00:00:00 UTC.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None: tz = _get_rc_timezone()
if not cbook.iterable(x): return _from_ordinalf(x, tz)
else: return [_from_ordinalf(val, tz) for val in x]
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
step = (delta.days + delta.seconds/SECONDS_PER_DAY +
delta.microseconds/MUSECONDS_PER_DAY)
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
return np.arange(f1, f2, step)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick locations are matplotlib date floats (days since 0001-01-01 UTC, as
returned by :func:`date2num`). Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is an :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _findall(self, text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i=j+1
return sites
# Dalke: I hope I did this math right. Every 28 years the
# calendar repeats, except through century leap years excepting
# the 400 year leap years. But only if you're using the Gregorian
# calendar.
def strftime(self, dt, fmt):
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year > 1900:
return cbook.unicode_safe(dt.strftime(fmt))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6*(delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year)//28)*28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = self._findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = self._findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%4d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return cbook.unicode_safe(s)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None: tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind>=len(self.t) or ind<=0: return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
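It is typically constructed together with the locator it accompanies, e.g.::
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)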
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None):
self._locator = locator
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", tz)
self._tz = tz
def __call__(self, x, pos=0):
scale = float( self._locator._get_unit() )
if ( scale == 365.0 ):
self._formatter = DateFormatter("%Y", self._tz)
elif ( scale == 30.0 ):
self._formatter = DateFormatter("%b %Y", self._tz)
elif ( (scale == 1.0) or (scale == 7.0) ):
self._formatter = DateFormatter("%b %d %Y", self._tz)
elif ( scale == (1.0/24.0) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*60)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*3600)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
else:
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", self._tz)
return self._formatter(x, pos)
class rrulewrapper:
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
hms0d = {'byhour':0, 'byminute':0,'bysecond':0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
self.tz = tz
def datalim_to_dt(self):
dmin, dmax = self.axis.get_data_interval()
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
vmin, vmax = self.axis.get_view_interval()
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def nonsingular(self, vmin, vmax):
unit = self._get_unit()
vmin -= 2*unit
vmax += 2*unit
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try: dmin, dmax = self.viewlim_to_dt()
except ValueError: return []
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dates = self.rule.between(dmin, dmax, True)
return date2num(dates)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
if ( freq == YEARLY ):
return 365
elif ( freq == MONTHLY ):
return 30
elif ( freq == WEEKLY ):
return 7
elif ( freq == DAILY ):
return 1
elif ( freq == HOURLY ):
return (1.0/24.0)
elif ( freq == MINUTELY ):
return (1.0/(24*60))
elif ( freq == SECONDLY ):
return (1.0/(24*3600))
else:
# error
return -1 #or should this just return '1'?
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin: vmin=dmin
vmax = self.rule.after(dmax, True)
if not vmax: vmax=dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`RRuleLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None):
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if ( self._freq == YEARLY ):
return 365.0
elif ( self._freq == MONTHLY ):
return 30.0
elif ( self._freq == WEEKLY ):
return 7.0
elif ( self._freq == DAILY ):
return 1.0
elif ( self._freq == HOURLY ):
return 1.0/24
elif ( self._freq == MINUTELY ):
return 1.0/(24*60)
elif ( self._freq == SECONDLY ):
return 1.0/(24*3600)
else:
# error
return -1
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
numYears = (delta.years * 1.0)
numMonths = (numYears * 12.0) + delta.months
numDays = (numMonths * 31.0) + delta.days
numHours = (numDays * 24.0) + delta.hours
numMinutes = (numHours * 60.0) + delta.minutes
numSeconds = (numMinutes * 60.0) + delta.seconds
numticks = 5
# self._freq = YEARLY
interval = 1
bymonth = 1
bymonthday = 1
byhour = 0
byminute = 0
bysecond = 0
if ( numYears >= numticks ):
self._freq = YEARLY
elif ( numMonths >= numticks ):
self._freq = MONTHLY
bymonth = range(1, 13)
if ( (0 <= numMonths) and (numMonths <= 14) ):
interval = 1 # show every month
elif ( (15 <= numMonths) and (numMonths <= 29) ):
interval = 3 # show every 3 months
elif ( (30 <= numMonths) and (numMonths <= 44) ):
interval = 4 # show every 4 months
else: # 45 <= numMonths <= 59
interval = 6 # show every 6 months
elif ( numDays >= numticks ):
self._freq = DAILY
bymonth = None
bymonthday = range(1, 32)
if ( (0 <= numDays) and (numDays <= 9) ):
interval = 1 # show every day
elif ( (10 <= numDays) and (numDays <= 19) ):
interval = 2 # show every 2 days
elif ( (20 <= numDays) and (numDays <= 49) ):
interval = 3 # show every 3 days
elif ( (50 <= numDays) and (numDays <= 99) ):
interval = 7 # show every 1 week
else: # 100 <= numDays <= ~150
interval = 14 # show every 2 weeks
elif ( numHours >= numticks ):
self._freq = HOURLY
bymonth = None
bymonthday = None
byhour = range(0, 24) # show every hour
if ( (0 <= numHours) and (numHours <= 14) ):
interval = 1 # show every hour
elif ( (15 <= numHours) and (numHours <= 30) ):
interval = 2 # show every 2 hours
elif ( (30 <= numHours) and (numHours <= 45) ):
interval = 3 # show every 3 hours
elif ( (45 <= numHours) and (numHours <= 68) ):
interval = 4 # show every 4 hours
elif ( (68 <= numHours) and (numHours <= 90) ):
interval = 6 # show every 6 hours
else: # 90 <= numHours <= 120
interval = 12 # show every 12 hours
elif ( numMinutes >= numticks ):
self._freq = MINUTELY
bymonth = None
bymonthday = None
byhour = None
byminute = range(0, 60)
if ( numMinutes > (10.0 * numticks) ):
interval = 10
# end if
elif ( numSeconds >= numticks ):
self._freq = SECONDLY
bymonth = None
bymonthday = None
byhour = None
byminute = None
bysecond = range(0, 60)
if ( numSeconds > (10.0 * numticks) ):
interval = 10
# end if
else:
# do what?
# microseconds as floats, but floats from what reference point?
pass
rrule = rrulewrapper( self._freq, interval=interval, \
dtstart=dmin, until=dmax, \
bymonth=bymonth, bymonthday=bymonthday, \
byhour=byhour, byminute = byminute, \
bysecond=bysecond )
locator = RRuleLocator(rrule, self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = { 'month' : month,
'day' : day,
'hour' : 0,
'minute' : 0,
'second' : 0,
'tzinfo' : tz
}
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 365
def __call__(self):
dmin, dmax = self.viewlim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
ticks = [dmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year>=ymax: return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g. 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None: bymonth=range(1,13)
o = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 30
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
o = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 7
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None: bymonthday=range(1,32)
o = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None: byhour=range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1/24.
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None: byminute=range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None: bysecond=range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60*60)
def _close_to_dt(d1, d2, epsilon=5):
'Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.'
delta = d2-d1
mus = abs(delta.days*MUSECONDS_PER_DAY + delta.seconds*1e6 +
delta.microseconds)
assert(mus<epsilon)
def _close_to_num(o1, o2, epsilon=5):
'Assert that float ordinals *o1* and *o2* are within *epsilon* microseconds.'
delta = abs((o2-o1)*MUSECONDS_PER_DAY)
assert(delta<epsilon)
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
spd = 24.*3600.
return 719163 + np.asarray(e)/spd
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
spd = 24.*3600.
return (np.asarray(d)-719163)*spd
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar: return ret[0]
else: return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span==0: span = 1/24.
minutes = span*24*60
hours = span*24
days = span
weeks = span/7.
months = span/31. # approx
years = span/365.
if years>numticks:
locator = YearLocator(int(years/numticks), tz=tz) # define
fmt = '%Y'
elif months>numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif weeks>numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days>numticks:
locator = DayLocator(interval=int(math.ceil(days/numticks)), tz=tz)
fmt = '%b %d'
elif hours>numticks:
locator = HourLocator(interval=int(math.ceil(hours/numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif minutes>numticks:
locator = MinuteLocator(interval=int(math.ceil(minutes/numticks)), tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
'Return seconds as days.'
return float(s)/SEC_PER_DAY
def minutes(m):
'Return minutes as days.'
return float(m)/MINUTES_PER_DAY
def hours(h):
'Return hours as days.'
return h/24.
def weeks(w):
'Return weeks as days.'
return w*7.
class DateConverter(units.ConversionInterface):
def axisinfo(unit):
'return the unit AxisInfo'
if unit=='date':
majloc = AutoDateLocator()
majfmt = AutoDateFormatter(majloc)
return units.AxisInfo(
majloc = majloc,
majfmt = majfmt,
label='',
)
else: return None
axisinfo = staticmethod(axisinfo)
def convert(value, unit):
if units.ConversionInterface.is_numlike(value): return value
return date2num(value)
convert = staticmethod(convert)
def default_units(x):
'Return the default unit for *x* or None'
return 'date'
default_units = staticmethod(default_units)
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
if __name__=='__main__':
#tz = None
tz = pytz.timezone('US/Pacific')
#tz = UTC
dt = datetime.datetime(1011, 10, 9, 13, 44, 22, 101010, tzinfo=tz)
x = date2num(dt)
_close_to_dt(dt, num2date(x, tz))
#tz = _get_rc_timezone()
d1 = datetime.datetime( 2000, 3, 1, tzinfo=tz)
d2 = datetime.datetime( 2000, 3, 5, tzinfo=tz)
#d1 = datetime.datetime( 2002, 1, 5, tzinfo=tz)
#d2 = datetime.datetime( 2003, 12, 1, tzinfo=tz)
delta = datetime.timedelta(hours=6)
dates = drange(d1, d2, delta)
# MGDTODO: Broken on transforms branch
#print 'orig', d1
#print 'd2n and back', num2date(date2num(d1), tz)
from _transforms import Value, Interval
v1 = Value(date2num(d1))
v2 = Value(date2num(d2))
dlim = Interval(v1,v2)
vlim = Interval(v1,v2)
#locator = HourLocator(byhour=(3,15), tz=tz)
#locator = MinuteLocator(byminute=(15,30,45), tz=tz)
#locator = YearLocator(base=5, month=7, day=4, tz=tz)
#locator = MonthLocator(bymonthday=15)
locator = DayLocator(tz=tz)
locator.set_data_interval(dlim)
locator.set_view_interval(vlim)
dmin, dmax = locator.autoscale()
vlim.set_bounds(dmin, dmax)
ticks = locator()
fmt = '%Y-%m-%d %H:%M:%S %Z'
formatter = DateFormatter(fmt, tz)
#for t in ticks: print formatter(t)
for t in dates: print formatter(t)
| agpl-3.0 |
rkmaddox/mne-python | tutorials/machine-learning/50_decoding.py | 3 | 17267 | r"""
===============
Decoding (MVPA)
===============
.. include:: ../../links.inc
Design philosophy
=================
Decoding (a.k.a. MVPA) in MNE largely follows the machine
learning API of the scikit-learn package.
Each estimator implements ``fit``, ``transform``, ``fit_transform``, and
(optionally) ``inverse_transform`` methods. For more details on this design,
visit scikit-learn_. For additional theoretical insights into the decoding
framework in MNE, see :footcite:`KingEtAl2018`.
For ease of comprehension, we will denote instantiations of the class using
the same name as the class but in lowercase instead of CamelCase.
Let's start by loading data for a simple two-class problem:
"""
# sphinx_gallery_thumbnail_number = 6
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import mne
from mne.datasets import sample
from mne.decoding import (SlidingEstimator, GeneralizingEstimator, Scaler,
cross_val_multiscore, LinearModel, get_coef,
Vectorizer, CSP)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax = -0.200, 0.500
event_id = {'Auditory/Left': 1, 'Visual/Left': 3} # just use two
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# The subsequent decoding analyses only capture evoked responses, so we can
# low-pass the MEG data. Usually a value more like 40 Hz would be used,
# but here we low-pass at 20 Hz so we can decimate more heavily and allow
# the example to run faster. The 2 Hz high-pass helps improve CSP.
raw.filter(2, 20)
events = mne.find_events(raw, 'STI 014')
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('grad', 'eog'), baseline=(None, 0.), preload=True,
reject=dict(grad=4000e-13, eog=150e-6), decim=10)
epochs.pick_types(meg=True, exclude='bads') # remove stim and EOG
del raw
X = epochs.get_data() # MEG signals: n_epochs, n_meg_channels, n_times
y = epochs.events[:, 2] # target: auditory left vs visual left
###############################################################################
# Transformation classes
# ======================
#
# Scaler
# ^^^^^^
# The :class:`mne.decoding.Scaler` will standardize the data based on channel
# scales. In the simplest modes ``scalings=None`` or ``scalings=dict(...)``,
# each data channel type (e.g., mag, grad, eeg) is treated separately and
# scaled by a constant. This is the approach used by e.g.,
# :func:`mne.compute_covariance` to standardize channel scales.
#
# If ``scalings='mean'`` or ``scalings='median'``, each channel is scaled using
# empirical measures. Each channel is scaled independently by the mean and
# standard deviation, or median and interquartile range, respectively, across
# all epochs and time points during :class:`~mne.decoding.Scaler.fit`
# (during training). The :meth:`~mne.decoding.Scaler.transform` method is
# called to transform data (training or test set) by scaling all time points
# and epochs on a channel-by-channel basis. To perform both the ``fit`` and
# ``transform`` operations in a single call, the
# :meth:`~mne.decoding.Scaler.fit_transform` method may be used. To invert the
# transform, :meth:`~mne.decoding.Scaler.inverse_transform` can be used. For
# ``scalings='median'``, scikit-learn_ version 0.17+ is required.
#
# .. note:: Using this class is different from directly applying
# :class:`sklearn.preprocessing.StandardScaler` or
# :class:`sklearn.preprocessing.RobustScaler` offered by
# scikit-learn_. These scale each *classification feature*, e.g.
# each time point for each channel, with mean and standard
# deviation computed across epochs, whereas
# :class:`mne.decoding.Scaler` scales each *channel* using mean and
# standard deviation computed across all of its time points
# and epochs.
#
# Vectorizer
# ^^^^^^^^^^
# Scikit-learn API provides functionality to chain transformers and estimators
# by using :class:`sklearn.pipeline.Pipeline`. We can construct decoding
# pipelines and perform cross-validation and grid-search. However, scikit-learn
# transformers and estimators generally expect 2D data
# (n_samples * n_features), whereas MNE transformers typically output data
# with a higher dimensionality
# (e.g. n_samples * n_channels * n_frequencies * n_times). A Vectorizer
# therefore needs to be applied between the MNE and the scikit-learn steps
# like:
# Uses all MEG sensors and time points as separate classification
# features, so the resulting filters used are spatio-temporal
clf = make_pipeline(Scaler(epochs.info),
Vectorizer(),
LogisticRegression(solver='lbfgs'))
scores = cross_val_multiscore(clf, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
score = np.mean(scores, axis=0)
print('Spatio-temporal: %0.1f%%' % (100 * score,))
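# As a side note, the :class:`mne.decoding.Scaler` described above can also be
# used on its own. A minimal sketch (scaling the epochs data and round-tripping
# it back -- the reconstruction should closely match the original data):
scaler = Scaler(epochs.info)
X_scaled = scaler.fit_transform(X, y)
X_back = scaler.inverse_transform(X_scaled)
print('Scaler round-trip max abs diff: %g' % np.max(np.abs(X_back - X)))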
###############################################################################
# PSDEstimator
# ^^^^^^^^^^^^
# The :class:`mne.decoding.PSDEstimator`
# computes the power spectral density (PSD) using the multitaper
# method. It takes a 3D array as input, converts it into 2D, and computes the
# PSD (a short sketch follows the CSP example below).
#
# FilterEstimator
# ^^^^^^^^^^^^^^^
# The :class:`mne.decoding.FilterEstimator` filters the 3D epochs data.
#
# Spatial filters
# ===============
#
# Just like temporal filters, spatial filters provide weights to modify the
# data along the sensor dimension. They are popular in the BCI community
# because of their simplicity and ability to distinguish spatially-separated
# neural activity.
#
# Common spatial pattern
# ^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`mne.decoding.CSP` is a technique to analyze multichannel data based
# on recordings from two classes :footcite:`Koles1991` (see also
# https://en.wikipedia.org/wiki/Common_spatial_pattern).
#
# Let :math:`X \in R^{C\times T}` be a segment of data with
# :math:`C` channels and :math:`T` time points. The data at a single time point
# is denoted by :math:`x(t)` such that :math:`X=[x(t), x(t+1), ..., x(t+T-1)]`.
# Common spatial pattern (CSP) finds a decomposition that projects the signal
# in the original sensor space to CSP space using the following transformation:
#
# .. math:: x_{CSP}(t) = W^{T}x(t)
# :label: csp
#
# where each column of :math:`W \in R^{C\times C}` is a spatial filter and each
# row of :math:`x_{CSP}` is a CSP component. The matrix :math:`W` is also
# called the de-mixing matrix in other contexts. Let
# :math:`\Sigma^{+} \in R^{C\times C}` and :math:`\Sigma^{-} \in R^{C\times C}`
# be the estimates of the covariance matrices of the two conditions.
# CSP analysis is given by the simultaneous diagonalization of the two
# covariance matrices
#
# .. math:: W^{T}\Sigma^{+}W = \lambda^{+}
# :label: diagonalize_p
# .. math:: W^{T}\Sigma^{-}W = \lambda^{-}
# :label: diagonalize_n
#
# where :math:`\lambda^{C}` is a diagonal matrix whose entries are the
# eigenvalues of the following generalized eigenvalue problem
#
# .. math:: \Sigma^{+}w = \lambda \Sigma^{-}w
# :label: eigen_problem
#
# A large entry in the diagonal matrix corresponds to a spatial filter that
# gives high variance in one class but low variance in the other. Thus, the
# filter facilitates discrimination between the two classes.
#
# .. topic:: Examples
#
# * :ref:`ex-decoding-csp-eeg`
# * :ref:`ex-decoding-csp-eeg-timefreq`
#
# .. note::
#
# The winning entry of the Grasp-and-lift EEG competition in Kaggle used
# the :class:`~mne.decoding.CSP` implementation in MNE and was featured as
# a `script of the week <sotw_>`_.
#
# .. _sotw: http://blog.kaggle.com/2015/08/12/july-2015-scripts-of-the-week/
#
# We can use CSP with these data with:
csp = CSP(n_components=3, norm_trace=False)
clf_csp = make_pipeline(csp, LinearModel(LogisticRegression(solver='lbfgs')))
scores = cross_val_multiscore(clf_csp, X, y, cv=5, n_jobs=1)
print('CSP: %0.1f%%' % (100 * scores.mean(),))
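# Returning to the :class:`mne.decoding.PSDEstimator` mentioned earlier, here is
# a brief sketch (the frequency band is illustrative; the FilterEstimator plays
# an analogous role but returns filtered time series instead of spectra):
from mne.decoding import PSDEstimator
psd = PSDEstimator(sfreq=epochs.info['sfreq'], fmin=2., fmax=20.)
X_psd = psd.fit_transform(X, y)  # roughly (n_epochs, n_channels, n_freqs)
print('PSD feature shape: %s' % (X_psd.shape,))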
###############################################################################
# Source power comodulation (SPoC)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Source Power Comodulation (:class:`mne.decoding.SPoC`)
# :footcite:`DahneEtAl2014` identifies the composition of
# orthogonal spatial filters that maximally correlate with a continuous target.
#
# SPoC can be seen as an extension of the CSP where the target is driven by a
# continuous variable rather than a discrete variable. Typical applications
# include extraction of motor patterns using EMG power or audio patterns using
# sound envelope.
#
# .. topic:: Examples
#
# * :ref:`ex-spoc-cmc`
#
# xDAWN
# ^^^^^
# :class:`mne.preprocessing.Xdawn` is a spatial filtering method designed to
# improve the signal to signal + noise ratio (SSNR) of the ERP responses
# :footcite:`RivetEtAl2009`. Xdawn was originally
# designed for P300 evoked potential by enhancing the target response with
# respect to the non-target response. The implementation in MNE-Python is a
# generalization to any type of ERP.
#
# .. topic:: Examples
#
# * :ref:`ex-xdawn-denoising`
# * :ref:`ex-xdawn-decoding`
#
# Effect-matched spatial filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The result of :class:`mne.decoding.EMS` is a spatial filter at each time
# point and a corresponding time course :footcite:`SchurgerEtAl2013`.
# Intuitively, the result gives the similarity between the filter at
# each time point and the data vector (sensors) at that time point.
#
# .. topic:: Examples
#
# * :ref:`ex-ems-filtering`
#
# Patterns vs. filters
# ^^^^^^^^^^^^^^^^^^^^
#
# When interpreting the components of the CSP (or spatial filters in general),
# it is often more intuitive to think about how :math:`x(t)` is composed of
# the different CSP components :math:`x_{CSP}(t)`. In other words, we can
# rewrite Equation :eq:`csp` as follows:
#
# .. math:: x(t) = (W^{-1})^{T}x_{CSP}(t)
# :label: patterns
#
# The columns of the matrix :math:`(W^{-1})^T` are called spatial patterns.
# This is also called the mixing matrix. The example :ref:`ex-linear-patterns`
# discusses the difference between patterns and filters.
#
# These can be plotted with:
# Fit CSP on full data and plot
csp.fit(X, y)
csp.plot_patterns(epochs.info)
csp.plot_filters(epochs.info, scalings=1e-9)
###############################################################################
# Decoding over time
# ==================
#
# This strategy consists in fitting a multivariate predictive model on each
# time instant and evaluating its performance at the same instant on new
# epochs. The :class:`mne.decoding.SlidingEstimator` will take as input a
# pair of features :math:`X` and targets :math:`y`, where :math:`X` has
# more than 2 dimensions. For decoding over time the data :math:`X`
# is the epochs data of shape n_epochs x n_channels x n_times. As the
# last dimension of :math:`X` is the time, an estimator will be fit
# on every time instant.
#
# This approach is analogous to searchlight-based approaches in fMRI,
# where here we are interested in when one can discriminate experimental
# conditions and therefore figure out when the effect of interest happens.
#
# When working with linear models as estimators, this approach boils
# down to estimating a discriminative spatial filter for each time instant.
#
# Temporal decoding
# ^^^^^^^^^^^^^^^^^
#
# We'll use a Logistic Regression for a binary classification as machine
# learning model.
# We will train the classifier on all left visual vs auditory trials on MEG
clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs'))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot
fig, ax = plt.subplots()
ax.plot(epochs.times, scores, label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC') # Area Under the Curve
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Sensor space decoding')
###############################################################################
# You can retrieve the spatial filters and spatial patterns if you explicitly
# use a LinearModel
clf = make_pipeline(StandardScaler(),
LinearModel(LogisticRegression(solver='lbfgs')))
time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc', verbose=True)
time_decod.fit(X, y)
coef = get_coef(time_decod, 'patterns_', inverse_transform=True)
evoked_time_gen = mne.EvokedArray(coef, epochs.info, tmin=epochs.times[0])
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
evoked_time_gen.plot_joint(times=np.arange(0., .500, .100), title='patterns',
**joint_kwargs)
###############################################################################
# Temporal generalization
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# Temporal generalization is an extension of the decoding over time approach.
# It consists in evaluating whether the model estimated at a particular
# time instant accurately predicts any other time instant. It is analogous to
# transferring a trained model to a distinct learning problem, where the
# problems correspond to decoding the patterns of brain activity recorded at
# distinct time instants.
#
# The object for temporal generalization is
# :class:`mne.decoding.GeneralizingEstimator`. It expects as input :math:`X`
# and :math:`y` (similarly to :class:`~mne.decoding.SlidingEstimator`) but
# generates predictions from each model for all time instants. The class
# :class:`~mne.decoding.GeneralizingEstimator` is generic and will treat the
# last dimension as the one to be used for generalization testing. For
# convenience, here we refer to these as different tasks. If :math:`X`
# corresponds to epochs data, then the last dimension is time.
#
# This runs the analysis used in :footcite:`KingEtAl2014` and further detailed
# in :footcite:`KingDehaene2014`:
# define the Temporal generalization object
time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring='roc_auc',
verbose=True)
scores = cross_val_multiscore(time_gen, X, y, cv=5, n_jobs=1)
# Mean scores across cross-validation splits
scores = np.mean(scores, axis=0)
# Plot the diagonal (it's exactly the same as the time-by-time decoding above)
fig, ax = plt.subplots()
ax.plot(epochs.times, np.diag(scores), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.set_xlabel('Times')
ax.set_ylabel('AUC')
ax.legend()
ax.axvline(.0, color='k', linestyle='-')
ax.set_title('Decoding MEG sensors over time')
###############################################################################
# Plot the full (generalization) matrix:
fig, ax = plt.subplots(1, 1)
im = ax.imshow(scores, interpolation='lanczos', origin='lower', cmap='RdBu_r',
extent=epochs.times[[0, -1, 0, -1]], vmin=0., vmax=1.)
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title('Temporal generalization')
ax.axvline(0, color='k')
ax.axhline(0, color='k')
plt.colorbar(im, ax=ax)
###############################################################################
# Projecting sensor-space patterns to source space
# ================================================
# If you use a linear classifier (or regressor) for your data, you can also
# project these to source space. For example, using our ``evoked_time_gen``
# from before:
cov = mne.compute_covariance(epochs, tmax=0.)
del epochs
fwd = mne.read_forward_solution(
data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif')
inv = mne.minimum_norm.make_inverse_operator(
evoked_time_gen.info, fwd, cov, loose=0.)
stc = mne.minimum_norm.apply_inverse(evoked_time_gen, inv, 1. / 9., 'dSPM')
del fwd, inv
###############################################################################
# And this can be visualized using :meth:`stc.plot <mne.SourceEstimate.plot>`:
brain = stc.plot(hemi='split', views=('lat', 'med'), initial_time=0.1,
subjects_dir=subjects_dir)
###############################################################################
# Source-space decoding
# =====================
#
# Source space decoding is also possible, but because the number of features
# can be much larger than in the sensor space, univariate feature selection
# using ANOVA f-test (or some other metric) can be done to reduce the feature
# dimension. Interpreting decoding results might be easier in source space as
# compared to sensor space.
#
# .. topic:: Examples
#
# * :ref:`tut_dec_st_source`
#
# Exercise
# ========
#
# - Explore other datasets from MNE (e.g. Face dataset from SPM to predict
# Face vs. Scrambled)
#
# References
# ==========
# .. footbibliography::
| bsd-3-clause |
CZCV/s-dilation-caffe | python/detect.py | 36 | 5734 | #!/usr/bin/env python
"""
detector.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
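Example invocation (file names are placeholders; selective search mode
additionally requires the module noted above):
    python detect.py --crop_mode=selective_search \
        --pretrained_model=../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel \
        --model_def=../models/bvlc_reference_caffenet/deploy.prototxt \
        images.txt detections.h5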
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserved order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="selective_search",
choices=CROP_MODES,
help="How to generate windows for detection."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--context_pad",
type=int,
default='16',
help="Amount of surrounding context to collect in input window."
)
args = parser.parse_args()
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if mean.shape[1:] != (1, 1):
mean = mean.mean(1).mean(1)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap,
context_pad=args.context_pad)
# Load input.
t = time.time()
print("Loading input...")
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = [
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
]
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
# Enumerate the class probabilities (the number of classes is inferred
# from the feature vector length, since it is not defined elsewhere here).
num_output = len(df['feat'].iloc[0])
class_cols = ['class{}'.format(x) for x in range(num_output)]
df[class_cols] = pd.DataFrame(
data=np.vstack(df['feat']), index=df.index, columns=class_cols)
df.to_csv(args.output_file, cols=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
| agpl-3.0 |
blab/antibody-response-pulse | bcell-array/code/Antibody_Bcell_Tcell_Virus_model.py | 1 | 11586 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Antibody Response Pulse
# https://github.com/blab/antibody-response-pulse
#
# ### B-cells evolution --- cross-reactive antibody response after influenza virus infection or vaccination
# ### Adaptive immune response for repeated infection
# <codecell>
'''
author: Alvason Zhenhua Li
date: 04/09/2015
'''
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
AlvaFontSize = 23;
AlvaFigSize = (14, 6);
numberingFig = 0;
# setting parameter
timeUnit = 'day'
if timeUnit == 'hour':
hour = float(1); day = float(24);
elif timeUnit == 'day':
day = float(1); hour = float(1)/24;
###############
numberingFig = numberingFig + 1;
plt.figure(numberingFig, figsize=(12,6))
plt.axis('off')
plt.title(r'$ Antibody-Bcell-Tcell-Virus \ response \ equations \ (long-term-infection) $'
, fontsize = AlvaFontSize)
plt.text(0, 5.0/6,r'$ \frac{\partial A_n(t)}{\partial t} = \
+\mu_a B_{n}(t) - (\phi_{ma} + \phi_{ga})A_{n}(t)V_{n}(t) - (\mu_{ma} + \mu_{ga})A_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 4.0/6,r'$ \frac{\partial B_n(t)}{\partial t} = \
+\mu_b + (\alpha_{bn} + \alpha_{bm}) V_{n}(t)C_{n}(t)B_{n}(t) - \mu_b B_n(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 3.0/6,r'$ \frac{\partial C_n(t)}{\partial t} = \
+\mu_c + \alpha_c V_n(t)C_{n}(t) - \mu_c C_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 2.0/6,r'$ \frac{\partial V_n(t)}{\partial t} = \
+\rho V_n(t)(1 - \frac{V_n(t)}{V_{max}}) - (\phi_{mv} + \phi_{gv}) A_{n}(t)V_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.show()
# define the partial differential equations
def dAdt_array(ABCVxt = [], *args):
# naming
A = ABCVxt[0]
B = ABCVxt[1]
C = ABCVxt[2]
V = ABCVxt[3]
x_totalPoint = ABCVxt.shape[1]
# one dA/dt equation per grid point
dA_dt_array = np.zeros(x_totalPoint)
# each dA/dt has the same equation form
for xn in range(x_totalPoint):
dA_dt_array[xn] = +inRateA*B[xn] - (outRateAmV + outRateAgV)*A[xn]*V[xn] - (outRateAm + outRateAg)*A[xn]
return(dA_dt_array)
def dBdt_array(ABCVxt = [], *args):
# naming
A = ABCVxt[0]
B = ABCVxt[1]
C = ABCVxt[2]
V = ABCVxt[3]
x_totalPoint = ABCVxt.shape[1]
# one dB/dt equation per grid point
dB_dt_array = np.zeros(x_totalPoint)
# each dB/dt has the same equation form
for xn in range(x_totalPoint):
dB_dt_array[xn] = +inOutRateB + (actRateB_naive + actRateB_memory)*V[xn]*C[xn]*B[xn] - inOutRateB*B[xn]
return(dB_dt_array)
def dCdt_array(ABCVxt = [], *args):
# naming
A = ABCVxt[0]
B = ABCVxt[1]
C = ABCVxt[2]
V = ABCVxt[3]
x_totalPoint = ABCVxt.shape[1]
# one dC/dt equation per grid point
dC_dt_array = np.zeros(x_totalPoint)
# each dC/dt has the same equation form
for xn in range(x_totalPoint):
dC_dt_array[xn] = +inOutRateC + actRateC*V[xn]*C[xn] - inOutRateC*C[xn]
return(dC_dt_array)
def dVdt_array(ABCVxt = [], *args):
# naming
A = ABCVxt[0]
B = ABCVxt[1]
C = ABCVxt[2]
V = ABCVxt[3]
x_totalPoint = ABCVxt.shape[1]
# one dV/dt equation per grid point
dV_dt_array = np.zeros(x_totalPoint)
# each dTdt with the same equation form
for xn in range(x_totalPoint):
dV_dt_array[xn] = +inRateV*V[xn]*(1 - V[xn]/totalV) - (outRateVg + outRateVm)*A[xn]*V[xn]
return(dV_dt_array)
# define RK4 for an array (4, n) of coupled differential equations
def AlvaRungeKutta4ArrayXT(pde_array, startingOut_Value, minX_In, maxX_In, totalGPoint_X, minT_In, maxT_In, totalGPoint_T):
global actRateB_memory
# primary size of pde equations
outWay = pde_array.shape[0]
# initialize the whole memory-space for output and input
inWay = 1; # one layer is enough for storing "x" and "t" (only two list of variable)
# define the first part of array as output memory-space
gridOutIn_array = np.zeros([outWay + inWay, totalGPoint_X, totalGPoint_T])
# loading starting output values
for i in range(outWay):
gridOutIn_array[i, :, :] = startingOut_Value[i, :, :]
# griding input X value
gridingInput_X = np.linspace(minX_In, maxX_In, num = totalGPoint_X, retstep = True)
# loading input values to (define the final array as input memory-space)
gridOutIn_array[-inWay, :, 0] = gridingInput_X[0]
# step-size (increment of input X)
dx = gridingInput_X[1]
# griding input T value
gridingInput_T = np.linspace(minT_In, maxT_In, num = totalGPoint_T, retstep = True)
# loading input values to (define the final array as input memory-space)
gridOutIn_array[-inWay, 0, :] = gridingInput_T[0]
# step-size (increment of input T)
dt = gridingInput_T[1]
# starting
# initialize the memory-space for local try-step
dydt1_array = np.zeros([outWay, totalGPoint_X])
dydt2_array = np.zeros([outWay, totalGPoint_X])
dydt3_array = np.zeros([outWay, totalGPoint_X])
dydt4_array = np.zeros([outWay, totalGPoint_X])
# initialize the memory-space for keeping current value
currentOut_Value = np.zeros([outWay, totalGPoint_X])
for tn in range(totalGPoint_T - 1):
actRateB_memory = 0
tn_unit = totalGPoint_T/(maxT_In - minT_In)
if tn > int(14*day*tn_unit):
actRateB_memory = float(0.01)*24
# setting virus1 = 0 if virus1 < 1
if gridOutIn_array[3, 0, tn] < 1.0:
gridOutIn_array[3, 0, tn] = 0.0
## 2nd infection
if tn == int(20*day*tn_unit):
gridOutIn_array[3, 0, tn] = 1.0 # virus infection
### 3rd infection
if tn == int(2*20*day*tn_unit):
gridOutIn_array[3, 0, tn] = 1.0 # virus infection
        ### 4th infection
if tn == int(50*day*tn_unit):
gridOutIn_array[3, 0, tn] = 1.0 # virus infection
# keep initial value at the moment of tn
currentOut_Value[:, :] = np.copy(gridOutIn_array[:-inWay, :, tn])
currentIn_T_Value = np.copy(gridOutIn_array[-inWay, 0, tn])
# first try-step
for i in range(outWay):
for xn in range(totalGPoint_X):
                dydt1_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing rate
gridOutIn_array[:-inWay, :, tn] = currentOut_Value[:, :] + dydt1_array[:, :]*dt/2 # update output
gridOutIn_array[-inWay, 0, tn] = currentIn_T_Value + dt/2 # update input
        # second try-step (at the half step)
for i in range(outWay):
for xn in range(totalGPoint_X):
                dydt2_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing rate
gridOutIn_array[:-inWay, :, tn] = currentOut_Value[:, :] + dydt2_array[:, :]*dt/2 # update output
gridOutIn_array[-inWay, 0, tn] = currentIn_T_Value + dt/2 # update input
        # third try-step (at the half step)
for i in range(outWay):
for xn in range(totalGPoint_X):
                dydt3_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing rate
gridOutIn_array[:-inWay, :, tn] = currentOut_Value[:, :] + dydt3_array[:, :]*dt # update output
gridOutIn_array[-inWay, 0, tn] = currentIn_T_Value + dt # update input
# fourth try-step
for i in range(outWay):
for xn in range(totalGPoint_X):
                dydt4_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing rate
# solid step (update the next output) by accumulate all the try-steps with proper adjustment
gridOutIn_array[:-inWay, :, tn + 1] = currentOut_Value[:, :] + dt*(dydt1_array[:, :]/6
+ dydt2_array[:, :]/3
+ dydt3_array[:, :]/3
+ dydt4_array[:, :]/6)
# restore to initial value
gridOutIn_array[:-inWay, :, tn] = np.copy(currentOut_Value[:, :])
gridOutIn_array[-inWay, 0, tn] = np.copy(currentIn_T_Value)
# end of loop
return (gridOutIn_array[:-inWay, :])
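# Illustrative sketch (not part of the original model): the same classic RK4
# update used by AlvaRungeKutta4ArrayXT above, written out for a single scalar
# ODE dy/dt = f(t, y). The function name and arguments are hypothetical and it
# is never called here; it only documents the 1/6, 1/3, 1/3, 1/6 weighting.
def alva_rk4_scalar_sketch(f, y0, minT_In, maxT_In, totalGPoint_T):
    t_grid = np.linspace(minT_In, maxT_In, totalGPoint_T)
    dt = t_grid[1] - t_grid[0]
    y_grid = np.zeros(totalGPoint_T)
    y_grid[0] = y0
    for tn in range(totalGPoint_T - 1):
        t = t_grid[tn]
        y = y_grid[tn]
        k1 = f(t, y)                   # slope at the start of the step
        k2 = f(t + dt/2, y + dt*k1/2)  # first mid-step slope
        k3 = f(t + dt/2, y + dt*k2/2)  # second mid-step slope
        k4 = f(t + dt, y + dt*k3)      # slope at the end of the step
        y_grid[tn + 1] = y + dt*(k1/6 + k2/3 + k3/3 + k4/6)
    return t_grid, y_grid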
##############
inRateA = float(0.3)/hour # growth rate of antibody from B-cell (secretion)
outRateAm = float(0.014)/hour # out rate of Antibody IgM
outRateAg = float(0.048)/hour # out rate of Antibody IgG
outRateAmV = float(4.2*10**(-5))/hour # antibody IgM clearance rate by virus
outRateAgV = float(1.67*10**(-4))/hour # antibody IgG clearance rate by virus
inOutRateB = float(0.037)/hour # birth rate of B-cell
actRateB_naive = float(6.0*10**(-7))/hour # activation rate of naive B-cell
#actRateB_memory = 0*float(0.0012)/hour # activation rate of memory B-cell
inOutRateC = float(0.017)/hour # birth rate of CD4 T-cell
actRateC = float(7.0*10**(-6))/hour # activation rate of CD4 T-cell
totalV = float(5000) # total virion/micro-liter
inRateV = float(0.16)/hour # intrinsic growth rate/hour
outRateVm = float(1.67*10**(-4))/hour # virion clearance rate by IgM
outRateVg = float(6.68*10**(-4))/hour # virion clearance rate by IgG
# time boundary and griding condition
minT = float(0); maxT = float(3*20*day);
totalGPoint_T = int(1*10**4 + 1);
gridT = np.linspace(minT, maxT, totalGPoint_T);
spacingT = np.linspace(minT, maxT, num = totalGPoint_T, retstep = True)
gridT = spacingT[0]
dt = spacingT[1]
# space boundary and griding condition
minX = float(0); maxX = float(1);
totalGPoint_X = int(1 + 1);
gridX = np.linspace(minX, maxX, totalGPoint_X);
gridingX = np.linspace(minX, maxX, num = totalGPoint_X, retstep = True)
gridX = gridingX[0]
dx = gridingX[1]
gridA_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridB_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridC_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridV_array = np.zeros([totalGPoint_X, totalGPoint_T])
# initial output condition
gridA_array[:, 0] = float(0)
gridB_array[:, 0] = float(0)
gridC_array[0, 0] = float(0)
gridV_array[0, 0] = float(totalV)/10**3
# Runge Kutta numerical solution
pde_array = np.array([dAdt_array, dBdt_array, dCdt_array, dVdt_array])
startingOut_Value = np.array([gridA_array, gridB_array, gridC_array, gridV_array])
gridOut_array = AlvaRungeKutta4ArrayXT(pde_array, startingOut_Value, minX, maxX, totalGPoint_X, minT, maxT, totalGPoint_T)
# plotting
gridA = gridOut_array[0]
gridB = gridOut_array[1]
gridC = gridOut_array[2]
gridV = gridOut_array[3]
numberingFig = numberingFig + 1;
for i in range(1):
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridA[i], color = 'green', label = r'$ A_{%i}(t) $'%(i))
plt.plot(gridT, gridB[i], color = 'blue', label = r'$ B_{%i}(t) $'%(i))
plt.plot(gridT, gridC[i], color = 'gray', label = r'$ C_{%i}(t) $'%(i))
plt.plot(gridT, gridV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i))
plt.grid(True)
    plt.title(r'$ Antibody-Bcell-Tcell-Virus \ (immune \ response \ for \ repeated-infection) $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize);
plt.ylabel(r'$ Cells/ \mu L $', fontsize = AlvaFontSize);
plt.text(maxT, totalV*6.0/6, r'$ \Omega = %f $'%(totalV), fontsize = AlvaFontSize)
plt.text(maxT, totalV*5.0/6, r'$ \phi = %f $'%(inRateV), fontsize = AlvaFontSize)
plt.text(maxT, totalV*4.0/6, r'$ \xi = %f $'%(inRateA), fontsize = AlvaFontSize)
plt.text(maxT, totalV*3.0/6, r'$ \mu_b = %f $'%(inOutRateB), fontsize = AlvaFontSize)
plt.legend(loc = (1,0))
# plt.yscale('log')
plt.show()
# <codecell>
| gpl-2.0 |
cpcloud/dask | dask/dataframe/multi.py | 1 | 23332 | """
Algorithms that Involve Multiple DataFrames
===========================================
The pandas operations ``concat``, ``join``, and ``merge`` combine multiple
DataFrames. This module contains analogous algorithms in the parallel case.
There are two important cases:
1. We combine along a partitioned index
2. We combine along an unpartitioned index or other column
In the first case we know which partitions of each dataframe interact with
which others. This lets us be significantly more clever and efficient.
In the second case each partition from one dataset interacts with all
partitions from the other. We handle this through a shuffle operation.
Partitioned Joins
-----------------
In the first case where we join along a partitioned index we proceed in the
following stages.
1. Align the partitions of all inputs to be the same. This involves a call
to ``dd.repartition`` which will split up and concat existing partitions as
necessary. After this step all inputs have partitions that align with
each other. This step is relatively cheap.
See the function ``align_partitions``.
2. Remove unnecessary partitions based on the type of join we perform (left,
right, inner, outer). We can do this at the partition level before any
computation happens. We'll do it again on each partition when we call the
in-memory function. See the function ``require``.
3. Embarrassingly parallel calls to ``pd.concat``, ``pd.join``, or
``pd.merge``. Now that the data is aligned and unnecessary blocks have
been removed we can rely on the fast in-memory Pandas join machinery to
execute joins per-partition. We know that all intersecting records exist
within the same partition
Hash Joins via Shuffle
----------------------
When we join along an unpartitioned index or along an arbitrary column any
partition from one input might interact with any partition in another. In
this case we perform a hash-join by shuffling data in each input by that
column. This results in new inputs with the same partition structure cleanly
separated along that column.
We proceed with hash joins in the following stages:
1. Shuffle each input on the specified column. See the function
``dask.dataframe.shuffle.shuffle``.
2. Perform embarrassingly parallel join across shuffled inputs.
"""
from __future__ import absolute_import, division, print_function
from functools import wraps, partial
from warnings import warn
from toolz import merge_sorted, unique, first
import toolz
import pandas as pd
from ..base import tokenize
from ..compatibility import apply
from .core import (_Frame, DataFrame, map_partitions, Index,
_maybe_from_pandas, new_dd_object, is_broadcastable)
from .io import from_pandas
from . import methods
from .shuffle import shuffle, rearrange_by_divisions
from .utils import strip_unknown_categories
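# Illustrative usage sketch (not part of this module): the two join paths
# described in the module docstring above, using only the public
# dask.dataframe API (``from_pandas`` and ``merge``). The frames below are
# made-up examples and the helper is never called at import time.
def _example_partitioned_vs_hash_join():
    import dask.dataframe as dd
    a = pd.DataFrame({'k': [1, 2, 3, 4], 'x': [10, 20, 30, 40]})
    b = pd.DataFrame({'k': [2, 3, 4, 5], 'y': [1., 2., 3., 4.]})
    # Joining on a plain column: any partition of one input may match any
    # partition of the other, so ``merge`` falls back to the shuffle-based
    # hash join (see ``hash_join`` below).
    hashed = dd.merge(dd.from_pandas(a, npartitions=2),
                      dd.from_pandas(b, npartitions=2), on='k', how='inner')
    # Joining along a known, partitioned index: partitions are aligned first
    # and the join is embarrassingly parallel (see ``merge_indexed_dataframes``).
    indexed = dd.merge(dd.from_pandas(a.set_index('k'), npartitions=2),
                       dd.from_pandas(b.set_index('k'), npartitions=2),
                       left_index=True, right_index=True, how='inner')
    return hashed, indexed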
def align_partitions(*dfs):
""" Mutually partition and align DataFrame blocks
This serves as precursor to multi-dataframe operations like join, concat,
or merge.
Parameters
----------
dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar
Sequence of dataframes to be aligned on their index
Returns
-------
dfs: sequence of dd.DataFrame, dd.Series and dd.base.Scalar
These must have consistent divisions with each other
divisions: tuple
Full divisions sequence of the entire result
result: list
A list of lists of keys that show which data exist on which
divisions
"""
_is_broadcastable = partial(is_broadcastable, dfs)
dfs1 = [df for df in dfs
if isinstance(df, _Frame) and
not _is_broadcastable(df)]
if len(dfs) == 0:
raise ValueError("dfs contains no DataFrame and Series")
if not all(df.known_divisions for df in dfs1):
raise ValueError("Not all divisions are known, can't align "
"partitions. Please use `set_index` or "
"`set_partition` to set the index.")
divisions = list(unique(merge_sorted(*[df.divisions for df in dfs1])))
dfs2 = [df.repartition(divisions, force=True)
if isinstance(df, _Frame) else df for df in dfs]
result = list()
inds = [0 for df in dfs]
for d in divisions[:-1]:
L = list()
for i, df in enumerate(dfs2):
if isinstance(df, _Frame):
j = inds[i]
divs = df.divisions
if j < len(divs) - 1 and divs[j] == d:
L.append((df._name, inds[i]))
inds[i] += 1
else:
L.append(None)
else: # Scalar has no divisions
L.append(None)
result.append(L)
return dfs2, tuple(divisions), result
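# Illustrative sketch (not part of this module): how ``align_partitions`` is
# typically consumed. The index values and ``npartitions`` below are arbitrary
# assumptions, and the helper is never called at import time.
def _example_align_partitions():
    import dask.dataframe as dd
    a = dd.from_pandas(pd.DataFrame({'x': range(6)}, index=[0, 1, 2, 3, 4, 5]),
                       npartitions=2)
    b = dd.from_pandas(pd.DataFrame({'y': range(4)}, index=[2, 3, 4, 5]),
                       npartitions=2)
    (a2, b2), divisions, parts = align_partitions(a, b)
    # ``divisions`` is the sorted union of a.divisions and b.divisions; a2 and
    # b2 are repartitioned onto those shared boundaries, and ``parts`` records,
    # per division, which block keys (if any) each input contributes.
    return a2, b2, divisions, parts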
def _maybe_align_partitions(args):
"""Align DataFrame blocks if divisions are different.
Note that if all divisions are unknown, but have equal npartitions, then
    they will be passed through unchanged. This is different from
    `align_partitions`, which will fail if divisions aren't all known."""
_is_broadcastable = partial(is_broadcastable, args)
dfs = [df for df in args
if isinstance(df, _Frame) and
not _is_broadcastable(df)]
if not dfs:
return args
divisions = dfs[0].divisions
if not all(df.divisions == divisions for df in dfs):
dfs2 = iter(align_partitions(*dfs)[0])
return [a if not isinstance(a, _Frame) else next(dfs2) for a in args]
return args
def require(divisions, parts, required=None):
""" Clear out divisions where required components are not present
In left, right, or inner joins we exclude portions of the dataset if one
side or the other is not present. We can achieve this at the partition
level as well
>>> divisions = [1, 3, 5, 7, 9]
>>> parts = [(('a', 0), None),
... (('a', 1), ('b', 0)),
... (('a', 2), ('b', 1)),
... (None, ('b', 2))]
>>> divisions2, parts2 = require(divisions, parts, required=[0])
>>> divisions2
(1, 3, 5, 7)
>>> parts2 # doctest: +NORMALIZE_WHITESPACE
((('a', 0), None),
(('a', 1), ('b', 0)),
(('a', 2), ('b', 1)))
>>> divisions2, parts2 = require(divisions, parts, required=[1])
>>> divisions2
(3, 5, 7, 9)
>>> parts2 # doctest: +NORMALIZE_WHITESPACE
((('a', 1), ('b', 0)),
(('a', 2), ('b', 1)),
(None, ('b', 2)))
>>> divisions2, parts2 = require(divisions, parts, required=[0, 1])
>>> divisions2
(3, 5, 7)
>>> parts2 # doctest: +NORMALIZE_WHITESPACE
((('a', 1), ('b', 0)),
(('a', 2), ('b', 1)))
"""
if not required:
return divisions, parts
for i in required:
present = [j for j, p in enumerate(parts) if p[i] is not None]
divisions = tuple(divisions[min(present): max(present) + 2])
parts = tuple(parts[min(present): max(present) + 1])
return divisions, parts
###############################################################
# Join / Merge
###############################################################
required = {'left': [0], 'right': [1], 'inner': [0, 1], 'outer': []}
def merge_indexed_dataframes(lhs, rhs, how='left', lsuffix='', rsuffix='',
indicator=False):
""" Join two partitioned dataframes along their index """
(lhs, rhs), divisions, parts = align_partitions(lhs, rhs)
divisions, parts = require(divisions, parts, required[how])
left_empty = lhs._meta
right_empty = rhs._meta
name = 'join-indexed-' + tokenize(lhs, rhs, how, lsuffix, rsuffix,
indicator)
dsk = dict()
for i, (a, b) in enumerate(parts):
if a is None and how in ('right', 'outer'):
a = left_empty
if b is None and how in ('left', 'outer'):
b = right_empty
dsk[(name, i)] = (methods.merge, a, b, how, None, None, True, True,
indicator, (lsuffix, rsuffix), left_empty,
right_empty)
meta = pd.merge(lhs._meta_nonempty, rhs._meta_nonempty, how=how,
left_index=True, right_index=True,
suffixes=(lsuffix, rsuffix), indicator=indicator)
return new_dd_object(toolz.merge(lhs.dask, rhs.dask, dsk),
name, meta, divisions)
shuffle_func = shuffle # name sometimes conflicts with keyword argument
def hash_join(lhs, left_on, rhs, right_on, how='inner',
npartitions=None, suffixes=('_x', '_y'), shuffle=None,
indicator=False):
""" Join two DataFrames on particular columns with hash join
This shuffles both datasets on the joined column and then performs an
embarrassingly parallel join partition-by-partition
    >>> hash_join(lhs, 'id', rhs, 'id', how='left', npartitions=10) # doctest: +SKIP
"""
if npartitions is None:
npartitions = max(lhs.npartitions, rhs.npartitions)
lhs2 = shuffle_func(lhs, left_on, npartitions=npartitions, shuffle=shuffle)
rhs2 = shuffle_func(rhs, right_on, npartitions=npartitions, shuffle=shuffle)
if isinstance(left_on, Index):
left_on = None
left_index = True
else:
left_index = False
if isinstance(right_on, Index):
right_on = None
right_index = True
else:
right_index = False
# dummy result
meta = pd.merge(lhs._meta_nonempty, rhs._meta_nonempty, how=how,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
suffixes=suffixes, indicator=indicator)
if isinstance(left_on, list):
left_on = (list, tuple(left_on))
if isinstance(right_on, list):
right_on = (list, tuple(right_on))
token = tokenize(lhs2, left_on, rhs2, right_on, left_index, right_index,
how, npartitions, suffixes, shuffle, indicator)
name = 'hash-join-' + token
dsk = dict(((name, i), (methods.merge, (lhs2._name, i), (rhs2._name, i),
how, left_on, right_on,
left_index, right_index, indicator,
suffixes, lhs._meta, rhs._meta))
for i in range(npartitions))
divisions = [None] * (npartitions + 1)
return new_dd_object(toolz.merge(lhs2.dask, rhs2.dask, dsk),
name, meta, divisions)
def single_partition_join(left, right, **kwargs):
    # if the merge is performed on_index, divisions can be kept, otherwise the
    # new index will not necessarily correspond to the current divisions
meta = pd.merge(left._meta_nonempty, right._meta_nonempty, **kwargs)
name = 'merge-' + tokenize(left, right, **kwargs)
if left.npartitions == 1:
left_key = first(left._keys())
dsk = dict(((name, i), (apply, pd.merge, [left_key, right_key],
kwargs))
for i, right_key in enumerate(right._keys()))
if kwargs.get('right_index'):
divisions = right.divisions
else:
divisions = [None for _ in right.divisions]
elif right.npartitions == 1:
right_key = first(right._keys())
dsk = dict(((name, i), (apply, pd.merge, [left_key, right_key],
kwargs))
for i, left_key in enumerate(left._keys()))
if kwargs.get('left_index'):
divisions = left.divisions
else:
divisions = [None for _ in left.divisions]
return new_dd_object(toolz.merge(dsk, left.dask, right.dask), name,
meta, divisions)
@wraps(pd.merge)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, suffixes=('_x', '_y'),
indicator=False, npartitions=None, shuffle=None, max_branch=None):
for o in [on, left_on, right_on]:
if isinstance(o, _Frame):
raise NotImplementedError(
"Dask collections not currently allowed in merge columns")
if not on and not left_on and not right_on and not left_index and not right_index:
on = [c for c in left.columns if c in right.columns]
if not on:
left_index = right_index = True
if on and not left_on and not right_on:
left_on = right_on = on
on = None
if (isinstance(left, (pd.Series, pd.DataFrame)) and
isinstance(right, (pd.Series, pd.DataFrame))):
return pd.merge(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, suffixes=suffixes,
indicator=indicator)
# Transform pandas objects into dask.dataframe objects
if isinstance(left, (pd.Series, pd.DataFrame)):
if right_index and left_on: # change to join on index
left = left.set_index(left[left_on])
left_on = False
left_index = True
left = from_pandas(left, npartitions=1) # turn into DataFrame
if isinstance(right, (pd.Series, pd.DataFrame)):
if left_index and right_on: # change to join on index
right = right.set_index(right[right_on])
right_on = False
right_index = True
right = from_pandas(right, npartitions=1) # turn into DataFrame
# Both sides are now dd.DataFrame or dd.Series objects
# Both sides indexed
if (left_index and left.known_divisions and
right_index and right.known_divisions): # Do indexed join
return merge_indexed_dataframes(left, right, how=how,
lsuffix=suffixes[0],
rsuffix=suffixes[1],
indicator=indicator)
# Single partition on one side
elif (left.npartitions == 1 and how in ('inner', 'right') or
right.npartitions == 1 and how in ('inner', 'left')):
return single_partition_join(left, right, how=how, right_on=right_on,
left_on=left_on, left_index=left_index,
right_index=right_index,
suffixes=suffixes, indicator=indicator)
# One side is indexed, the other not
elif (left_index and left.known_divisions and not right_index or
right_index and right.known_divisions and not left_index):
left_empty = left._meta_nonempty
right_empty = right._meta_nonempty
meta = pd.merge(left_empty, right_empty, how=how, on=on,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
suffixes=suffixes, indicator=indicator)
if left_index and left.known_divisions:
right = rearrange_by_divisions(right, right_on, left.divisions,
max_branch, shuffle=shuffle)
left = left.clear_divisions()
elif right_index and right.known_divisions:
left = rearrange_by_divisions(left, left_on, right.divisions,
max_branch, shuffle=shuffle)
right = right.clear_divisions()
return map_partitions(pd.merge, left, right, meta=meta, how=how, on=on,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
suffixes=suffixes, indicator=indicator)
# Catch all hash join
else:
return hash_join(left, left.index if left_index else left_on,
right, right.index if right_index else right_on,
how, npartitions, suffixes, shuffle=shuffle,
indicator=indicator)
###############################################################
# Concat
###############################################################
def concat_and_check(dfs):
if len(set(map(len, dfs))) != 1:
raise ValueError("Concatenated DataFrames of different lengths")
return pd.concat(dfs, axis=1)
def concat_unindexed_dataframes(dfs):
name = 'concat-' + tokenize(*dfs)
dsk = {(name, i): (concat_and_check, [(df._name, i) for df in dfs])
for i in range(dfs[0].npartitions)}
meta = pd.concat([df._meta for df in dfs], axis=1)
return new_dd_object(toolz.merge(dsk, *[df.dask for df in dfs]),
name, meta, dfs[0].divisions)
def concat_indexed_dataframes(dfs, axis=0, join='outer'):
""" Concatenate indexed dataframes together along the index """
meta = methods.concat([df._meta for df in dfs], axis=axis, join=join)
empties = [strip_unknown_categories(df._meta) for df in dfs]
dfs2, divisions, parts = align_partitions(*dfs)
name = 'concat-indexed-' + tokenize(join, *dfs)
parts2 = [[df if df is not None else empty
for df, empty in zip(part, empties)]
for part in parts]
dsk = dict(((name, i), (methods.concat, part, axis, join))
for i, part in enumerate(parts2))
for df in dfs2:
dsk.update(df.dask)
return new_dd_object(dsk, name, meta, divisions)
def stack_partitions(dfs, divisions, join='outer'):
"""Concatenate partitions on axis=0 by doing a simple stack"""
meta = methods.concat([df._meta for df in dfs], join=join)
empty = strip_unknown_categories(meta)
name = 'concat-{0}'.format(tokenize(*dfs))
dsk = {}
i = 0
for df in dfs:
dsk.update(df.dask)
# An error will be raised if the schemas or categories don't match. In
# this case we need to pass along the meta object to transform each
# partition, so they're all equivalent.
try:
df._meta == meta
match = True
except (ValueError, TypeError):
match = False
for key in df._keys():
if match:
dsk[(name, i)] = key
else:
dsk[(name, i)] = (methods.concat, [empty, key], 0, join)
i += 1
return new_dd_object(dsk, name, meta, divisions)
def concat(dfs, axis=0, join='outer', interleave_partitions=False):
""" Concatenate DataFrames along rows.
    - When axis=0 (default), concatenate DataFrames row-wise:
      - If all divisions are known and ordered, concatenate DataFrames keeping
        divisions. When divisions are not ordered, specifying
        interleave_partitions=True allows concatenating them partition by
        partition.
      - If any division is unknown, concatenate DataFrames resetting the
        divisions to unknown (None).
    - When axis=1, concatenate DataFrames column-wise:
      - Allowed only if all divisions are known.
      - If any division is unknown, a ValueError is raised.
Parameters
----------
dfs : list
List of dask.DataFrames to be concatenated
axis : {0, 1, 'index', 'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis
interleave_partitions : bool, default False
        Whether to concatenate DataFrames ignoring their order. If True,
        divisions are concatenated partition by partition.
Examples
--------
If all divisions are known and ordered, divisions are kept.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(6, 8, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 3, 6, 8, 10)>
Unable to concatenate if divisions are not ordered.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(2, 3, 6)>
>>> dd.concat([a, b]) # doctest: +SKIP
ValueError: All inputs have known divisions which cannot be concatenated
in order. Specify interleave_partitions=True to ignore order
Specify interleave_partitions=True to ignore the division order.
>>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 2, 3, 5, 6)>
    If any division is unknown, the resulting divisions will be unknown.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(None, None)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(1, 4, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
"""
if not isinstance(dfs, list):
raise TypeError("dfs must be a list of DataFrames/Series objects")
if len(dfs) == 0:
raise ValueError('No objects to concatenate')
if len(dfs) == 1:
return dfs[0]
if join not in ('inner', 'outer'):
raise ValueError("'join' must be 'inner' or 'outer'")
axis = DataFrame._validate_axis(axis)
dasks = [df for df in dfs if isinstance(df, _Frame)]
dfs = _maybe_from_pandas(dfs)
if axis == 1:
if all(df.known_divisions for df in dasks):
return concat_indexed_dataframes(dfs, axis=axis, join=join)
elif (len(dasks) == len(dfs) and
all(not df.known_divisions for df in dfs) and
len({df.npartitions for df in dasks}) == 1):
warn("Concatenating dataframes with unknown divisions.\n"
"We're assuming that the indexes of each dataframes are \n"
"aligned. This assumption is not generally safe.")
return concat_unindexed_dataframes(dfs)
else:
raise ValueError('Unable to concatenate DataFrame with unknown '
'division specifying axis=1')
else:
if all(df.known_divisions for df in dasks):
# each DataFrame's division must be greater than previous one
if all(dfs[i].divisions[-1] < dfs[i + 1].divisions[0]
for i in range(len(dfs) - 1)):
divisions = []
for df in dfs[:-1]:
# remove last to concatenate with next
divisions += df.divisions[:-1]
divisions += dfs[-1].divisions
return stack_partitions(dfs, divisions, join=join)
elif interleave_partitions:
return concat_indexed_dataframes(dfs, join=join)
else:
raise ValueError('All inputs have known divisions which '
'cannot be concatenated in order. Specify '
'interleave_partitions=True to ignore order')
else:
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return stack_partitions(dfs, divisions, join=join)
| bsd-3-clause |
jefffohl/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wx.py | 69 | 77038 | from __future__ import division
"""
backend_wx.py
A wxPython backend for matplotlib, based (very heavily) on
backend_template.py and backend_gtk.py
Author: Jeremy O'Donoghue ([email protected])
Derived from original copyright work by John Hunter
([email protected])
Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4
License: This work is licensed under a PSF compatible license. A copy
should be included with this source code.
"""
"""
KNOWN BUGS -
- Mousewheel (on Windows) only works after menu button has been pressed
at least once
- Mousewheel on Linux (wxGTK linked against GTK 1.2) does not work at all
- Vertical text renders horizontally if you use a non TrueType font
on Windows. This is a known wxPython issue. Work-around is to ensure
that you use a TrueType font.
- Pcolor demo puts chart slightly outside bounding box (approx 1-2 pixels
to the bottom left)
- Outputting to bitmap more than 300dpi results in some text being incorrectly
scaled. Seems to be a wxPython bug on Windows or font point sizes > 60, as
font size is correctly calculated.
- Performance poorer than for previous direct rendering version
- TIFF output not supported on wxGTK. This is a wxGTK issue
- Text is not anti-aliased on wxGTK. This is probably a platform
configuration issue.
- If a second call is made to show(), no figure is generated (#866965)
Not implemented:
- Printing
Fixed this release:
- Bug #866967: Interactive operation issues fixed [JDH]
- Bug #866969: Dynamic update does not function with backend_wx [JOD]
Examples which work on this release:
---------------------------------------------------------------
| Windows 2000 | Linux |
| wxPython 2.3.3 | wxPython 2.4.2.4 |
--------------------------------------------------------------|
- alignment_test.py | TBE | OK |
- arctest.py | TBE | (3) |
- axes_demo.py | OK | OK |
- axes_props.py | OK | OK |
- bar_stacked.py | TBE | OK |
- barchart_demo.py | OK | OK |
- color_demo.py | OK | OK |
- csd_demo.py | OK | OK |
- dynamic_demo.py | N/A | N/A |
- dynamic_demo_wx.py | TBE | OK |
- embedding_in_gtk.py | N/A | N/A |
- embedding_in_wx.py | OK | OK |
- errorbar_demo.py | OK | OK |
- figtext.py | OK | OK |
- histogram_demo.py | OK | OK |
- interactive.py | N/A (2) | N/A (2) |
- interactive2.py | N/A (2) | N/A (2) |
- legend_demo.py | OK | OK |
- legend_demo2.py | OK | OK |
- line_styles.py | OK | OK |
- log_demo.py | OK | OK |
- logo.py | OK | OK |
- mpl_with_glade.py | N/A (2) | N/A (2) |
- mri_demo.py | OK | OK |
- mri_demo_with_eeg.py | OK | OK |
- multiple_figs_demo.py | OK | OK |
- pcolor_demo.py | OK | OK |
- psd_demo.py | OK | OK |
- scatter_demo.py | OK | OK |
- scatter_demo2.py | OK | OK |
- simple_plot.py | OK | OK |
- stock_demo.py | OK | OK |
- subplot_demo.py | OK | OK |
- system_monitor.py | N/A (2) | N/A (2) |
- text_handles.py | OK | OK |
- text_themes.py | OK | OK |
- vline_demo.py | OK | OK |
---------------------------------------------------------------
(2) - Script uses GTK-specific features - cannot not run,
but wxPython equivalent should be written.
(3) - Clipping seems to be broken.
"""
cvs_id = '$Id: backend_wx.py 6484 2008-12-03 18:38:03Z jdh2358 $'
import sys, os, os.path, math, StringIO, weakref, warnings
import numpy as npy
# Debugging settings here...
# Debug level set here. If the debug level is less than 5, information
# messages (progressively more info for lower value) are printed. In addition,
# traceback is performed, and pdb activated, for all uncaught exceptions in
# this case
_DEBUG = 5
if _DEBUG < 5:
import traceback, pdb
_DEBUG_lvls = {1 : 'Low ', 2 : 'Med ', 3 : 'High', 4 : 'Error' }
try:
import wx
backend_version = wx.VERSION_STRING
except:
raise ImportError("Matplotlib backend_wx requires wxPython be installed")
#!!! this is the call that is causing the exception swallowing !!!
#wx.InitAllImageHandlers()
def DEBUG_MSG(string, lvl=3, o=None):
if lvl >= _DEBUG:
cls = o.__class__
# Jeremy, often times the commented line won't print but the
# one below does. I think WX is redefining stderr, damned
# beast
#print >>sys.stderr, "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
print "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
def debug_on_error(type, value, tb):
"""Code due to Thomas Heller - published in Python Cookbook (O'Reilley)"""
traceback.print_exc(type, value, tb)
print
pdb.pm() # jdh uncomment
class fake_stderr:
"""Wx does strange things with stderr, as it makes the assumption that there
is probably no console. This redirects stderr to the console, since we know
that there is one!"""
def write(self, msg):
print "Stderr: %s\n\r" % msg
#if _DEBUG < 5:
# sys.excepthook = debug_on_error
# WxLogger =wx.LogStderr()
# sys.stderr = fake_stderr
# Event binding code changed after version 2.5
if wx.VERSION_STRING >= '2.5':
def bind(actor,event,action,**kw):
actor.Bind(event,action,**kw)
else:
def bind(actor,event,action,id=None):
if id is not None:
event(actor, id, action)
else:
event(actor,action)
import matplotlib
from matplotlib import verbose
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureCanvasBase, FigureManagerBase, NavigationToolbar2, \
cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.artist import Artist
from matplotlib.cbook import exception_to_str, is_string_like, is_writable_file_like
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.text import _process_text_args, Text
from matplotlib.transforms import Affine2D
from matplotlib.widgets import SubplotTool
from matplotlib import rcParams
# the True dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
# Delay time for idle checks
IDLE_DELAY = 5
def error_msg_wx(msg, parent=None):
"""
Signal an error condition -- in a GUI, popup a error dialog
"""
dialog =wx.MessageDialog(parent = parent,
message = msg,
caption = 'Matplotlib backend_wx error',
style=wx.OK | wx.CENTRE)
dialog.ShowModal()
dialog.Destroy()
return None
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
class RendererWx(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles. It acts as the
'renderer' instance used by many classes in the hierarchy.
"""
#In wxPython, drawing is performed on a wxDC instance, which will
    #generally be mapped to the client area of the window displaying
#the plot. Under wxPython, the wxDC instance has a wx.Pen which
#describes the colour and weight of any lines drawn, and a wxBrush
#which describes the fill colour of any closed polygon.
fontweights = {
100 : wx.LIGHT,
200 : wx.LIGHT,
300 : wx.LIGHT,
400 : wx.NORMAL,
500 : wx.NORMAL,
600 : wx.NORMAL,
700 : wx.BOLD,
800 : wx.BOLD,
900 : wx.BOLD,
'ultralight' : wx.LIGHT,
'light' : wx.LIGHT,
'normal' : wx.NORMAL,
'medium' : wx.NORMAL,
'semibold' : wx.NORMAL,
'bold' : wx.BOLD,
'heavy' : wx.BOLD,
'ultrabold' : wx.BOLD,
'black' : wx.BOLD
}
fontangles = {
'italic' : wx.ITALIC,
'normal' : wx.NORMAL,
'oblique' : wx.SLANT }
# wxPython allows for portable font styles, choosing them appropriately
# for the target platform. Map some standard font names to the portable
# styles
    # QUESTION: Would it be wise to agree on standard fontnames across all backends?
fontnames = { 'Sans' : wx.SWISS,
'Roman' : wx.ROMAN,
'Script' : wx.SCRIPT,
'Decorative' : wx.DECORATIVE,
'Modern' : wx.MODERN,
'Courier' : wx.MODERN,
'courier' : wx.MODERN }
def __init__(self, bitmap, dpi):
"""
Initialise a wxWindows renderer instance.
"""
DEBUG_MSG("__init__()", 1, self)
if wx.VERSION_STRING < "2.8":
raise RuntimeError("matplotlib no longer supports wxPython < 2.8 for the Wx backend.\nYou may, however, use the WxAgg backend.")
self.width = bitmap.GetWidth()
self.height = bitmap.GetHeight()
self.bitmap = bitmap
self.fontd = {}
self.dpi = dpi
self.gc = None
def flipy(self):
return True
def offset_text_height(self):
return True
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
"""
#return 1, 1
if ismath: s = self.strip_math(s)
if self.gc is None:
gc = self.new_gc()
else:
gc = self.gc
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
gfx_ctx.SetFont(font, wx.BLACK)
w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)
return w, h, descent
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def handle_clip_rectangle(self, gc):
new_bounds = gc.get_clip_rectangle()
if new_bounds is not None:
new_bounds = new_bounds.bounds
gfx_ctx = gc.gfx_ctx
if gfx_ctx._lastcliprect != new_bounds:
gfx_ctx._lastcliprect = new_bounds
if new_bounds is None:
gfx_ctx.ResetClip()
else:
gfx_ctx.Clip(new_bounds[0], self.height - new_bounds[1] - new_bounds[3],
new_bounds[2], new_bounds[3])
#@staticmethod
def convert_path(gfx_ctx, tpath):
wxpath = gfx_ctx.CreatePath()
for points, code in tpath.iter_segments():
if code == Path.MOVETO:
wxpath.MoveToPoint(*points)
elif code == Path.LINETO:
wxpath.AddLineToPoint(*points)
elif code == Path.CURVE3:
wxpath.AddQuadCurveToPoint(*points)
elif code == Path.CURVE4:
wxpath.AddCurveToPoint(*points)
elif code == Path.CLOSEPOLY:
wxpath.CloseSubpath()
return wxpath
convert_path = staticmethod(convert_path)
def draw_path(self, gc, path, transform, rgbFace=None):
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
transform = transform + Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
tpath = transform.transform_path(path)
wxpath = self.convert_path(gfx_ctx, tpath)
if rgbFace is not None:
gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace)))
gfx_ctx.DrawPath(wxpath)
else:
gfx_ctx.StrokePath(wxpath)
gc.unselect()
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
if bbox != None:
l,b,w,h = bbox.bounds
else:
l=0
            b=0
w=self.width
h=self.height
rows, cols, image_str = im.as_rgba_str()
image_array = npy.fromstring(image_str, npy.uint8)
image_array.shape = rows, cols, 4
bitmap = wx.BitmapFromBufferRGBA(cols,rows,image_array)
gc = self.get_gc()
gc.select()
gc.gfx_ctx.DrawBitmap(bitmap,int(l),int(b),int(w),int(h))
gc.unselect()
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the matplotlib.text.Text instance
"""
if ismath: s = self.strip_math(s)
DEBUG_MSG("draw_text()", 1, self)
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
color = gc.get_wxcolour(gc.get_rgb())
gfx_ctx.SetFont(font, color)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
x = int(x)
y = int(y-h)
if angle == 0.0:
gfx_ctx.DrawText(s, x, y)
else:
rads = angle / 180.0 * math.pi
xo = h * math.sin(rads)
yo = h * math.cos(rads)
gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)
gc.unselect()
def new_gc(self):
"""
Return an instance of a GraphicsContextWx, and sets the current gc copy
"""
DEBUG_MSG('new_gc()', 2, self)
self.gc = GraphicsContextWx(self.bitmap, self)
self.gc.select()
self.gc.unselect()
return self.gc
def get_gc(self):
"""
Fetch the locally cached gc.
"""
# This is a dirty hack to allow anything with access to a renderer to
# access the current graphics context
assert self.gc != None, "gc must be defined"
return self.gc
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font =wx.Font(int(size+0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font
def points_to_pixels(self, points):
"""
convert point measures to pixes using dpi and the pixels per
inch of the display
"""
return points*(PIXELS_PER_INCH/72.0*self.dpi/72.0)
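# Worked example (illustrative, not part of the backend): with the module-level
# PIXELS_PER_INCH of 75 and an assumed figure dpi of 80, a 12 point font maps
# to 12 * (75/72.0) * (80/72.0) ~= 13.9 device pixels, which is exactly what
# RendererWx.points_to_pixels computes above.
def _points_to_pixels_sketch(points, dpi):
    return points * (PIXELS_PER_INCH / 72.0 * dpi / 72.0)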
class GraphicsContextWx(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc...
This class stores a reference to a wxMemoryDC, and a
wxGraphicsContext that draws to it. Creating a wxGraphicsContext
seems to be fairly heavy, so these objects are cached based on the
bitmap object that is passed in.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). wxPython uses an int interval, but
since wxPython colour management is rather simple, I have not chosen
to implement a separate colour manager class.
"""
_capd = { 'butt': wx.CAP_BUTT,
'projecting': wx.CAP_PROJECTING,
'round': wx.CAP_ROUND }
_joind = { 'bevel': wx.JOIN_BEVEL,
'miter': wx.JOIN_MITER,
'round': wx.JOIN_ROUND }
_dashd_wx = { 'solid': wx.SOLID,
'dashed': wx.SHORT_DASH,
'dashdot': wx.DOT_DASH,
'dotted': wx.DOT }
_cache = weakref.WeakKeyDictionary()
def __init__(self, bitmap, renderer):
GraphicsContextBase.__init__(self)
#assert self.Ok(), "wxMemoryDC not OK to use"
DEBUG_MSG("__init__()", 1, self)
dc, gfx_ctx = self._cache.get(bitmap, (None, None))
if dc is None:
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
gfx_ctx = wx.GraphicsContext.Create(dc)
gfx_ctx._lastcliprect = None
self._cache[bitmap] = dc, gfx_ctx
self.bitmap = bitmap
self.dc = dc
self.gfx_ctx = gfx_ctx
self._pen = wx.Pen('BLACK', 1, wx.SOLID)
gfx_ctx.SetPen(self._pen)
self._style = wx.SOLID
self.renderer = renderer
def select(self):
"""
Select the current bitmap into this wxDC instance
"""
if sys.platform=='win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True
def unselect(self):
"""
        Select a Null bitmap into this wxDC instance
"""
if sys.platform=='win32':
self.dc.SelectObject(wx.NullBitmap)
self.IsSelected = False
def set_foreground(self, fg, isRGB=None):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
"""
# Implementation note: wxPython has a separate concept of pen and
# brush - the brush fills any outline trace left by the pen.
# Here we set both to the same colour - if a figure is not to be
# filled, the renderer will set the brush to be transparent
# Same goes for text foreground...
DEBUG_MSG("set_foreground()", 1, self)
self.select()
GraphicsContextBase.set_foreground(self, fg, isRGB)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_graylevel(self, frac):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
"""
DEBUG_MSG("set_graylevel()", 1, self)
self.select()
GraphicsContextBase.set_graylevel(self, frac)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linewidth(self, w):
"""
Set the line width.
"""
DEBUG_MSG("set_linewidth()", 1, self)
self.select()
if w>0 and w<1: w = 1
GraphicsContextBase.set_linewidth(self, w)
lw = int(self.renderer.points_to_pixels(self._linewidth))
if lw==0: lw = 1
self._pen.SetWidth(lw)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
DEBUG_MSG("set_capstyle()", 1, self)
self.select()
GraphicsContextBase.set_capstyle(self, cs)
self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
DEBUG_MSG("set_joinstyle()", 1, self)
self.select()
GraphicsContextBase.set_joinstyle(self, js)
self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linestyle(self, ls):
"""
Set the line style to be one of
"""
DEBUG_MSG("set_linestyle()", 1, self)
self.select()
GraphicsContextBase.set_linestyle(self, ls)
try:
self._style = GraphicsContextWx._dashd_wx[ls]
except KeyError:
            self._style = wx.LONG_DASH  # Style not used elsewhere...
# On MS Windows platform, only line width of 1 allowed for dash lines
if wx.Platform == '__WXMSW__':
self.set_linewidth(1)
self._pen.SetStyle(self._style)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def get_wxcolour(self, color):
"""return a wx.Colour from RGB format"""
DEBUG_MSG("get_wx_color()", 1, self)
if len(color) == 3:
r, g, b = color
r *= 255
g *= 255
b *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b))
else:
r, g, b, a = color
r *= 255
g *= 255
b *= 255
a *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b), alpha=int(a))
class FigureCanvasWx(FigureCanvasBase, wx.Panel):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window probably
implements a wx.Sizer to control the displayed control size - but we give a
hint as to our preferred minimum size.
"""
keyvald = {
wx.WXK_CONTROL : 'control',
wx.WXK_SHIFT : 'shift',
wx.WXK_ALT : 'alt',
wx.WXK_LEFT : 'left',
wx.WXK_UP : 'up',
wx.WXK_RIGHT : 'right',
wx.WXK_DOWN : 'down',
wx.WXK_ESCAPE : 'escape',
wx.WXK_F1 : 'f1',
wx.WXK_F2 : 'f2',
wx.WXK_F3 : 'f3',
wx.WXK_F4 : 'f4',
wx.WXK_F5 : 'f5',
wx.WXK_F6 : 'f6',
wx.WXK_F7 : 'f7',
wx.WXK_F8 : 'f8',
wx.WXK_F9 : 'f9',
wx.WXK_F10 : 'f10',
wx.WXK_F11 : 'f11',
wx.WXK_F12 : 'f12',
wx.WXK_SCROLL : 'scroll_lock',
wx.WXK_PAUSE : 'break',
wx.WXK_BACK : 'backspace',
wx.WXK_RETURN : 'enter',
wx.WXK_INSERT : 'insert',
wx.WXK_DELETE : 'delete',
wx.WXK_HOME : 'home',
wx.WXK_END : 'end',
wx.WXK_PRIOR : 'pageup',
wx.WXK_NEXT : 'pagedown',
wx.WXK_PAGEUP : 'pageup',
wx.WXK_PAGEDOWN : 'pagedown',
wx.WXK_NUMPAD0 : '0',
wx.WXK_NUMPAD1 : '1',
wx.WXK_NUMPAD2 : '2',
wx.WXK_NUMPAD3 : '3',
wx.WXK_NUMPAD4 : '4',
wx.WXK_NUMPAD5 : '5',
wx.WXK_NUMPAD6 : '6',
wx.WXK_NUMPAD7 : '7',
wx.WXK_NUMPAD8 : '8',
wx.WXK_NUMPAD9 : '9',
wx.WXK_NUMPAD_ADD : '+',
wx.WXK_NUMPAD_SUBTRACT : '-',
wx.WXK_NUMPAD_MULTIPLY : '*',
wx.WXK_NUMPAD_DIVIDE : '/',
wx.WXK_NUMPAD_DECIMAL : 'dec',
wx.WXK_NUMPAD_ENTER : 'enter',
wx.WXK_NUMPAD_UP : 'up',
wx.WXK_NUMPAD_RIGHT : 'right',
wx.WXK_NUMPAD_DOWN : 'down',
wx.WXK_NUMPAD_LEFT : 'left',
wx.WXK_NUMPAD_PRIOR : 'pageup',
wx.WXK_NUMPAD_NEXT : 'pagedown',
wx.WXK_NUMPAD_PAGEUP : 'pageup',
wx.WXK_NUMPAD_PAGEDOWN : 'pagedown',
wx.WXK_NUMPAD_HOME : 'home',
wx.WXK_NUMPAD_END : 'end',
wx.WXK_NUMPAD_INSERT : 'insert',
wx.WXK_NUMPAD_DELETE : 'delete',
}
def __init__(self, parent, id, figure):
"""
Initialise a FigureWx instance.
- Initialise the FigureCanvasBase and wxPanel parents.
- Set event handlers for:
EVT_SIZE (Resize event)
EVT_PAINT (Paint event)
"""
FigureCanvasBase.__init__(self, figure)
# Set preferred window size hint - helps the sizer (if one is
# connected)
l,b,w,h = figure.bbox.bounds
w = int(math.ceil(w))
h = int(math.ceil(h))
wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))
def do_nothing(*args, **kwargs):
warnings.warn('could not find a setinitialsize function for backend_wx; please report your wxpython version=%s to the matplotlib developers list'%backend_version)
pass
# try to find the set size func across wx versions
try:
getattr(self, 'SetInitialSize')
except AttributeError:
self.SetInitialSize = getattr(self, 'SetBestFittingSize', do_nothing)
if not hasattr(self,'IsShownOnScreen'):
self.IsShownOnScreen = getattr(self, 'IsVisible', lambda *args: True)
# Create the drawing bitmap
self.bitmap =wx.EmptyBitmap(w, h)
DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w,h), 2, self)
# TODO: Add support for 'point' inspection and plot navigation.
self._isDrawn = False
bind(self, wx.EVT_SIZE, self._onSize)
bind(self, wx.EVT_PAINT, self._onPaint)
bind(self, wx.EVT_ERASE_BACKGROUND, self._onEraseBackground)
bind(self, wx.EVT_KEY_DOWN, self._onKeyDown)
bind(self, wx.EVT_KEY_UP, self._onKeyUp)
bind(self, wx.EVT_RIGHT_DOWN, self._onRightButtonDown)
bind(self, wx.EVT_RIGHT_DCLICK, self._onRightButtonDown)
bind(self, wx.EVT_RIGHT_UP, self._onRightButtonUp)
bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel)
bind(self, wx.EVT_LEFT_DOWN, self._onLeftButtonDown)
bind(self, wx.EVT_LEFT_DCLICK, self._onLeftButtonDown)
bind(self, wx.EVT_LEFT_UP, self._onLeftButtonUp)
bind(self, wx.EVT_MOTION, self._onMotion)
bind(self, wx.EVT_LEAVE_WINDOW, self._onLeave)
bind(self, wx.EVT_ENTER_WINDOW, self._onEnter)
bind(self, wx.EVT_IDLE, self._onIdle)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.macros = {} # dict from wx id to seq of macros
self.Printer_Init()
def Destroy(self, *args, **kwargs):
wx.Panel.Destroy(self, *args, **kwargs)
def Copy_to_Clipboard(self, event=None):
"copy bitmap of canvas to system clipboard"
bmp_obj = wx.BitmapDataObject()
bmp_obj.SetBitmap(self.bitmap)
wx.TheClipboard.Open()
wx.TheClipboard.SetData(bmp_obj)
wx.TheClipboard.Close()
def Printer_Init(self):
"""initialize printer settings using wx methods"""
self.printerData = wx.PrintData()
self.printerData.SetPaperId(wx.PAPER_LETTER)
self.printerData.SetPrintMode(wx.PRINT_MODE_PRINTER)
self.printerPageData= wx.PageSetupDialogData()
self.printerPageData.SetMarginBottomRight((25,25))
self.printerPageData.SetMarginTopLeft((25,25))
self.printerPageData.SetPrintData(self.printerData)
self.printer_width = 5.5
self.printer_margin= 0.5
def Printer_Setup(self, event=None):
"""set up figure for printing. The standard wx Printer
Setup Dialog seems to die easily. Therefore, this setup
simply asks for image width and margin for printing. """
dmsg = """Width of output figure in inches.
The current aspect ratio will be kept."""
dlg = wx.Dialog(self, -1, 'Page Setup for Printing' , (-1,-1))
df = dlg.GetFont()
df.SetWeight(wx.NORMAL)
df.SetPointSize(11)
dlg.SetFont(df)
x_wid = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_width, size=(70,-1))
x_mrg = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_margin,size=(70,-1))
sizerAll = wx.BoxSizer(wx.VERTICAL)
sizerAll.Add(wx.StaticText(dlg,-1,dmsg),
0, wx.ALL | wx.EXPAND, 5)
sizer = wx.FlexGridSizer(0,3)
sizerAll.Add(sizer, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(wx.StaticText(dlg,-1,'Figure Width'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(x_wid,
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(wx.StaticText(dlg,-1,'in'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(wx.StaticText(dlg,-1,'Margin'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(x_mrg,
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(wx.StaticText(dlg,-1,'in'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
btn = wx.Button(dlg,wx.ID_OK, " OK ")
btn.SetDefault()
sizer.Add(btn, 1, wx.ALIGN_LEFT, 5)
btn = wx.Button(dlg,wx.ID_CANCEL, " CANCEL ")
sizer.Add(btn, 1, wx.ALIGN_LEFT, 5)
dlg.SetSizer(sizerAll)
dlg.SetAutoLayout(True)
sizerAll.Fit(dlg)
if dlg.ShowModal() == wx.ID_OK:
try:
self.printer_width = float(x_wid.GetValue())
self.printer_margin = float(x_mrg.GetValue())
except:
pass
if ((self.printer_width + self.printer_margin) > 7.5):
self.printerData.SetOrientation(wx.LANDSCAPE)
else:
self.printerData.SetOrientation(wx.PORTRAIT)
dlg.Destroy()
return
def Printer_Setup2(self, event=None):
"""set up figure for printing. Using the standard wx Printer
Setup Dialog. """
if hasattr(self, 'printerData'):
data = wx.PageSetupDialogData()
data.SetPrintData(self.printerData)
else:
data = wx.PageSetupDialogData()
data.SetMarginTopLeft( (15, 15) )
data.SetMarginBottomRight( (15, 15) )
dlg = wx.PageSetupDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData()
tl = data.GetMarginTopLeft()
br = data.GetMarginBottomRight()
self.printerData = wx.PrintData(data.GetPrintData())
dlg.Destroy()
def Printer_Preview(self, event=None):
""" generate Print Preview with wx Print mechanism"""
po1 = PrintoutWx(self, width=self.printer_width,
margin=self.printer_margin)
po2 = PrintoutWx(self, width=self.printer_width,
margin=self.printer_margin)
self.preview = wx.PrintPreview(po1,po2,self.printerData)
if not self.preview.Ok(): print "error with preview"
self.preview.SetZoom(50)
frameInst= self
while not isinstance(frameInst, wx.Frame):
frameInst= frameInst.GetParent()
frame = wx.PreviewFrame(self.preview, frameInst, "Preview")
frame.Initialize()
frame.SetPosition(self.GetPosition())
frame.SetSize((850,650))
frame.Centre(wx.BOTH)
frame.Show(True)
self.gui_repaint()
def Printer_Print(self, event=None):
""" Print figure using wx Print mechanism"""
pdd = wx.PrintDialogData()
        # SetPrintData for 2.4 compatibility
pdd.SetPrintData(self.printerData)
pdd.SetToPage(1)
printer = wx.Printer(pdd)
printout = PrintoutWx(self, width=int(self.printer_width),
margin=int(self.printer_margin))
print_ok = printer.Print(self, printout, True)
if wx.VERSION_STRING >= '2.5':
if not print_ok and not printer.GetLastError() == wx.PRINTER_CANCELLED:
wx.MessageBox("""There was a problem printing.
Perhaps your current printer is not set correctly?""",
"Printing", wx.OK)
else:
if not print_ok:
wx.MessageBox("""There was a problem printing.
Perhaps your current printer is not set correctly?""",
"Printing", wx.OK)
printout.Destroy()
self.gui_repaint()
def draw_idle(self):
"""
Delay rendering until the GUI is idle.
"""
DEBUG_MSG("draw_idle()", 1, self)
self._isDrawn = False # Force redraw
# Create a timer for handling draw_idle requests
# If there are events pending when the timer is
# complete, reset the timer and continue. The
# alternative approach, binding to wx.EVT_IDLE,
# doesn't behave as nicely.
if hasattr(self,'_idletimer'):
self._idletimer.Restart(IDLE_DELAY)
else:
self._idletimer = wx.FutureCall(IDLE_DELAY,self._onDrawIdle)
# FutureCall is a backwards-compatible alias;
# CallLater became available in 2.7.1.1.
def _onDrawIdle(self, *args, **kwargs):
if wx.GetApp().Pending():
self._idletimer.Restart(IDLE_DELAY, *args, **kwargs)
else:
del self._idletimer
# GUI event or explicit draw call may already
# have caused the draw to take place
if not self._isDrawn:
self.draw(*args, **kwargs)
def draw(self, drawDC=None):
"""
Render the figure using RendererWx instance renderer, or using a
previously defined renderer if none is specified.
"""
DEBUG_MSG("draw()", 1, self)
self.renderer = RendererWx(self.bitmap, self.figure.dpi)
self.figure.draw(self.renderer)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def flush_events(self):
wx.Yield()
def start_event_loop(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
Call signature::
start_event_loop(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
Raises RuntimeError if event loop is already running.
"""
if hasattr(self, '_event_loop'):
raise RuntimeError("Event loop already running")
id = wx.NewId()
timer = wx.Timer(self, id=id)
if timeout > 0:
timer.Start(timeout*1000, oneShot=True)
bind(self, wx.EVT_TIMER, self.stop_event_loop, id=id)
# Event loop handler for start/stop event loop
self._event_loop = wx.EventLoop()
self._event_loop.Run()
timer.Stop()
def stop_event_loop(self, event=None):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
if hasattr(self,'_event_loop'):
if self._event_loop.IsRunning():
self._event_loop.Exit()
del self._event_loop
def _get_imagesave_wildcards(self):
'return the wildcard string for the filesave dialog'
default_filetype = self.get_default_filetype()
filetypes = self.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
wildcards = []
extensions = []
filter_index = 0
for i, (name, exts) in enumerate(sorted_filetypes):
ext_list = ';'.join(['*.%s' % ext for ext in exts])
extensions.append(exts[0])
wildcard = '%s (%s)|%s' % (name, ext_list, ext_list)
if default_filetype in exts:
filter_index = i
wildcards.append(wildcard)
wildcards = '|'.join(wildcards)
return wildcards, extensions, filter_index
def gui_repaint(self, drawDC=None):
"""
Performs update of the displayed image on the GUI canvas, using the
supplied device context. If drawDC is None, a ClientDC will be used to
redraw the image.
"""
DEBUG_MSG("gui_repaint()", 1, self)
if self.IsShownOnScreen():
if drawDC is None:
drawDC=wx.ClientDC(self)
drawDC.BeginDrawing()
drawDC.DrawBitmap(self.bitmap, 0, 0)
drawDC.EndDrawing()
#wx.GetApp().Yield()
else:
pass
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['pcx'] = 'PCX'
filetypes['png'] = 'Portable Network Graphics'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
filetypes['xpm'] = 'X pixmap'
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasBase.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artist contains() methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
def print_bmp(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs)
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_JPEG, *args, **kwargs)
print_jpg = print_jpeg
def print_pcx(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs)
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs)
def print_tiff(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_TIF, *args, **kwargs)
print_tif = print_tiff
def print_xpm(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs)
def _print_image(self, filename, filetype, *args, **kwargs):
origBitmap = self.bitmap
l,b,width,height = self.figure.bbox.bounds
width = int(math.ceil(width))
height = int(math.ceil(height))
self.bitmap = wx.EmptyBitmap(width, height)
renderer = RendererWx(self.bitmap, self.figure.dpi)
gc = renderer.new_gc()
self.figure.draw(renderer)
# Now that we have rendered into the bitmap, save it
# to the appropriate file type and clean up
if is_string_like(filename):
if not self.bitmap.SaveFile(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError('Could not save figure to %s\n' % (filename))
elif is_writable_file_like(filename):
if not self.bitmap.ConvertToImage().SaveStream(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError('Could not save figure to %s\n' % (filename))
# Restore everything to normal
self.bitmap = origBitmap
# Note: draw is required here since bits of state about the
# last renderer are strewn about the artist draw methods. Do
# not remove the draw without first verifying that these have
# been cleaned up. The artist contains() methods will fail
# otherwise.
if self._isDrawn:
self.draw()
self.Refresh()
def get_default_filetype(self):
return 'png'
def _onPaint(self, evt):
"""
Called when wxPaintEvt is generated
"""
DEBUG_MSG("_onPaint()", 1, self)
drawDC = wx.PaintDC(self)
if not self._isDrawn:
self.draw(drawDC=drawDC)
else:
self.gui_repaint(drawDC=drawDC)
evt.Skip()
def _onEraseBackground(self, evt):
"""
        Called when the window background is about to be erased; since we
        blit the entire image, we can leave this empty to suppress flicker.
"""
pass
def _onSize(self, evt):
"""
        Called when a wx.SizeEvent is generated.
In this application we attempt to resize to fit the window, so it
is better to take the performance hit and redraw the whole window.
"""
DEBUG_MSG("_onSize()", 2, self)
# Create a new, correctly sized bitmap
self._width, self._height = self.GetClientSize()
self.bitmap =wx.EmptyBitmap(self._width, self._height)
self._isDrawn = False
if self._width <= 1 or self._height <= 1: return # Empty figure
dpival = self.figure.dpi
winch = self._width/dpival
hinch = self._height/dpival
self.figure.set_size_inches(winch, hinch)
# Rendering will happen on the associated paint event
# so no need to do anything here except to make sure
# the whole background is repainted.
self.Refresh(eraseBackground=False)
def _get_key(self, evt):
keyval = evt.m_keyCode
if keyval in self.keyvald:
key = self.keyvald[keyval]
elif keyval <256:
key = chr(keyval)
else:
key = None
# why is wx upcasing this?
if key is not None: key = key.lower()
return key
def _onIdle(self, evt):
'a GUI idle event'
evt.Skip()
FigureCanvasBase.idle_event(self, guiEvent=evt)
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
evt.Skip()
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
#print 'release key', key
evt.Skip()
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
def _onRightButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt)
def _onRightButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
if self.HasCapture(): self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt)
def _onLeftButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt)
def _onLeftButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
#print 'release button', 1
evt.Skip()
if self.HasCapture(): self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt)
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
#print "delta,rotation,rate",delta,rotation,rate
step = rate*float(rotation)/delta
# Done handling event
evt.Skip()
# Mac is giving two events for every wheel event
# Need to skip every second one
if wx.Platform == '__WXMAC__':
if not hasattr(self,'_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
# Convert to mpl event
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)
def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)
def _onLeave(self, evt):
"""Mouse has left the window."""
evt.Skip()
FigureCanvasBase.leave_notify_event(self, guiEvent = evt)
def _onEnter(self, evt):
"""Mouse has entered the window."""
FigureCanvasBase.enter_notify_event(self, guiEvent = evt)
########################################################################
#
# The following functions and classes are for pylab compatibility
# mode (matplotlib.pylab) and implement figure managers, etc...
#
########################################################################
def _create_wx_app():
"""
Creates a wx.PySimpleApp instance if a wx.App has not been created.
"""
wxapp = wx.GetApp()
if wxapp is None:
wxapp = wx.PySimpleApp()
wxapp.SetExitOnFrameDelete(True)
# retain a reference to the app object so it does not get garbage
# collected and cause segmentation faults
_create_wx_app.theWxApp = wxapp
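# Illustrative sketch (not called by the backend): the guard above is the
# usual "create the wx App exactly once and keep a reference" pattern for
# embedding.  The helper name below is hypothetical example code; newer
# wxPython spells wx.PySimpleApp() as wx.App(False).
def _example_ensure_wx_app():
    import wx
    app = wx.GetApp()
    if app is None:
        app = wx.PySimpleApp()
        app.SetExitOnFrameDelete(True)
        _example_ensure_wx_app.theWxApp = app   # keep it from being collected
    return app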
def draw_if_interactive():
"""
    This should be overridden in a windowing environment if drawing
should be done in interactive python mode
"""
DEBUG_MSG("draw_if_interactive()", 1, None)
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def show():
"""
Current implementation assumes that matplotlib is executed in a PyCrust
shell. It appears to be possible to execute wxPython applications from
within a PyCrust without having to ensure that wxPython has been created
in a secondary thread (e.g. SciPy gui_thread).
Unfortunately, gui_thread seems to introduce a number of further
dependencies on SciPy modules, which I do not wish to introduce
into the backend at this point. If there is a need I will look
into this in a later release.
"""
DEBUG_MSG("show()", 3, None)
for figwin in Gcf.get_all_fig_managers():
figwin.frame.Show()
if show._needmain and not matplotlib.is_interactive():
# start the wxPython gui event if there is not already one running
wxapp = wx.GetApp()
if wxapp is not None:
# wxPython 2.4 has no wx.App.IsMainLoopRunning() method
imlr = getattr(wxapp, 'IsMainLoopRunning', lambda: False)
if not imlr():
wxapp.MainLoop()
show._needmain = False
show._needmain = True
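# Illustrative sketch: show() above stores a flag on the function itself
# (show._needmain) so that wx.MainLoop() is entered at most once per
# session.  The same "run once" idiom, reduced to plain Python with a
# hypothetical helper name:
def _example_run_once(fn):
    def wrapper(*args, **kwargs):
        if wrapper._needmain:
            wrapper._needmain = False
            return fn(*args, **kwargs)
    wrapper._needmain = True
    return wrapper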
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
_create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWx(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
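# Illustrative usage (hypothetical, not part of the backend): pylab reaches
# new_figure_manager() indirectly through the figure() command; calling it
# directly returns a FigureManagerWx whose frame can be shown by hand.
def _example_direct_manager():
    manager = new_figure_manager(1, figsize=(4, 3))
    manager.canvas.figure.gca().plot([0, 1], [0, 1])
    manager.frame.Show()
    return manager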
class FigureFrameWx(wx.Frame):
def __init__(self, num, fig):
# On non-Windows platform, explicitly set the position - fix
# positioning bug on some Linux platforms
if wx.Platform == '__WXMSW__':
pos = wx.DefaultPosition
else:
pos =wx.Point(20,20)
l,b,w,h = fig.bbox.bounds
wx.Frame.__init__(self, parent=None, id=-1, pos=pos,
title="Figure %d" % num)
# Frame will be sized later by the Fit method
DEBUG_MSG("__init__()", 1, self)
self.num = num
statbar = StatusBarWx(self)
self.SetStatusBar(statbar)
self.canvas = self.get_canvas(fig)
self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height))
self.sizer =wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version
self.toolbar = self._get_toolbar(statbar)
if self.toolbar is not None:
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
# Mac platform (OSX 10.3, MacPython) does not seem to cope with
# having a toolbar in a sizer. This work-around gets the buttons
# back, but at the expense of having the toolbar at the top
self.SetToolBar(self.toolbar)
else:
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
# As noted above, doesn't work for Mac.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.figmgr = FigureManagerWx(self.canvas, num, self)
bind(self, wx.EVT_CLOSE, self._onClose)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2Wx(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
def get_canvas(self, fig):
return FigureCanvasWx(self, -1, fig)
def get_figure_manager(self):
DEBUG_MSG("get_figure_manager()", 1, self)
return self.figmgr
def _onClose(self, evt):
DEBUG_MSG("onClose()", 1, self)
self.canvas.stop_event_loop()
Gcf.destroy(self.num)
#self.Destroy()
def GetToolBar(self):
"""Override wxFrame::GetToolBar as we don't have managed toolbar"""
return self.toolbar
def Destroy(self, *args, **kwargs):
wx.Frame.Destroy(self, *args, **kwargs)
if self.toolbar is not None:
self.toolbar.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
return True
class FigureManagerWx(FigureManagerBase):
"""
This class contains the FigureCanvas and GUI frame
It is instantiated by GcfWx whenever a new figure is created. GcfWx is
responsible for managing multiple instances of FigureManagerWx.
NB: FigureManagerBase is found in _pylab_helpers
public attrs
canvas - a FigureCanvasWx(wx.Panel) instance
window - a wxFrame instance - http://www.lpthe.jussieu.fr/~zeitlin/wxWindows/docs/wxwin_wxframe.html#wxframe
"""
def __init__(self, canvas, num, frame):
DEBUG_MSG("__init__()", 1, self)
FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame
self.tb = frame.GetToolBar()
self.toolbar = self.tb # consistent with other backends
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
            if self.tb is not None: self.tb.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def showfig(*args):
frame.Show()
# attach a show method to the figure
self.canvas.figure.show = showfig
def destroy(self, *args):
DEBUG_MSG("destroy()", 1, self)
self.frame.Destroy()
#if self.tb is not None: self.tb.Destroy()
import wx
#wx.GetApp().ProcessIdle()
wx.WakeUpIdle()
def set_window_title(self, title):
self.window.SetTitle(title)
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window)
# Identifiers for toolbar controls - images_wx contains bitmaps for the images
# used in the controls. wxWindows does not provide any stock images, so I've
# 'stolen' those from GTK2, and transformed them into the appropriate format.
#import images_wx
_NTB_AXISMENU = wx.NewId()
_NTB_AXISMENU_BUTTON = wx.NewId()
_NTB_X_PAN_LEFT = wx.NewId()
_NTB_X_PAN_RIGHT = wx.NewId()
_NTB_X_ZOOMIN = wx.NewId()
_NTB_X_ZOOMOUT = wx.NewId()
_NTB_Y_PAN_UP = wx.NewId()
_NTB_Y_PAN_DOWN = wx.NewId()
_NTB_Y_ZOOMIN = wx.NewId()
_NTB_Y_ZOOMOUT = wx.NewId()
#_NTB_SUBPLOT = wx.NewId()
_NTB_SAVE = wx.NewId()
_NTB_CLOSE = wx.NewId()
def _load_bitmap(filename):
"""
Load a bitmap file from the backends/images subdirectory in which the
matplotlib library is installed. The filename parameter should not
contain any path information as this is determined automatically.
Returns a wx.Bitmap object
"""
basedir = os.path.join(rcParams['datapath'],'images')
bmpFilename = os.path.normpath(os.path.join(basedir, filename))
if not os.path.exists(bmpFilename):
raise IOError('Could not find bitmap file "%s"; dying'%bmpFilename)
bmp = wx.Bitmap(bmpFilename)
return bmp
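# Illustrative check of the same lookup performed by _load_bitmap(), using
# only the path arithmetic (no wx.Bitmap is created).  It assumes the
# rcParams['datapath'] entry used above is available in this matplotlib
# version; the function name is hypothetical example code.
def _example_bitmap_path(filename='home.png'):
    import os
    basedir = os.path.join(rcParams['datapath'], 'images')
    return os.path.normpath(os.path.join(basedir, filename))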
class MenuButtonWx(wx.Button):
"""
wxPython does not permit a menu to be incorporated directly into a toolbar.
This class simulates the effect by associating a pop-up menu with a button
in the toolbar, and managing this as though it were a menu.
"""
def __init__(self, parent):
wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ",
style=wx.BU_EXACTFIT)
self._toolbar = parent
self._menu =wx.Menu()
self._axisId = []
# First two menu items never change...
self._allId =wx.NewId()
self._invertId =wx.NewId()
self._menu.Append(self._allId, "All", "Select all axes", False)
self._menu.Append(self._invertId, "Invert", "Invert axes selected", False)
self._menu.AppendSeparator()
bind(self, wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON)
bind(self, wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId)
bind(self, wx.EVT_MENU, self._handleInvertAxesSelected, id=self._invertId)
    def Destroy(self):
        self._menu.Destroy()
        # destroy the underlying widget via the base class, not this method
        wx.Button.Destroy(self)
def _onMenuButton(self, evt):
"""Handle menu button pressed."""
x, y = self.GetPositionTuple()
w, h = self.GetSizeTuple()
self.PopupMenuXY(self._menu, x, y+h-4)
# When menu returned, indicate selection in button
evt.Skip()
def _handleSelectAllAxes(self, evt):
"""Called when the 'select all axes' menu item is selected."""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _handleInvertAxesSelected(self, evt):
"""Called when the invert all menu item is selected"""
if len(self._axisId) == 0: return
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
self._menu.Check(self._axisId[i], False)
else:
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _onMenuItemSelected(self, evt):
"""Called whenever one of the specific axis menu items is selected"""
current = self._menu.IsChecked(evt.GetId())
if current:
new = False
else:
new = True
self._menu.Check(evt.GetId(), new)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def updateAxes(self, maxAxis):
"""Ensures that there are entries for max_axis axes in the menu
(selected by default)."""
if maxAxis > len(self._axisId):
for i in range(len(self._axisId) + 1, maxAxis + 1, 1):
menuId =wx.NewId()
self._axisId.append(menuId)
self._menu.Append(menuId, "Axis %d" % i, "Select axis %d" % i, True)
self._menu.Check(menuId, True)
bind(self, wx.EVT_MENU, self._onMenuItemSelected, id=menuId)
self._toolbar.set_active(range(len(self._axisId)))
def getActiveAxes(self):
"""Return a list of the selected axes."""
active = []
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
active.append(i)
return active
def updateButtonText(self, lst):
"""Update the list of selected axes in the menu button"""
axis_txt = ''
for e in lst:
axis_txt += '%d,' % (e+1)
# remove trailing ',' and add to button string
self.SetLabel("Axes: %s" % axis_txt[:-1])
cursord = {
cursors.MOVE : wx.CURSOR_HAND,
cursors.HAND : wx.CURSOR_HAND,
cursors.POINTER : wx.CURSOR_ARROW,
cursors.SELECT_REGION : wx.CURSOR_CROSS,
}
class SubplotToolWX(wx.Frame):
def __init__(self, targetfig):
wx.Frame.__init__(self, None, -1, "Configure subplots")
toolfig = Figure((6,3))
canvas = FigureCanvasWx(self, -1, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, self)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
self.SetSizer(sizer)
self.Fit()
tool = SubplotTool(targetfig, toolfig)
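# Illustrative sketch of the same embedding recipe used by SubplotToolWX:
# a Figure wrapped in a FigureCanvasWx and placed in a frame with a
# growable sizer.  The function name is hypothetical example code.
def _example_embed_figure(parent=None):
    frame = wx.Frame(parent, -1, "Embedded figure")
    fig = Figure((4, 3))
    canvas = FigureCanvasWx(frame, -1, fig)
    sizer = wx.BoxSizer(wx.VERTICAL)
    sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
    frame.SetSizer(sizer)
    frame.Fit()
    return frame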
class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar):
def __init__(self, canvas):
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
NavigationToolbar2.__init__(self, canvas)
self.canvas = canvas
self._idle = True
self.statbar = None
def get_canvas(self, frame, fig):
return FigureCanvasWx(frame, -1, fig)
def _init_toolbar(self):
DEBUG_MSG("_init_toolbar", 1, self)
self._parent = self.canvas.GetParent()
        _NTB2_HOME = wx.NewId()
        self._NTB2_BACK = wx.NewId()
        self._NTB2_FORWARD = wx.NewId()
        self._NTB2_PAN = wx.NewId()
        self._NTB2_ZOOM = wx.NewId()
        _NTB2_SAVE = wx.NewId()
        _NTB2_SUBPLOT = wx.NewId()
self.SetToolBitmapSize(wx.Size(24,24))
self.AddSimpleTool(_NTB2_HOME, _load_bitmap('home.png'),
'Home', 'Reset original view')
self.AddSimpleTool(self._NTB2_BACK, _load_bitmap('back.png'),
'Back', 'Back navigation view')
self.AddSimpleTool(self._NTB2_FORWARD, _load_bitmap('forward.png'),
'Forward', 'Forward navigation view')
# todo: get new bitmap
self.AddCheckTool(self._NTB2_PAN, _load_bitmap('move.png'),
shortHelp='Pan',
longHelp='Pan with left, zoom with right')
self.AddCheckTool(self._NTB2_ZOOM, _load_bitmap('zoom_to_rect.png'),
shortHelp='Zoom', longHelp='Zoom to rectangle')
self.AddSeparator()
self.AddSimpleTool(_NTB2_SUBPLOT, _load_bitmap('subplots.png'),
'Configure subplots', 'Configure subplot parameters')
self.AddSimpleTool(_NTB2_SAVE, _load_bitmap('filesave.png'),
'Save', 'Save plot contents to file')
bind(self, wx.EVT_TOOL, self.home, id=_NTB2_HOME)
bind(self, wx.EVT_TOOL, self.forward, id=self._NTB2_FORWARD)
bind(self, wx.EVT_TOOL, self.back, id=self._NTB2_BACK)
bind(self, wx.EVT_TOOL, self.zoom, id=self._NTB2_ZOOM)
bind(self, wx.EVT_TOOL, self.pan, id=self._NTB2_PAN)
bind(self, wx.EVT_TOOL, self.configure_subplot, id=_NTB2_SUBPLOT)
bind(self, wx.EVT_TOOL, self.save, id=_NTB2_SAVE)
self.Realize()
def zoom(self, *args):
self.ToggleTool(self._NTB2_PAN, False)
NavigationToolbar2.zoom(self, *args)
def pan(self, *args):
self.ToggleTool(self._NTB2_ZOOM, False)
NavigationToolbar2.pan(self, *args)
def configure_subplot(self, evt):
frame = wx.Frame(None, -1, "Configure subplots")
toolfig = Figure((6,3))
canvas = self.get_canvas(frame, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, frame)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
frame.SetSizer(sizer)
frame.Fit()
tool = SubplotTool(self.canvas.figure, toolfig)
frame.Show()
def save(self, evt):
# Fetch the required filename and file type.
filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
default_file = "image." + self.canvas.get_default_filetype()
dlg = wx.FileDialog(self._parent, "Save to file", "", default_file,
filetypes,
wx.SAVE|wx.OVERWRITE_PROMPT|wx.CHANGE_DIR)
dlg.SetFilterIndex(filter_index)
if dlg.ShowModal() == wx.ID_OK:
dirname = dlg.GetDirectory()
filename = dlg.GetFilename()
DEBUG_MSG('Save file dir:%s name:%s' % (dirname, filename), 3, self)
format = exts[dlg.GetFilterIndex()]
basename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format!=ext:
#looks like they forgot to set the image type drop
#down, going with the extension.
warnings.warn('extension %s did not match the selected image type %s; going with %s'%(ext, format, ext), stacklevel=0)
format = ext
try:
self.canvas.print_figure(
os.path.join(dirname, filename), format=format)
except Exception, e:
error_msg_wx(str(e))
def set_cursor(self, cursor):
cursor =wx.StockCursor(cursord[cursor])
self.canvas.SetCursor( cursor )
def release(self, event):
try: del self.lastrect
except AttributeError: pass
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
canvas = self.canvas
dc =wx.ClientDC(canvas)
# Set logical function to XOR for rubberbanding
dc.SetLogicalFunction(wx.XOR)
# Set dc brush and pen
# Here I set brush and pen to white and grey respectively
# You can set it to your own choices
# The brush setting is not really needed since we
# dont do any filling of the dc. It is set just for
# the sake of completion.
wbrush =wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT)
wpen =wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID)
dc.SetBrush(wbrush)
dc.SetPen(wpen)
dc.ResetBoundingBox()
dc.BeginDrawing()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
        if y1 < y0: y0, y1 = y1, y0
        if x1 < x0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = int(x0), int(y0), int(w), int(h)
try: lastrect = self.lastrect
except AttributeError: pass
else: dc.DrawRectangle(*lastrect) #erase last
self.lastrect = rect
dc.DrawRectangle(*rect)
dc.EndDrawing()
def set_status_bar(self, statbar):
self.statbar = statbar
def set_message(self, s):
if self.statbar is not None: self.statbar.set_function(s)
def set_history_buttons(self):
can_backward = (self._views._pos > 0)
can_forward = (self._views._pos < len(self._views._elements) - 1)
self.EnableTool(self._NTB2_BACK, can_backward)
self.EnableTool(self._NTB2_FORWARD, can_forward)
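# Illustrative helper mirroring the coordinate handling in draw_rubberband()
# above: wx puts the origin at the top-left while matplotlib uses the
# bottom-left, so y is flipped and the corners are normalised before the
# XOR rectangle is drawn.  Hypothetical helper, not used by the toolbar.
def _example_normalise_rect(x0, y0, x1, y1, height):
    y0, y1 = height - y0, height - y1
    if y1 < y0:
        y0, y1 = y1, y0
    if x1 < x0:
        x0, x1 = x1, x0
    return int(x0), int(y0), int(x1 - x0), int(y1 - y0)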
class NavigationToolbarWx(wx.ToolBar):
def __init__(self, canvas, can_kill=False):
"""
        figure is the Figure instance that the toolbar controls
win, if not None, is the wxWindow the Figure is embedded in
"""
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
DEBUG_MSG("__init__()", 1, self)
self.canvas = canvas
self._lastControl = None
self._mouseOnButton = None
self._parent = canvas.GetParent()
self._NTB_BUTTON_HANDLER = {
_NTB_X_PAN_LEFT : self.panx,
_NTB_X_PAN_RIGHT : self.panx,
_NTB_X_ZOOMIN : self.zoomx,
            _NTB_X_ZOOMOUT : self.zoomx,
_NTB_Y_PAN_UP : self.pany,
_NTB_Y_PAN_DOWN : self.pany,
_NTB_Y_ZOOMIN : self.zoomy,
_NTB_Y_ZOOMOUT : self.zoomy }
self._create_menu()
self._create_controls(can_kill)
self.Realize()
def _create_menu(self):
"""
Creates the 'menu' - implemented as a button which opens a
pop-up menu since wxPython does not allow a menu as a control
"""
DEBUG_MSG("_create_menu()", 1, self)
self._menu = MenuButtonWx(self)
self.AddControl(self._menu)
self.AddSeparator()
def _create_controls(self, can_kill):
"""
Creates the button controls, and links them to event handlers
"""
DEBUG_MSG("_create_controls()", 1, self)
# Need the following line as Windows toolbars default to 15x16
self.SetToolBitmapSize(wx.Size(16,16))
self.AddSimpleTool(_NTB_X_PAN_LEFT, _load_bitmap('stock_left.xpm'),
'Left', 'Scroll left')
self.AddSimpleTool(_NTB_X_PAN_RIGHT, _load_bitmap('stock_right.xpm'),
'Right', 'Scroll right')
self.AddSimpleTool(_NTB_X_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'),
'Zoom in', 'Increase X axis magnification')
self.AddSimpleTool(_NTB_X_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'),
'Zoom out', 'Decrease X axis magnification')
self.AddSeparator()
self.AddSimpleTool(_NTB_Y_PAN_UP,_load_bitmap('stock_up.xpm'),
'Up', 'Scroll up')
self.AddSimpleTool(_NTB_Y_PAN_DOWN, _load_bitmap('stock_down.xpm'),
'Down', 'Scroll down')
self.AddSimpleTool(_NTB_Y_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'),
'Zoom in', 'Increase Y axis magnification')
self.AddSimpleTool(_NTB_Y_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'),
'Zoom out', 'Decrease Y axis magnification')
self.AddSeparator()
self.AddSimpleTool(_NTB_SAVE, _load_bitmap('stock_save_as.xpm'),
'Save', 'Save plot contents as images')
self.AddSeparator()
bind(self, wx.EVT_TOOL, self._onLeftScroll, id=_NTB_X_PAN_LEFT)
bind(self, wx.EVT_TOOL, self._onRightScroll, id=_NTB_X_PAN_RIGHT)
bind(self, wx.EVT_TOOL, self._onXZoomIn, id=_NTB_X_ZOOMIN)
bind(self, wx.EVT_TOOL, self._onXZoomOut, id=_NTB_X_ZOOMOUT)
bind(self, wx.EVT_TOOL, self._onUpScroll, id=_NTB_Y_PAN_UP)
bind(self, wx.EVT_TOOL, self._onDownScroll, id=_NTB_Y_PAN_DOWN)
bind(self, wx.EVT_TOOL, self._onYZoomIn, id=_NTB_Y_ZOOMIN)
bind(self, wx.EVT_TOOL, self._onYZoomOut, id=_NTB_Y_ZOOMOUT)
bind(self, wx.EVT_TOOL, self._onSave, id=_NTB_SAVE)
bind(self, wx.EVT_TOOL_ENTER, self._onEnterTool, id=self.GetId())
if can_kill:
bind(self, wx.EVT_TOOL, self._onClose, id=_NTB_CLOSE)
bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel)
def set_active(self, ind):
"""
ind is a list of index numbers for the axes which are to be made active
"""
DEBUG_MSG("set_active()", 1, self)
self._ind = ind
        if ind is not None:
            self._active = [self._axes[i] for i in self._ind]
        else:
            self._active = []
        # Now update the button text with the active axes
        self._menu.updateButtonText(ind)
def get_last_control(self):
"""Returns the identity of the last toolbar button pressed."""
return self._lastControl
def panx(self, direction):
DEBUG_MSG("panx()", 1, self)
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def pany(self, direction):
DEBUG_MSG("pany()", 1, self)
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def zoomx(self, in_out):
DEBUG_MSG("zoomx()", 1, self)
for a in self._active:
a.xaxis.zoom(in_out)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def zoomy(self, in_out):
DEBUG_MSG("zoomy()", 1, self)
for a in self._active:
a.yaxis.zoom(in_out)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def update(self):
"""
Update the toolbar menu - called when (e.g.) a new subplot or axes are added
"""
DEBUG_MSG("update()", 1, self)
self._axes = self.canvas.figure.get_axes()
self._menu.updateAxes(len(self._axes))
def _do_nothing(self, d):
"""A NULL event handler - does nothing whatsoever"""
pass
# Local event handlers - mainly supply parameters to pan/scroll functions
def _onEnterTool(self, evt):
toolId = evt.GetSelection()
try:
self.button_fn = self._NTB_BUTTON_HANDLER[toolId]
except KeyError:
self.button_fn = self._do_nothing
evt.Skip()
def _onLeftScroll(self, evt):
self.panx(-1)
evt.Skip()
def _onRightScroll(self, evt):
self.panx(1)
evt.Skip()
def _onXZoomIn(self, evt):
self.zoomx(1)
evt.Skip()
def _onXZoomOut(self, evt):
self.zoomx(-1)
evt.Skip()
def _onUpScroll(self, evt):
self.pany(1)
evt.Skip()
def _onDownScroll(self, evt):
self.pany(-1)
evt.Skip()
def _onYZoomIn(self, evt):
self.zoomy(1)
evt.Skip()
def _onYZoomOut(self, evt):
self.zoomy(-1)
evt.Skip()
def _onMouseEnterButton(self, button):
self._mouseOnButton = button
def _onMouseLeaveButton(self, button):
if self._mouseOnButton == button:
self._mouseOnButton = None
def _onMouseWheel(self, evt):
if evt.GetWheelRotation() > 0:
direction = 1
else:
direction = -1
self.button_fn(direction)
_onSave = NavigationToolbar2Wx.save
def _onClose(self, evt):
self.GetParent().Destroy()
class StatusBarWx(wx.StatusBar):
"""
    A status bar is added to the figure frame to allow measurements and the
    previously selected scroll function to be displayed as a user
    convenience.
"""
def __init__(self, parent):
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(2)
self.SetStatusText("None", 1)
#self.SetStatusText("Measurement: None", 2)
#self.Reposition()
def set_function(self, string):
self.SetStatusText("%s" % string, 1)
#def set_measurement(self, string):
# self.SetStatusText("Measurement: %s" % string, 2)
#< Additions for printing support: Matt Newville
class PrintoutWx(wx.Printout):
"""Simple wrapper around wx Printout class -- all the real work
here is scaling the matplotlib canvas bitmap to the current
    printer's resolution.
"""
def __init__(self, canvas, width=5.5,margin=0.5, title='matplotlib'):
wx.Printout.__init__(self,title=title)
self.canvas = canvas
# width, in inches of output figure (approximate)
self.width = width
self.margin = margin
def HasPage(self, page):
        # currently only supports a single-page print
return page == 1
def GetPageInfo(self):
return (1, 1, 1, 1)
def OnPrintPage(self, page):
self.canvas.draw()
dc = self.GetDC()
(ppw,pph) = self.GetPPIPrinter() # printer's pixels per in
(pgw,pgh) = self.GetPageSizePixels() # page size in pixels
(dcw,dch) = dc.GetSize()
(grw,grh) = self.canvas.GetSizeTuple()
# save current figure dpi resolution and bg color,
# so that we can temporarily set them to the dpi of
# the printer, and the bg color to white
bgcolor = self.canvas.figure.get_facecolor()
fig_dpi = self.canvas.figure.dpi
# draw the bitmap, scaled appropriately
vscale = float(ppw) / fig_dpi
# set figure resolution,bg color for printer
self.canvas.figure.dpi = ppw
self.canvas.figure.set_facecolor('#FFFFFF')
renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
self.canvas.figure.draw(renderer)
self.canvas.bitmap.SetWidth( int(self.canvas.bitmap.GetWidth() * vscale))
self.canvas.bitmap.SetHeight( int(self.canvas.bitmap.GetHeight()* vscale))
self.canvas.draw()
# page may need additional scaling on preview
page_scale = 1.0
if self.IsPreview(): page_scale = float(dcw)/pgw
        # margin in pixels = (margin in inches) * (pixels per inch)
top_margin = int(self.margin * pph * page_scale)
left_margin = int(self.margin * ppw * page_scale)
# set scale so that width of output is self.width inches
        # (grw is the canvas width in pixels, so this is inches * dpi / pixels)
user_scale = (self.width * fig_dpi * page_scale)/float(grw)
dc.SetDeviceOrigin(left_margin,top_margin)
dc.SetUserScale(user_scale,user_scale)
        # this cute little number avoids API inconsistencies in wx
try:
dc.DrawBitmap(self.canvas.bitmap, 0, 0)
except:
try:
dc.DrawBitmap(self.canvas.bitmap, (0, 0))
except:
pass
# restore original figure resolution
self.canvas.figure.set_facecolor(bgcolor)
self.canvas.figure.dpi = fig_dpi
self.canvas.draw()
return True
#>
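# Illustrative arithmetic for the scaling done in PrintoutWx.OnPrintPage():
# vscale maps figure pixels to printer pixels, and user_scale makes the
# printed width come out as self.width inches.  Default values below are
# made-up examples.
def _example_print_scales(ppw=600, fig_dpi=100, graph_width_px=800,
                          width_in=5.5, page_scale=1.0):
    vscale = float(ppw) / fig_dpi                                   # 6.0 here
    user_scale = (width_in * fig_dpi * page_scale) / float(graph_width_px)
    return vscale, user_scale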
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
Toolbar = NavigationToolbarWx
FigureManager = FigureManagerWx
| gpl-3.0 |
mattilyra/scikit-learn | sklearn/tree/tests/test_tree.py | 32 | 52369 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
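# Illustrative (not collected as a test): every DATASETS entry carries a
# dense "X", its CSC twin "X_sparse" and the targets "y"; a tree fitted on
# either representation should predict identically on these small datasets.
def _example_dense_vs_sparse(name="clf_small"):
    data = DATASETS[name]
    dense = DecisionTreeClassifier(random_state=0).fit(data["X"], data["y"])
    sparse = DecisionTreeClassifier(random_state=0).fit(data["X_sparse"],
                                                        data["y"])
    assert_array_equal(dense.predict(data["X"]), sparse.predict(data["X"]))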
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Subsample to save testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
# Set n_samples equal to n_features to ease the simultaneous
# construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
# Assert that leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
# Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
| bsd-3-clause |
trankmichael/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 162 | 7103 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
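# Hedged illustration (not part of the original example): a quick look at what
# the tokenizer above produces. The sample sentence is made up; tokens of two
# or more characters that start with a digit collapse to '#NUMBER'.
def _tokenizer_sketch():
    return number_aware_tokenizer("Selling 12 boards for 450 dollars")
    # -> ['Selling', '#NUMBER', 'boards', 'for', '#NUMBER', 'dollars']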
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
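# Hedged sketch (not in the original example): most_common on a toy
# defaultdict(int) returns (key, count) pairs sorted by decreasing count.
def _most_common_sketch():
    counts = defaultdict(int)
    for label in ('sci.med', 'sci.med', 'sci.med', 'rec.autos'):
        counts[label] += 1
    return most_common(counts)  # -> [('sci.med', 3), ('rec.autos', 1)]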
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
griffinfoster/pulsar-polarization-sims | scripts/SavitzkyGolay.py | 1 | 6998 | #!/usr/bin/env python
"""
Apply a low-pass (Savitzky-Golay) smoothing filter to a pulsar profile
"""
import pyfits as pf
import numpy as n
import pylab as p
import os
import sys
import shutil
import time
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to fit, for each point, a least-squares polynomial of high
order over an odd-sized window centered at that point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
from math import factorial
try:
window_size = n.abs(n.int(window_size))
order = n.abs(n.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = n.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = n.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - n.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + n.abs(y[-half_window-1:-1][::-1] - y[-1])
y = n.concatenate((firstvals, y, lastvals))
return n.convolve( m[::-1], y, mode='valid')
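# Minimal smoothing sketch (not part of the original pipeline): apply the
# filter above to a noisy synthetic sine profile. The window length and
# polynomial order mirror the values used further down but are otherwise
# arbitrary; the helper name is made up for illustration.
def _savitzky_golay_sketch(win_len=51, order=10):
    x = n.linspace(0, 2 * n.pi, 1024)
    noisy = n.sin(x) + n.random.normal(0, 0.1, x.shape)
    return savitzky_golay(noisy, win_len, order)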
if __name__ == "__main__":
from optparse import OptionParser
o = OptionParser()
o.set_usage('%prog [options] [FITS file]')
o.set_description(__doc__)
o.add_option('-r', '--rot', dest='rot', action='store_true',
help='Rotate the profile by 0.5 of the phase')
o.add_option('-w','--win_len',dest='win_len',default=51,type='int',
help='Window smoothing size, should be odd, default:51')
o.add_option('-s','--save',dest='save',action='store_true',
help='Save the smoothed profile to a new fits file')
o.add_option('-S','--shift',dest='shift',default=0, type='int',
help='Shift the smoothed profile to the left N values default:0')
opts, args = o.parse_args(sys.argv[1:])
hdulist=pf.open(args[0])
#print hdulist.info()
primary=hdulist['PRIMARY'].header
print primary['FITSTYPE']
#see www.atnf.csiro.au/research/pulsar/psrfists/fitsdef.html section: Subintegration data
d=hdulist[3].data
#print d
offsets=d[0][-3]
sclFactor=d[0][-2]
data=d[0][-1]
#print sclFactor
#print offsets
#print data.shape
if len(data.shape)==1:
data.shape=(4,1,data.shape[-1]/4)
print data.shape
dout=n.zeros_like(data, dtype=n.float32)
for sid,stokes in enumerate(sclFactor): dout[sid,0,:]=data[sid,0,:].astype(n.float32)*sclFactor[sid]+offsets[sid]
xvals=n.arange(dout.shape[2],dtype=n.float32)
hdulist.close()
if opts.rot: dout=n.roll(dout, dout.shape[2]/2, axis=2)
##LOW PASS FILTER
#ntaps=dout.shape[2]
#cutoff=opts.cutoff
#fir=signal.firwin(ntaps,cutoff)
#ifilter=n.convolve(dout[0,0,:],fir)[int(ntaps/2)-1:-1*int(ntaps/2)]
#qfilter=n.convolve(dout[1,0,:],fir)[int(ntaps/2)-1:-1*int(ntaps/2)]
#ufilter=n.convolve(dout[2,0,:],fir)[int(ntaps/2)-1:-1*int(ntaps/2)]
#vfilter=n.convolve(dout[3,0,:],fir)[int(ntaps/2)-1:-1*int(ntaps/2)]
#SMOOTHING
ifilter=savitzky_golay(dout[0,0,:], opts.win_len, 10)
qfilter=savitzky_golay(dout[1,0,:], opts.win_len, 10)
ufilter=savitzky_golay(dout[2,0,:], opts.win_len, 10)
vfilter=savitzky_golay(dout[3,0,:], opts.win_len, 10)
#SHIFTING
if not (opts.shift==0):
shift=-1*opts.shift
print 'Applying a shift of %i units'%shift
ifilter=n.roll(ifilter,shift)
qfilter=n.roll(qfilter,shift)
ufilter=n.roll(ufilter,shift)
vfilter=n.roll(vfilter,shift)
if opts.save:
dirname,basename=os.path.split(os.path.abspath(args[0]))
outputname=basename.split('.fits')[0]+'.smooth.fits'
outputname=dirname+'/'+outputname
shutil.copy(os.path.abspath(args[0]),outputname)
time.sleep(.1)
hdulist=pf.open(outputname,mode='update')
dwrite=n.zeros_like(dout)
dwrite[0,0,:]=(ifilter-offsets[0])/sclFactor[0]
dwrite[1,0,:]=(qfilter-offsets[1])/sclFactor[1]
dwrite[2,0,:]=(ufilter-offsets[2])/sclFactor[2]
dwrite[3,0,:]=(vfilter-offsets[3])/sclFactor[3]
if opts.rot: dwrite=n.roll(dwrite, -dwrite.shape[2]/2, axis=2)
#dwrite=dwrite.flatten()
dDict=hdulist[3].data
print dwrite.shape
dDict[0][-1]=dwrite
hdulist[3].data=dDict
hdulist.flush()
hdulist.close()
p.subplot(221)
p.plot((ifilter-offsets[0])/sclFactor[0])
p.plot((dout[0,0,:]-offsets[0])/sclFactor[0])
p.subplot(222)
p.plot((qfilter-offsets[1])/sclFactor[1])
p.plot((dout[1,0,:]-offsets[1])/sclFactor[1])
p.subplot(223)
p.plot((ufilter-offsets[2])/sclFactor[2])
p.plot((dout[2,0,:]-offsets[2])/sclFactor[2])
p.subplot(224)
p.plot((vfilter-offsets[3])/sclFactor[3])
p.plot((dout[3,0,:]-offsets[3])/sclFactor[3])
p.show()
| mit |
nikitasingh981/scikit-learn | sklearn/exceptions.py | 50 | 5276 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'SkipTestWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
.. versionchanged:: 0.18
Moved from sklearn.utils.validation.
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior.
.. versionchanged:: 0.18
Moved from sklearn.base.
"""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems
.. versionchanged:: 0.18
Moved from sklearn.utils.
"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
.. versionchanged:: 0.18
Moved from sklearn.utils.validation.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
.. versionchanged:: 0.18
Moved from sklearn.utils.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
.. versionadded:: 0.18
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
.. versionchanged:: 0.18
Moved from sklearn.cross_validation.
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
.. versionchanged:: 0.18
Moved from sklearn.utils.validation, extends EfficiencyWarning.
"""
class SkipTestWarning(UserWarning):
"""Warning class used to notify the user of a test that was skipped.
For example, one of the estimator checks requires a pandas import.
If the pandas package cannot be imported, the test will be skipped rather
than register as a failure.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid
.. versionchanged:: 0.18
Moved from sklearn.base.
"""
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/nonparametric/_kernel_base.py | 29 | 18238 | """
Module containing the base object for multivariate kernel density and
regression, plus some utilities.
"""
from statsmodels.compat.python import range, string_types
import copy
import numpy as np
from scipy import optimize
from scipy.stats.mstats import mquantiles
try:
import joblib
has_joblib = True
except ImportError:
has_joblib = False
from . import kernels
kernel_func = dict(wangryzin=kernels.wang_ryzin,
aitchisonaitken=kernels.aitchison_aitken,
gaussian=kernels.gaussian,
aitchison_aitken_reg = kernels.aitchison_aitken_reg,
wangryzin_reg = kernels.wang_ryzin_reg,
gauss_convolution=kernels.gaussian_convolution,
wangryzin_convolution=kernels.wang_ryzin_convolution,
aitchisonaitken_convolution=kernels.aitchison_aitken_convolution,
gaussian_cdf=kernels.gaussian_cdf,
aitchisonaitken_cdf=kernels.aitchison_aitken_cdf,
wangryzin_cdf=kernels.wang_ryzin_cdf,
d_gaussian=kernels.d_gaussian)
def _compute_min_std_IQR(data):
"""Compute minimum of std and IQR for each variable."""
s1 = np.std(data, axis=0)
q75 = mquantiles(data, 0.75, axis=0).data[0]
q25 = mquantiles(data, 0.25, axis=0).data[0]
s2 = (q75 - q25) / 1.349 # IQR
dispersion = np.minimum(s1, s2)
return dispersion
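# Hedged numeric sketch (not part of statsmodels): for standard-normal draws
# both the standard deviation and IQR/1.349 are close to 1, so the dispersion
# measure above is roughly 1 per column. The helper is illustrative only.
def _dispersion_sketch(seed=0, nobs=10000, k_vars=2):
    rs = np.random.RandomState(seed)
    return _compute_min_std_IQR(rs.normal(size=(nobs, k_vars)))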
def _compute_subset(class_type, data, bw, co, do, n_cvars, ix_ord,
ix_unord, n_sub, class_vars, randomize, bound):
""""Compute bw on subset of data.
Called from ``GenericKDE._compute_efficient_*``.
Notes
-----
Needs to be outside the class in order for joblib to be able to pickle it.
"""
if randomize:
np.random.shuffle(data)
sub_data = data[:n_sub, :]
else:
sub_data = data[bound[0]:bound[1], :]
if class_type == 'KDEMultivariate':
from .kernel_density import KDEMultivariate
var_type = class_vars[0]
sub_model = KDEMultivariate(sub_data, var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
elif class_type == 'KDEMultivariateConditional':
from .kernel_density import KDEMultivariateConditional
k_dep, dep_type, indep_type = class_vars
endog = sub_data[:, :k_dep]
exog = sub_data[:, k_dep:]
sub_model = KDEMultivariateConditional(endog, exog, dep_type,
indep_type, bw=bw, defaults=EstimatorSettings(efficient=False))
elif class_type == 'KernelReg':
from .kernel_regression import KernelReg
var_type, k_vars, reg_type = class_vars
endog = _adjust_shape(sub_data[:, 0], 1)
exog = _adjust_shape(sub_data[:, 1:], k_vars)
sub_model = KernelReg(endog=endog, exog=exog, reg_type=reg_type,
var_type=var_type, bw=bw,
defaults=EstimatorSettings(efficient=False))
else:
raise ValueError("class_type not recognized, should be one of " \
"{KDEMultivariate, KDEMultivariateConditional, KernelReg}")
# Compute dispersion in next 4 lines
if class_type == 'KernelReg':
sub_data = sub_data[:, 1:]
dispersion = _compute_min_std_IQR(sub_data)
fct = dispersion * n_sub**(-1. / (n_cvars + co))
fct[ix_unord] = n_sub**(-2. / (n_cvars + do))
fct[ix_ord] = n_sub**(-2. / (n_cvars + do))
sample_scale_sub = sub_model.bw / fct #TODO: check if correct
bw_sub = sub_model.bw
return sample_scale_sub, bw_sub
class GenericKDE (object):
"""
Base class for density estimation and regression KDE classes.
"""
def _compute_bw(self, bw):
"""
Computes the bandwidth of the data.
Parameters
----------
bw: array_like or str
If array_like: user-specified bandwidth.
If a string, should be one of:
- cv_ml: cross validation maximum likelihood
- normal_reference: normal reference rule of thumb
- cv_ls: cross validation least squares
Notes
-----
The default values for bw is 'normal_reference'.
"""
self.bw_func = dict(normal_reference=self._normal_reference,
cv_ml=self._cv_ml, cv_ls=self._cv_ls)
if bw is None:
bwfunc = self.bw_func['normal_reference']
return bwfunc()
if not isinstance(bw, string_types):
self._bw_method = "user-specified"
res = np.asarray(bw)
else:
# The user specified a bandwidth selection method
self._bw_method = bw
bwfunc = self.bw_func[bw]
res = bwfunc()
return res
def _compute_dispersion(self, data):
"""
Computes the measure of dispersion.
The minimum of the standard deviation and interquartile range / 1.349
Notes
-----
Reimplemented in `KernelReg`, because the first column of `data` has to
be removed.
References
----------
See the user guide for the np package in R.
In the notes on bwscaling option in npreg, npudens, npcdens there is
a discussion on the measure of dispersion
"""
return _compute_min_std_IQR(data)
def _get_class_vars_type(self):
"""Helper method to be able to pass needed vars to _compute_subset.
Needs to be implemented by subclasses."""
pass
def _compute_efficient(self, bw):
"""
Computes the bandwidth by estimating the scaling factor (c)
in n_res resamples of size ``n_sub`` (in `randomize` case), or by
dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
`randomize` is False).
References
----------
See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
"""
if bw is None:
self._bw_method = 'normal_reference'
if isinstance(bw, string_types):
self._bw_method = bw
else:
self._bw_method = "user-specified"
return bw
nobs = self.nobs
n_sub = self.n_sub
data = copy.deepcopy(self.data)
n_cvars = self.data_type.count('c')
co = 4 # 2*order of continuous kernel
do = 4 # 2*order of discrete kernel
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
# Define bounds for slicing the data
if self.randomize:
# randomize chooses blocks of size n_sub, independent of nobs
bounds = [None] * self.n_res
else:
bounds = [(i * n_sub, (i+1) * n_sub) for i in range(nobs // n_sub)]
if nobs % n_sub > 0:
bounds.append((nobs - nobs % n_sub, nobs))
n_blocks = self.n_res if self.randomize else len(bounds)
sample_scale = np.empty((n_blocks, self.k_vars))
only_bw = np.empty((n_blocks, self.k_vars))
class_type, class_vars = self._get_class_vars_type()
if has_joblib:
# `res` is a list of tuples (sample_scale_sub, bw_sub)
res = joblib.Parallel(n_jobs=self.n_jobs) \
(joblib.delayed(_compute_subset) \
(class_type, data, bw, co, do, n_cvars, ix_ord, ix_unord, \
n_sub, class_vars, self.randomize, bounds[i]) \
for i in range(n_blocks))
else:
res = []
for i in range(n_blocks):
res.append(_compute_subset(class_type, data, bw, co, do,
n_cvars, ix_ord, ix_unord, n_sub,
class_vars, self.randomize,
bounds[i]))
for i in range(n_blocks):
sample_scale[i, :] = res[i][0]
only_bw[i, :] = res[i][1]
s = self._compute_dispersion(data)
order_func = np.median if self.return_median else np.mean
m_scale = order_func(sample_scale, axis=0)
# TODO: Check if 1/5 is correct in line below!
bw = m_scale * s * nobs**(-1. / (n_cvars + co))
bw[ix_ord] = m_scale[ix_ord] * nobs**(-2./ (n_cvars + do))
bw[ix_unord] = m_scale[ix_unord] * nobs**(-2./ (n_cvars + do))
if self.return_only_bw:
bw = np.median(only_bw, axis=0)
return bw
def _set_defaults(self, defaults):
"""Sets the default values for the efficient estimation"""
self.n_res = defaults.n_res
self.n_sub = defaults.n_sub
self.randomize = defaults.randomize
self.return_median = defaults.return_median
self.efficient = defaults.efficient
self.return_only_bw = defaults.return_only_bw
self.n_jobs = defaults.n_jobs
def _normal_reference(self):
"""
Returns Scott's normal reference rule of thumb bandwidth parameter.
Notes
-----
See p.13 in [2] for an example and discussion. The formula for the
bandwidth is
.. math:: h = 1.06n^{-1/(4+q)}
where ``n`` is the number of observations and ``q`` is the number of
variables.
"""
X = np.std(self.data, axis=0)
return 1.06 * X * self.nobs ** (- 1. / (4 + self.data.shape[1]))
def _set_bw_bounds(self, bw):
"""
Sets the bandwidth lower bound to effectively zero (1e-10) and, for
discrete variables, the upper bound to 1.
"""
bw[bw < 0] = 1e-10
_, ix_ord, ix_unord = _get_type_pos(self.data_type)
bw[ix_ord] = np.minimum(bw[ix_ord], 1.)
bw[ix_unord] = np.minimum(bw[ix_unord], 1.)
return bw
def _cv_ml(self):
"""
Returns the cross validation maximum likelihood bandwidth parameter.
Notes
-----
For more details see p.16, 18, 27 in Ref. [1] (see module docstring).
Returns the bandwidth estimate that maximizes the leave-one-out
likelihood. The leave-one-out log likelihood function is:
.. math:: \ln L=\sum_{i=1}^{n}\ln f_{-i}(X_{i})
The leave-one-out kernel estimator of :math:`f_{-i}` is:
.. math:: f_{-i}(X_{i})=\frac{1}{(n-1)h}
\sum_{j=1,j\neq i}K_{h}(X_{i},X_{j})
where :math:`K_{h}` represents the Generalized product kernel
estimator:
.. math:: K_{h}(X_{i},X_{j})=\prod_{s=1}^
{q}h_{s}^{-1}k\left(\frac{X_{is}-X_{js}}{h_{s}}\right)
"""
# the initial value for the optimization is the normal_reference
h0 = self._normal_reference()
bw = optimize.fmin(self.loo_likelihood, x0=h0, args=(np.log, ),
maxiter=1e3, maxfun=1e3, disp=0, xtol=1e-3)
bw = self._set_bw_bounds(bw) # bound bw if necessary
return bw
def _cv_ls(self):
"""
Returns the cross-validation least squares bandwidth parameter(s).
Notes
-----
For more details see pp. 16, 27 in Ref. [1] (see module docstring).
Returns the value of the bandwidth that maximizes the integrated mean
square error between the estimated and actual distribution. The
integrated mean square error (IMSE) is given by:
.. math:: \int\left[\hat{f}(x)-f(x)\right]^{2}dx
This is the general formula for the IMSE. The IMSE differs for
conditional (``KDEMultivariateConditional``) and unconditional
(``KDEMultivariate``) kernel density estimation.
"""
h0 = self._normal_reference()
bw = optimize.fmin(self.imse, x0=h0, maxiter=1e3, maxfun=1e3, disp=0,
xtol=1e-3)
bw = self._set_bw_bounds(bw) # bound bw if necessary
return bw
def loo_likelihood(self):
raise NotImplementedError
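# Hedged sketch (not part of statsmodels): the normal-reference rule used by
# GenericKDE._normal_reference, h = 1.06 * sigma * n**(-1/(4+q)), can be
# reproduced directly for an (nobs, k_vars) data matrix.
def _scott_bandwidth_sketch(data):
    data = np.asarray(data)
    nobs, k_vars = data.shape
    return 1.06 * np.std(data, axis=0) * nobs ** (-1. / (4 + k_vars))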
class EstimatorSettings(object):
"""
Object to specify settings for density estimation or regression.
`EstimatorSettings` has several properties related to how bandwidth
estimation for the `KDEMultivariate`, `KDEMultivariateConditional`,
`KernelReg` and `CensoredKernelReg` classes behaves.
Parameters
----------
efficient: bool, optional
If True, the bandwidth estimation is to be performed
efficiently -- by taking smaller sub-samples and estimating
the scaling factor of each subsample. This is useful for large
samples (nobs >> 300) and/or multiple variables (k_vars > 3).
If False (default), all data is used at the same time.
randomize: bool, optional
If True, the bandwidth estimation is to be performed by
taking `n_res` random resamples (with replacement) of size `n_sub` from
the full sample. If set to False (default), the estimation is
performed by slicing the full sample in sub-samples of size `n_sub` so
that all samples are used once.
n_sub: int, optional
Size of the sub-samples. Default is 50.
n_res: int, optional
The number of random re-samples used to estimate the bandwidth.
Only has an effect if ``randomize == True``. Default value is 25.
return_median: bool, optional
If True (default), the estimator uses the median of all scaling factors
for each sub-sample to estimate the bandwidth of the full sample.
If False, the estimator uses the mean.
return_only_bw: bool, optional
If True, the estimator is to use the bandwidth and not the
scaling factor. This is *not* theoretically justified.
Should be used only for experimenting.
n_jobs : int, optional
The number of jobs to use for parallel estimation with
``joblib.Parallel``. Default is -1, meaning ``n_cores - 1``, with
``n_cores`` the number of available CPU cores.
See the `joblib documentation
<https://pythonhosted.org/joblib/parallel.html>`_ for more details.
Examples
--------
>>> settings = EstimatorSettings(randomize=True, n_jobs=3)
>>> k_dens = KDEMultivariate(data, var_type, defaults=settings)
"""
def __init__(self, efficient=False, randomize=False, n_res=25, n_sub=50,
return_median=True, return_only_bw=False, n_jobs=-1):
self.efficient = efficient
self.randomize = randomize
self.n_res = n_res
self.n_sub = n_sub
self.return_median = return_median
self.return_only_bw = return_only_bw # TODO: remove this?
self.n_jobs = n_jobs
class LeaveOneOut(object):
"""
Generator to give leave-one-out views on X.
Parameters
----------
X : array-like
2-D array.
Examples
--------
>>> X = np.random.normal(0, 1, [10,2])
>>> loo = LeaveOneOut(X)
>>> for x in loo:
...     print(x)
Notes
-----
A little lighter weight than sklearn LOO. We don't need test index.
Also passes views on X, not the index.
"""
def __init__(self, X):
self.X = np.asarray(X)
def __iter__(self):
X = self.X
nobs, k_vars = np.shape(X)
for i in range(nobs):
index = np.ones(nobs, dtype=np.bool)
index[i] = False
yield X[index, :]
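# Hedged usage sketch (not part of statsmodels): each iteration yields a view
# of X with one observation left out, so every yielded array below has shape
# (2, 1).
def _leave_one_out_sketch():
    X = np.array([[1.], [2.], [3.]])
    return [x.shape for x in LeaveOneOut(X)]  # [(2, 1), (2, 1), (2, 1)]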
def _get_type_pos(var_type):
ix_cont = np.array([c == 'c' for c in var_type])
ix_ord = np.array([c == 'o' for c in var_type])
ix_unord = np.array([c == 'u' for c in var_type])
return ix_cont, ix_ord, ix_unord
def _adjust_shape(dat, k_vars):
""" Returns an array of shape (nobs, k_vars) for use with `gpke`."""
dat = np.asarray(dat)
if dat.ndim > 2:
dat = np.squeeze(dat)
if dat.ndim == 1 and k_vars > 1: # one obs many vars
nobs = 1
elif dat.ndim == 1 and k_vars == 1: # one obs one var
nobs = len(dat)
else:
if np.shape(dat)[0] == k_vars and np.shape(dat)[1] != k_vars:
dat = dat.T
nobs = np.shape(dat)[0] # ndim >1 so many obs many vars
dat = np.reshape(dat, (nobs, k_vars))
return dat
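# Hedged sketch (not part of statsmodels): _adjust_shape always returns an
# (nobs, k_vars) array -- five scalar observations become (5, 1) and a single
# three-variable observation becomes (1, 3).
def _adjust_shape_sketch():
    return (_adjust_shape(np.arange(5), 1).shape,
            _adjust_shape(np.arange(3), 3).shape)  # ((5, 1), (1, 3))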
def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
"""
Returns the non-normalized Generalized Product Kernel Estimator
Parameters
----------
bw: 1-D ndarray
The user-specified bandwidth parameters.
data: 1D or 2-D ndarray
The training data.
data_predict: 1-D ndarray
The evaluation points at which the kernel estimation is performed.
var_type: str, optional
The variable type (continuous, ordered, unordered).
ckertype: str, optional
The kernel used for the continuous variables.
okertype: str, optional
The kernel used for the ordered discrete variables.
ukertype: str, optional
The kernel used for the unordered discrete variables.
tosum : bool, optional
Whether or not to sum the calculated array of densities. Default is
True.
Returns
-------
dens: array-like
The generalized product kernel density estimator.
Notes
-----
The formula for the multivariate kernel estimator for the pdf is:
.. math:: f(x)=\frac{1}{nh_{1}...h_{q}}\sum_{i=1}^
{n}K\left(\frac{X_{i}-x}{h}\right)
where
.. math:: K\left(\frac{X_{i}-x}{h}\right) =
k\left( \frac{X_{i1}-x_{1}}{h_{1}}\right)\times
k\left( \frac{X_{i2}-x_{2}}{h_{2}}\right)\times...\times
k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right)
"""
kertypes = dict(c=ckertype, o=okertype, u=ukertype)
#Kval = []
#for ii, vtype in enumerate(var_type):
# func = kernel_func[kertypes[vtype]]
# Kval.append(func(bw[ii], data[:, ii], data_predict[ii]))
#Kval = np.column_stack(Kval)
Kval = np.empty(data.shape)
for ii, vtype in enumerate(var_type):
func = kernel_func[kertypes[vtype]]
Kval[:, ii] = func(bw[ii], data[:, ii], data_predict[ii])
iscontinuous = np.array([c == 'c' for c in var_type])
dens = Kval.prod(axis=1) / np.prod(bw[iscontinuous])
if tosum:
return dens.sum(axis=0)
else:
return dens
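# Hedged sketch (not part of statsmodels): evaluate the unnormalized Gaussian
# product kernel at one point for two continuous variables. gpke divides by
# the bandwidth product and sums over observations; the 1/nobs factor is left
# to the caller. Data and bandwidths below are made up for illustration.
def _gpke_sketch(seed=0):
    rs = np.random.RandomState(seed)
    data = rs.normal(size=(50, 2))
    bw = np.array([0.3, 0.3])
    return gpke(bw, data=data, data_predict=np.zeros(2), var_type='cc')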
| bsd-3-clause |
whoisever/vgg16_finetune_mutli_label | inception_v3.py | 1 | 9515 | # -*- coding: utf-8 -*-
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from sklearn.metrics import log_loss
from load_cifar10 import load_cifar10_data
def conv2d_bn(x, nb_filter, nb_row, nb_col,
border_mode='same', subsample=(1, 1),
name=None):
"""
Utility function to apply conv + BN for Inception V3.
"""
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
bn_axis = 1
x = Convolution2D(nb_filter, nb_row, nb_col,
subsample=subsample,
activation='relu',
border_mode=border_mode,
name=conv_name)(x)
x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
return x
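# Hedged sketch (not part of the original fine-tuning script): conv2d_bn wires
# a ReLU-activated Convolution2D followed by BatchNormalization. The shapes
# below assume the Theano dimension ordering used throughout this file; the
# helper name and input size are made up for illustration.
def _conv2d_bn_sketch():
    inp = Input(shape=(3, 224, 224))
    out = conv2d_bn(inp, 32, 3, 3, subsample=(2, 2), border_mode='valid')
    return Model(inp, out)  # output feature map: (32, 111, 111)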
def inception_v3_model(img_rows, img_cols, channel=1, num_classes=None):
"""
Inception-V3 Model for Keras
Model Schema is based on
https://github.com/fchollet/deep-learning-models/blob/master/inception_v3.py
ImageNet Pretrained Weights
https://github.com/fchollet/deep-learning-models/releases/download/v0.2/inception_v3_weights_th_dim_ordering_th_kernels.h5
Parameters:
img_rows, img_cols - resolution of inputs
channel - 1 for grayscale, 3 for color
num_classes - number of class labels for our classification task
"""
channel_axis = 1
img_input = Input(shape=(channel, img_rows, img_cols))
x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
x = conv2d_bn(x, 32, 3, 3, border_mode='valid')
x = conv2d_bn(x, 64, 3, 3)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, border_mode='valid')
x = conv2d_bn(x, 192, 3, 3, border_mode='valid')
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0, 1, 2: 35 x 35 x 256
for i in range(3):
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = merge([branch1x1, branch5x5, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(i))
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, subsample=(2, 2), border_mode='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
subsample=(2, 2), border_mode='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3,
subsample=(2, 2), border_mode='valid')
branch_pool = AveragePooling2D((3, 3), strides=(2, 2))(x)
x = merge([branch3x3, branch7x7x3, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = merge([branch3x3_1, branch3x3_2],
mode='concat', concat_axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = merge([branch3x3dbl_1, branch3x3dbl_2],
mode='concat', concat_axis=channel_axis)
branch_pool = AveragePooling2D(
(3, 3), strides=(1, 1), border_mode='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = merge([branch1x1, branch3x3, branch3x3dbl, branch_pool],
mode='concat', concat_axis=channel_axis,
name='mixed' + str(9 + i))
# Fully Connected Softmax Layer
x_fc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_fc = Flatten(name='flatten')(x_fc)
x_fc = Dense(1000, activation='softmax', name='predictions')(x_fc)
# Create model
model = Model(img_input, x_fc)
# Load ImageNet pre-trained data
model.load_weights('imagenet_models/inception_v3_weights_th_dim_ordering_th_kernels.h5')
# Truncate and replace softmax layer for transfer learning
# Cannot use model.layers.pop() since model is not of Sequential() type
# The method below works since pre-trained weights are stored in layers but not in the model
x_newfc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
x_newfc = Flatten(name='flatten')(x_newfc)
x_newfc = Dense(num_classes, activation='softmax', name='predictions')(x_newfc)
# Create another model with our customized softmax
model = Model(img_input, x_newfc)
# Learning rate is changed to 0.001
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
return model
if __name__ == '__main__':
# Example to fine-tune on 3000 samples from Cifar10
img_rows, img_cols = 299, 299 # Resolution of inputs
channel = 3
num_classes = 10
batch_size = 16
nb_epoch = 10
# Load Cifar10 data. Please implement your own load_data() module for your own dataset
X_train, Y_train, X_valid, Y_valid = load_cifar10_data(img_rows, img_cols)
# Load our model
model = inception_v3_model(img_rows, img_cols, channel, num_classes)
# Start Fine-tuning
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_data=(X_valid, Y_valid),
)
# Make predictions
predictions_valid = model.predict(X_valid, batch_size=batch_size, verbose=1)
# Cross-entropy loss score
score = log_loss(Y_valid, predictions_valid)
| mit |
rspavel/spack | var/spack/repos/builtin/packages/py-iminuit/package.py | 5 | 1039 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyIminuit(PythonPackage):
"""Interactive IPython-Friendly Minimizer based on SEAL Minuit2."""
homepage = "https://pypi.python.org/pypi/iminuit"
url = "https://pypi.io/packages/source/i/iminuit/iminuit-1.2.tar.gz"
version('1.3.6', sha256='d79a197f305d4708a0e3e52b0a6748c1a6997360d2fbdfd09c022995a6963b5e')
version('1.2', sha256='7651105fc3f186cfb5742f075ffebcc5088bf7797d8ed124c00977eebe0d1c64')
# Required dependencies
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'), when='@1.3:')
# Optional dependencies
depends_on('py-matplotlib', type='test', when='@1.3:')
depends_on('py-cython', type='test', when='@1.3:')
depends_on('py-pytest', type='test', when='@1.3:')
depends_on('py-scipy', type='test', when='@1.3:')
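    # Illustrative command-line usage (a sketch, assuming a working Spack setup;
    # the exact spec depends on your compilers and configuration):
    #   spack install py-iminuit@1.3.6
    #   spack load py-iminuit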
| lgpl-2.1 |
rajat1994/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
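###############################################################################
# Note: both fits are expected to recover essentially the same coefficients, so
# the printed distances should be ~0 up to solver tolerance; the timings depend
# on the machine and are illustrative rather than reproducible numbers.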
| bsd-3-clause |
yandex-load/volta | volta/core/postloader.py | 1 | 2832 | import logging
import pandas as pd
import json
import yaml
from volta.listeners.uploader.uploader import DataUploader
from volta.core.core import VoltaConfig
from volta.core.config.dynamic_options import DYNAMIC_OPTIONS
logger = logging.getLogger(__name__)
def main():
import argparse
parser = argparse.ArgumentParser(description='volta console post-loader')
parser.add_argument('--debug', dest='debug', action='store_true', default=False)
parser.add_argument('-l', '--logs', action='append', help='Log files list')
parser.add_argument('-c', '--config', dest='config')
args = parser.parse_args()
logging.basicConfig(
level="DEBUG" if args.debug else "INFO",
format='%(asctime)s [%(levelname)s] [Volta Post-loader] %(filename)s:%(lineno)d %(message)s')
config = {}
PACKAGE_SCHEMA_PATH = 'volta.core'
if not args.config:
raise RuntimeError('config should be specified')
if not args.logs:
raise RuntimeError('Empty log list')
with open(args.config, 'r') as cfg_stream:
try:
config = VoltaConfig(yaml.load(cfg_stream), DYNAMIC_OPTIONS, PACKAGE_SCHEMA_PATH)
except Exception:
raise RuntimeError('Config file not in yaml or malformed')
uploader = DataUploader(config)
uploader.create_job()
for log in args.logs:
try:
with open(log, 'r') as logname:
meta = json.loads(logname.readline())
except ValueError:
logger.warning('Skipped data file: no json header in logfile %s or json malformed...', log)
logger.debug('Skipped data file: no json header in logfile %s or json malformed', log, exc_info=True)
continue
else:
df = pd.read_csv(log, sep='\t', skiprows=1, names=meta['names'], dtype=meta['dtypes'])
logger.info('Uploading %s, meta type: %s', log, meta['type'])
uploader.put(df, meta['type'])
logger.info('Updating job metadata...')
try:
update_job_data = {
'test_id': config.get_option('core', 'test_id'),
'name': config.get_option('uploader', 'name'),
'dsc': config.get_option('uploader', 'dsc'),
'device_id': config.get_option('uploader', 'device_id'),
'device_model': config.get_option('uploader', 'device_model'),
'device_os': config.get_option('uploader', 'device_os'),
'app': config.get_option('uploader', 'app'),
'ver': config.get_option('uploader', 'ver'),
'meta': config.get_option('uploader', 'meta'),
'task': config.get_option('uploader', 'task'),
}
uploader.update_job(update_job_data)
except Exception:
logger.warning('Exception updating metadata')
uploader.close()
logger.info('Done!')
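# Standard entry-point guard (an assumption: the package may also expose main()
# through a console-script entry point; this guard only makes the module
# directly runnable).
if __name__ == '__main__':
    main()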
| mpl-2.0 |
aburrell/pysat | pysat/_orbits.py | 1 | 32127 | from __future__ import print_function
from __future__ import absolute_import
import functools
import numpy as np
import pandas as pds
from pysat import Series, DataFrame
class Orbits(object):
"""Determines orbits on the fly and provides orbital data in .data.
Determines the locations of orbit breaks in the loaded data in inst.data
and provides iteration tools and convenient orbit selection via
inst.orbit[orbit num].
Parameters
----------
sat : pysat.Instrument instance
instrument object to determine orbits for
index : string
        name of the data series to use for determining orbit breaks
kind : {'local time', 'longitude', 'polar', 'orbit'}
kind of orbit, determines how orbital breaks are determined
- local time: negative gradients in lt or breaks in inst.data.index
- longitude: negative gradients or breaks in inst.data.index
- polar: zero crossings in latitude or breaks in inst.data.index
- orbit: uses unique values of orbit number
period : np.timedelta64
length of time for orbital period, used to gauge when a break
in the datetime index (inst.data.index) is large enough to
consider it a new orbit
Note
----
class should not be called directly by the user, use the interface provided
by inst.orbits where inst = pysat.Instrument()
Warning
-------
This class is still under development.
Examples
--------
::
info = {'index':'longitude', 'kind':'longitude'}
vefi = pysat.Instrument(platform='cnofs', name='vefi', tag='dc_b',
clean_level=None, orbit_info=info)
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,10)
vefi.load(date=start)
vefi.bounds(start, stop)
# iterate over orbits
for vefi in vefi.orbits:
print('Next available orbit ', vefi['dB_mer'])
# load fifth orbit of first day
vefi.load(date=start)
vefi.orbits[5]
# less convenient load
vefi.orbits.load(5)
# manually iterate orbit
vefi.orbits.next()
# backwards
vefi.orbits.prev()
"""
def __init__(self, sat=None, index=None, kind=None, period=None):
# create null arrays for storing orbit info
if sat is None:
raise ValueError('Must provide a pysat instrument object when initializing ' +
'orbits class.')
else:
# self.sat = weakref.proxy(sat)
self.sat = sat
if kind is None:
kind = 'local time'
else:
kind = kind.lower()
if period is None:
period = np.timedelta64(97, 'm')
self.orbit_period = period
if (kind == 'local time') or (kind == 'lt'):
self._detBreaks = functools.partial(self._equaBreaks, orbit_index_period=24.)
elif (kind == 'longitude') or (kind == 'long'):
self._detBreaks = functools.partial(self._equaBreaks, orbit_index_period=360.)
elif kind == 'polar':
self._detBreaks = self._polarBreaks
elif kind == 'orbit':
self._detBreaks = self._orbitNumberBreaks
else:
raise ValueError('Unknown kind of orbit requested.')
self._orbit_breaks = []
self.num = 0 #[]
self.current = 0
self.orbit_index = index
def __getitem__(self, key):
"""Enable convenience notation for loading orbit into parent object.
Examples
--------
::
inst.load(date=date)
inst.orbits[4]
print('Orbit data ', inst.data)
Note
----
A day of data must already be loaded.
"""
# hack included so that orbits appear to be zero indexed
if key < 0:
self.load(key)
else:
self.load(key+1)
def _reset(self):
# create null arrays for storing orbit info
self._orbit_breaks = []
self.num = 0 #None
self.current = 0
def _calcOrbits(self):
"""Prepares data structure for breaking data into orbits. Not intended for end user."""
# if the breaks between orbit have not been defined, define them
# also, store the data so that grabbing different orbits does not
# require reloads of whole dataset
if len(self._orbit_breaks) == 0:
# determine orbit breaks
self._detBreaks()
# store a copy of data
self._fullDayData = self.sat.data.copy()
# set current orbit counter to zero (default)
self.current = 0
def _equaBreaks(self, orbit_index_period=24.):
"""Determine where breaks in an equatorial satellite orbit occur.
Looks for negative gradients in local time (or longitude) as well as
breaks in UT.
Parameters
----------
orbit_index_period : float
The change in value of supplied index parameter for a single orbit
"""
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
                             'pysat.Instrument object instantiation. ' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not exist in loaded data')
# get difference in orbit index around the orbit
lt_diff = self.sat[self.orbit_index].diff()
# universal time values, from datetime index
ut_vals = Series(self.sat.data.index)
# UT difference
ut_diff = ut_vals.diff()
# get locations where orbit index derivative is less than 0
# then do some basic checks on these locations
ind, = np.where((lt_diff < -0.1))
if len(ind) > 0:
ind = np.hstack((ind, np.array([len(self.sat[self.orbit_index])])))
# look at distance between breaks
dist = ind[1:] - ind[0:-1]
# only keep orbit breaks with a distance greater than 1
# done for robustness
if len(ind) > 1:
if min(dist) == 1:
print('There are orbit breaks right next to each other')
ind = ind[:-1][dist > 1]
# check for large positive gradients around the break that would
# suggest not a true orbit break, but rather bad orbit_index values
new_ind = []
for idx in ind:
tidx, = np.where(lt_diff[idx - 5:idx + 6] > 0.1)
if len(tidx) != 0:
# there are large changes, suggests a false alarm
# iterate over samples and check
for tidx in tidx:
# look at time change vs local time change
if (ut_diff[idx - 5:idx + 6].iloc[tidx] < lt_diff[idx - 5:idx + 6].iloc[tidx] /
orbit_index_period * self.orbit_period):
# change in ut is small compared to the change in the orbit index
# this is flagged as a false alarm, or dropped from consideration
pass
else:
# change in UT is significant, keep orbit break
new_ind.append(idx)
break
else:
# no large positive gradients, current orbit break passes the first test
new_ind.append(idx)
# replace all breaks with those that are 'good'
ind = np.array(new_ind)
# now, assemble some orbit breaks that are not triggered by changes in the orbit index
# check if there is a UT break that is larger than orbital period, aka a time gap
ut_change_vs_period = ut_diff > self.orbit_period
# characterize ut change using orbital period
norm_ut = ut_diff / self.orbit_period
# now, look for breaks because the length of time between samples is too large,
# thus there is no break in slt/mlt/etc, lt_diff is small but UT change is big
norm_ut_vs_norm_lt = norm_ut.gt(np.abs(lt_diff.values / orbit_index_period))
# indices when one or other flag is true
ut_ind, = np.where(ut_change_vs_period | (norm_ut_vs_norm_lt & (norm_ut > 0.95)))
# & lt_diff.notnull() ))# & (lt_diff != 0) ) ) #added the or and check after or on 10/20/2014
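            # interpretive note: if the elapsed UT is close to (or longer than) the time
            # implied by the change in the orbit index, at least one full orbit elapsed
            # without samples, so a break is flagged even though the index itself shows
            # no negative jump (the 0.95 and -0.1 thresholds above are empirical)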
# combine these UT determined orbit breaks with the orbit index orbit breaks
if len(ut_ind) > 0:
ind = np.hstack((ind, ut_ind))
ind = np.sort(ind)
ind = np.unique(ind)
print('Time Gap')
# now that most problems in orbits should have been caught, look at
# the time difference between orbits (not individual orbits)
orbit_ut_diff = ut_vals[ind].diff()
orbit_lt_diff = self.sat[self.orbit_index][ind].diff()
# look for time gaps between partial orbits. The full orbital time period is not required
        # between the end of one orbit and the beginning of the next if the first orbit is partial.
# also provides another general test of the orbital breaks determined.
idx, = np.where((orbit_ut_diff / self.orbit_period - orbit_lt_diff.values / orbit_index_period) > 0.97)
# pull out breaks that pass the test, need to make sure the first one is always included
# it gets dropped via the nature of diff
if len(idx) > 0:
if idx[0] != 0:
idx = np.hstack((0, idx))
else:
idx = np.array([0])
# only keep the good indices
if len(ind) > 0:
ind = ind[idx]
            # create orbit break index, ensure first element is always 0
if ind[0] != 0:
ind = np.hstack((np.array([0]), ind))
else:
ind = np.array([0])
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits
def _polarBreaks(self):
"""Determine where breaks in a polar orbiting satellite orbit occur.
Looks for sign changes in latitude (magnetic or geographic) as well as
breaks in UT.
"""
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
                             'pysat.Instrument object instantiation. ' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not appear to exist in loaded data')
# determine where orbit index goes from positive to negative
pos = self.sat[self.orbit_index] >= 0
        npos = ~pos  # logical complement (use ~ rather than - for a boolean Series)
change = (pos.values[:-1] & npos.values[1:]) | (npos.values[:-1] & pos.values[1:])
ind, = np.where(change)
ind += 1
ut_diff = Series(self.sat.data.index).diff()
ut_ind, = np.where(ut_diff / self.orbit_period > 0.95)
if len(ut_ind) > 0:
ind = np.hstack((ind, ut_ind))
ind = np.sort(ind)
ind = np.unique(ind)
# print 'Time Gap'
        # create orbit break index, ensure first element is always 0
if ind[0] != 0:
ind = np.hstack((np.array([0]), ind))
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits
def _orbitNumberBreaks(self):
"""Determine where orbital breaks in a dataset with orbit numbers occur.
Looks for changes in unique values.
"""
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
                             'pysat.Instrument object instantiation. ' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not appear to exist in loaded data')
# determine where the orbit index changes from one value to the next
uniq_vals = self.sat[self.orbit_index].unique()
orbit_index = []
for val in uniq_vals:
idx, = np.where(val == self.sat[self.orbit_index].values)
orbit_index.append(idx[0])
        # create orbit break index, ensure first element is always 0
if orbit_index[0] != 0:
ind = np.hstack((np.array([0]), orbit_index))
else:
ind = orbit_index
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits
def _getBasicOrbit(self, orbit=None):
"""Load a particular orbit into .data for loaded day.
Parameters
----------
orbit : int
orbit number, 1 indexed, negative indexes allowed, -1 last orbit
Note
----
A day of data must be loaded before this routine functions properly.
If the last orbit of the day is requested, it will NOT automatically be
padded with data from the next day.
"""
# ensure data exists
if not self.sat.empty:
# ensure proper orbit metadata present
self._calcOrbits()
# ensure user is requesting a particular orbit
if orbit is not None:
# pull out requested orbit
if orbit == -1:
# load orbit data into data
self.sat.data = self._fullDayData[self._orbit_breaks[self.num + orbit]:]
self.current = self.num + orbit + 1
elif ((orbit < 0) & (orbit >= -self.num)):
# load orbit data into data
self.sat.data = self._fullDayData[
self._orbit_breaks[self.num + orbit]:self._orbit_breaks[self.num + orbit + 1]]
self.current = self.num + orbit + 1
elif (orbit < self.num) & (orbit != 0):
# load orbit data into data
self.sat.data = self._fullDayData[self._orbit_breaks[orbit - 1]:self._orbit_breaks[orbit]]
self.current = orbit
elif orbit == self.num:
self.sat.data = self._fullDayData[self._orbit_breaks[orbit - 1]:]
self.current = orbit # recent addition, wondering why it wasn't there before, could just be a bug
elif orbit == 0:
raise ValueError('Orbits internally indexed by 1, 0 not allowed')
else:
# gone too far
self.sat.data = []
raise ValueError('Requested an orbit past total orbits for day')
else:
raise ValueError('Must set an orbit')
def load(self, orbit=None):
"""Load a particular orbit into .data for loaded day.
Parameters
----------
orbit : int
orbit number, 1 indexed
Note
----
A day of data must be loaded before this routine functions properly.
If the last orbit of the day is requested, it will automatically be
padded with data from the next day. The orbit counter will be
reset to 1.
"""
if not self.sat.empty: # ensure data exists
# set up orbit metadata
self._calcOrbits()
# ensure user supplied an orbit
if orbit is not None:
# pull out requested orbit
if orbit < 0:
# negative indexing consistent with numpy, -1 last, -2 second
# to last, etc.
orbit = self.num + 1 + orbit
if orbit == 1:
# change from orig copied from _core, didn't look correct.
# self._getBasicOrbit(orbit=2)
try:
true_date = self.sat.date # .copy()
self.sat.prev()
                        # if and else added because of CINDI turn off 6/5/2013, turn on 10/22/2014
# crashed when starting on 10/22/2014
# prev returned empty data
if not self.sat.empty:
self.load(orbit=-1)
else:
self.sat.next()
self._getBasicOrbit(orbit=1)
# check that this orbit should end on the current day
delta = pds.to_timedelta(true_date - self.sat.data.index[0])
# print 'checking if first orbit should land on requested day'
# print self.sat.date, self.sat.data.index[0], delta, delta >= self.orbit_period
# print delta - self.orbit_period
if delta >= self.orbit_period:
# the orbit loaded isn't close enough to date
# to be the first orbit of the day, move forward
self.next()
except StopIteration:
# print 'going for basic orbit'
self._getBasicOrbit(orbit=1)
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self.current - 1))
# check if the first orbit is also the last orbit
elif orbit == self.num:
# we get here if user asks for last orbit
# make sure that orbit data goes across daybreak as needed
# load previous orbit
if self.num != 1:
self._getBasicOrbit(self.num - 1)
self.next()
else:
self._getBasicOrbit(orbit=-1)
elif orbit < self.num:
# load orbit data into data
self._getBasicOrbit(orbit)
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self.current - 1))
else:
# gone too far
self.sat.data = DataFrame()
raise Exception('Requested an orbit past total orbits for day')
else:
raise Exception('Must set an orbit')
else:
print('No data loaded in instrument object to determine orbits.')
def next(self, *arg, **kwarg):
"""Load the next orbit into .data.
Note
----
Forms complete orbits across day boundaries. If no data loaded
then the first orbit from the first date of data is returned.
"""
# first, check if data exists
if not self.sat.empty:
# set up orbit metadata
self._calcOrbits()
# if current orbit near the last, must be careful
if self.current == (self.num - 1):
# first, load last orbit data
self._getBasicOrbit(orbit=-1)
# End of orbit may occur on the next day
load_next = True
if self.sat._iter_type == 'date':
delta = pds.to_timedelta(self.sat.date - self.sat.data.index[-1]) + np.timedelta64(1, 'D')
if delta >= self.orbit_period:
                        # don't need to load the next day because this orbit ends more than an orbital
# period from the next date
load_next = False
if load_next:
# the end of the user's desired orbit occurs tomorrow, need to form a complete orbit
# save this current orbit, load the next day, combine data, select the correct orbit
temp_orbit_data = self.sat.data.copy()
try:
# loading next day/file clears orbit breaks info
self.sat.next()
if not self.sat.empty:
# combine this next day's data with previous last orbit, grab the first one
self.sat.data = pds.concat(
[temp_orbit_data[:self.sat.data.index[0] - pds.DateOffset(microseconds=1)],
self.sat.data])
self._getBasicOrbit(orbit=1)
else:
# no data, go back a day and grab the last orbit. As complete as orbit can be
self.sat.prev()
self._getBasicOrbit(orbit=-1)
except StopIteration:
pass
del temp_orbit_data
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self.current - 1))
elif self.current == (self.num):
# at the last orbit, need to be careful about getting the next orbit
# save this current orbit and load the next day
temp_orbit_data = self.sat.data.copy()
# load next day, which clears orbit breaks info
self.sat.next()
# combine this next day orbit with previous last orbit to ensure things are correct
if not self.sat.empty:
pad_next = True
# check if data padding is really needed, only works when loading by date
if self.sat._iter_type == 'date':
delta = pds.to_timedelta(self.sat.date - temp_orbit_data.index[-1])
if delta >= self.orbit_period:
# the end of the previous orbit is more than an orbit away from today
# we don't have to worry about it
pad_next = False
if pad_next:
# orbit went across day break, stick old orbit onto new data and grab second orbit (first is old)
self.sat.data = pds.concat(
[temp_orbit_data[:self.sat.data.index[0] - pds.DateOffset(microseconds=1)], self.sat.data])
# select second orbit of combined data
self._getBasicOrbit(orbit=2)
else:
# padding from the previous orbit wasn't needed, can just grab the first orbit of loaded data
self._getBasicOrbit(orbit=1)
if self.sat._iter_type == 'date':
delta = pds.to_timedelta(self.sat.date + pds.DateOffset(days=1) - self.sat.data.index[0])
if delta < self.orbit_period:
                            # this orbit's end occurs on the next day
# though we grabbed the first orbit, missing data means the first available
# orbit in the data is actually the last for the day
# Resetting to second to last orbit and then calling next()
# will get the last orbit, accounting for tomorrow's data as well.
self.current = self.num - 1
self.next()
else:
# no data for the next day
# continue loading data until there is some
                # self.sat.next() raises StopIteration when it reaches the end of the data, leaving this function
while self.sat.empty:
self.sat.next()
self._getBasicOrbit(orbit=1)
del temp_orbit_data
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self.current - 1))
elif self.current == 0:
# no current orbit set, grab the first one
# using load command to specify the first orbit
# which automatically loads prev day if needed to form complete orbit
self.load(orbit=1)
elif self.current < (self.num - 1):
# since we aren't close to the last orbit, just pull the next orbit
self._getBasicOrbit(orbit=self.current + 1)
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self.current - 1))
else:
raise Exception(
'You ended up where nobody should ever be. Talk to someone about this fundamental failure.')
else: # no data
while self.sat.empty:
# keep going until data is found
# next raises stopIteration at end of data set, no more data possible
self.sat.next()
# we've found data, grab the next orbit
self.next()
def prev(self, *arg, **kwarg):
"""Load the previous orbit into .data.
Note
----
Forms complete orbits across day boundaries. If no data loaded
then the last orbit of data from the last day is loaded into .data.
"""
# first, check if data exists
if not self.sat.empty:
# set up orbit metadata
self._calcOrbits()
            # if not close to the first orbit, just pull the previous orbit
if (self.current > 2) & (self.current <= self.num):
# load orbit and put it into self.sat.data
self._getBasicOrbit(orbit=self.current - 1)
print('Loaded Orbit:%i' % (self.current - 1))
# if current orbit near the first, must be careful
elif self.current == 2:
# first, load prev orbit data
self._getBasicOrbit(orbit=self.current - 1)
load_prev = True
if self.sat._iter_type == 'date':
delta = pds.to_timedelta(self.sat.data.index[-1] - self.sat.date)
if delta >= self.orbit_period:
                        # don't need to load the prev day because this orbit ends more than an orbital
# period from start of today's date
load_prev = False
if load_prev:
# need to save this current orbit and load the prev day
temp_orbit_data = self.sat.data[self.sat.date:]
# load previous day, which clears orbit breaks info
try:
self.sat.prev()
# combine this next day orbit with previous last orbit
if not self.sat.empty:
self.sat.data = pds.concat([self.sat.data, temp_orbit_data])
# select first orbit of combined data
self._getBasicOrbit(orbit=-1)
else:
self.sat.next()
self._getBasicOrbit(orbit=1)
except StopIteration:
# if loading the first orbit, of first day of data, you'll
# end up here as the attempt to make a full orbit will
                    # move the date backwards, and StopIteration is raised.
# everything is already ok, just move along
pass
del temp_orbit_data
print('Loaded Orbit:%i' % (self.current - 1))
elif self.current == 0:
self.load(orbit=-1)
return
elif self.current < 2:
# first, load prev orbit data
self._getBasicOrbit(orbit=1)
# need to save this current orbit and load the prev day
temp_orbit_data = self.sat[self.sat.date:]
# load previous day, which clears orbit breaks info
self.sat.prev()
# combine this next day orbit with previous last orbit
if not self.sat.empty:
load_prev = True
if self.sat._iter_type == 'date':
delta = pds.to_timedelta(self.sat.date - self.sat.data.index[-1]) + np.timedelta64(1, 'D')
if delta >= self.orbit_period:
                        # don't need to load the prev day because this orbit ends more than an orbital
# period from start of today's date
load_prev = False
if load_prev:
self.sat.data = pds.concat([self.sat.data, temp_orbit_data])
# select second to last orbit of combined data
self._getBasicOrbit(orbit=-2)
else:
                    # padding from the previous day isn't needed here
self._getBasicOrbit(orbit=-1)
if self.sat._iter_type == 'date':
delta = pds.to_timedelta(self.sat.date - self.sat.data.index[-1]) + np.timedelta64(1, 'D')
if delta < self.orbit_period:
self.current = self.num
self.prev()
else:
while self.sat.empty:
self.sat.prev()
self._getBasicOrbit(orbit=-1)
del temp_orbit_data
print('Loaded Orbit:%i' % (self.current - 1))
else:
raise Exception(
                'You ended up where no one should ever be. Talk to someone about this fundamental failure.')
# includes hack to appear to be zero indexed
#print('Loaded Orbit:%i' % (self.current - 1))
else:
# no data
while self.sat.empty:
self.sat.prev() # raises stopIteration at end of dataset
self.prev()
def __iter__(self):
"""Support iteration by orbit.
For each iteration the next available orbit is loaded into
inst.data.
Examples
--------
::
for inst in inst.orbits:
print 'next available orbit ', inst.data
Note
----
Limits of iteration set by setting inst.bounds.
"""
# load up the first increment of data
# coupling with Instrument frame is high, but it is already
# high in a number of areas
while self.sat.empty:
self.sat.next()
# if self.sat._iter_type == 'file':
# for fname in self.sat._iter_list:
# self.sat.load(fname=fname)
# break
#
# elif self.sat._iter_type == 'date':
# for date in self.sat._iter_list:
# self.sat.load(date=date)
# break
# else:
# raise ValueError('Iteration type not set')
while True:
self.next()
yield self.sat
| bsd-3-clause |
wxgeo/geophar | wxgeometrie/GUI/aide.py | 1 | 10319 | # -*- coding: utf-8 -*-
##--------------------------------------#######
#                  Help                #
##--------------------------------------#######
# WxGeometrie
# Dynamic geometry, graph plotter, and more for French mathematics teachers.
# Copyright (C) 2005-2013 Nicolas Pourcelot
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QPushButton, QDialog, QWidget, QVBoxLayout, \
QHBoxLayout, QLabel, QTextEdit, QTabWidget
from PyQt5.QtCore import Qt
from .. import param
from ..param import NOMPROG, LOGO
ANNEE = param.date_version[0]
from ..pylib.infos import informations_configuration
from ..pylib import path2
from .app import app, white_palette
class Informations(QDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.setWindowTitle("Configuration systeme")
self.setPalette(white_palette)
panel = QWidget(self)
panelSizer = QVBoxLayout()
textes = informations_configuration().split("\n")
for i, texte in enumerate(textes):
if texte.startswith("+ "):
textes[i] = '<i>' + texte + '</i>'
t = QLabel('<br>'.join(textes), panel)
panelSizer.addWidget(t)
btnOK = QPushButton("OK", panel)
btnOK.clicked.connect(self.close)
btnCopier = QPushButton("Copier", panel)
btnCopier.clicked.connect(self.copier)
sizer = QHBoxLayout()
sizer.addWidget(btnOK)
sizer.addStretch()
sizer.addWidget(btnCopier)
panelSizer.addLayout(sizer)
panel.setLayout(panelSizer)
topSizer = QHBoxLayout()
topSizer.addWidget(panel)
self.setLayout(topSizer)
def copier(self):
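        # Copy the configuration report to the system clipboard
        # (vers_presse_papier is the application's "to clipboard" helper).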
app.vers_presse_papier(informations_configuration())
class APropos(QWidget):
def __init__(self, parent):
QWidget.__init__(self, parent)
self.parent = parent
sizer = QVBoxLayout()
global LOGO
LOGO = path2(LOGO)
logo = QLabel(self)
logo.setPixmap(QPixmap(LOGO))
sizer.addWidget(logo, 0, Qt.AlignCenter)
date = "/".join(str(n) for n in reversed(param.date_version))
textes = ["<b>%s version %s</b>" % (NOMPROG, param.version)]
textes.append("<i>Version publiée le " + date + "</i>")
textes.append('')
textes.append("« Le couteau suisse du prof de maths »")
textes.append('')
textes.append("<img src='%s'> <b>%s est un \
<a href='http://fr.wikipedia.org/wiki/Logiciel_libre'> \
logiciel libre</a></b>"
%(path2('%/wxgeometrie/images/copyleft.png'), NOMPROG))
textes.append("Vous pouvez l'utiliser et le modifier selon les termes de la GNU Public License v2.")
textes.append("<i>Copyleft 2005-%s Nicolas Pourcelot ([email protected])</i>"
% ANNEE)
textes.append('')
label = QLabel('<br>'.join(textes))
label.setAlignment(Qt.AlignCenter)
label.setOpenExternalLinks(True)
sizer.addWidget(label, 0, Qt.AlignCenter)
self.setLayout(sizer)
class Licence(QWidget):
def __init__(self, parent):
QWidget.__init__(self, parent)
self.parent = parent
sizer = QVBoxLayout()
texte = QTextEdit(self)
with open(path2("%/wxgeometrie/doc/license.txt"), "r", encoding="utf8") as f:
msg = f.read()
texte.setPlainText(msg)
texte.setMinimumHeight(500)
texte.setReadOnly(True)
texte.setLineWrapMode(QTextEdit.NoWrap)
doc = texte.document()
width = doc.idealWidth() + 4*doc.documentMargin()
texte.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
texte.setMinimumWidth(width)
sizer.addWidget(texte)
self.setLayout(sizer)
class Notes(QWidget):
def __init__(self, parent):
QWidget.__init__(self, parent)
self.parent = parent
sizer = QVBoxLayout()
texte = QTextEdit(self)
with open(path2("%/wxgeometrie/doc/changelog.txt"), 'r', encoding='utf8') as f:
msg = f.read().replace('\n', '<br>')
titre = "<b>Changements apportés par la version courante (%s) :</b>" % param.version
msg = '<br>'.join((titre, '', msg))
texte.setHtml(msg)
texte.setMinimumHeight(500)
texte.setMinimumWidth(300)
texte.setReadOnly(True)
doc = texte.document()
width = doc.idealWidth() + 4*doc.documentMargin()
texte.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
texte.setMinimumWidth(width)
sizer.addWidget(texte)
self.setLayout(sizer)
class Credits(QWidget):
def __init__(self, parent):
QWidget.__init__(self, parent)
self.parent = parent
sizer = QVBoxLayout()
texte = \
"""<h3>Contributeurs :</h3>
<p><i>Les personnes suivantes ont contribué au code de %(NOMPROG)s</i></p>
<ul>
<li><i>Boris Mauricette</i> : statistiques, interpolation (2011-2012)</li>
<li><i>Christophe Gragnic</i> : gestion de la documentation (2012)</li>
</ul>
<br>
<h3>Remerciements :</h3>
<p>
<a href="http://wxgeo.free.fr/doc/html/help.html#remerciements">
De nombreuses personnes</a> ont aidé ce projet, par leurs retours d'expérience,<br>
ou par leur aide à l'installation sur certaines plateformes.<br>
Qu'elles en soient remerciées.</p>
<p>
Remerciements tous particuliers à <i>Jean-Pierre Garcia</i> et à <i>Georges Khaznadar</i>.</p>
<br>
<h3>Librairies utilisées :</h3>
<ul>
<li>%(NOMPROG)s inclut désormais <a href='http://www.sympy.org'> SymPy</a>
(Python library for symbolic mathematics)<br>
© 2006-%(ANNEE)s <i>The Sympy Team</i></li>
<li>%(NOMPROG)s est codé en <a href="http://www.python.org">Python</a></li>
<li><a href="http://www.numpy.org">Numpy</a> est une bibliothèque de calcul numérique</li>
<li><a href="http://www.matplotlib.org">Matplotlib</a> est une librairie graphique scientifique</li>
<li><a href="http://www.riverbankcomputing.co.uk/software/pyqt">PyQt</a>
est utilisé pour l'interface graphique</li>
</ul>
<p>
Plus généralement, je remercie tous les acteurs de la communauté du logiciel libre,<br>
tous ceux qui prennent la peine de partager leur savoir et leur travail.</p>
<p>Nous ne sommes jamais que <i>« des nains juchés sur des épaules de géants » (Bernard de Chartres)</i>.
<br>
<p><i>À Sophie, Clémence, Timothée, Olivier.</i></p>
<p><i>« Il y a des yeux qui reçoivent la lumière, et il y a des yeux qui la donnent. » (Paul Claudel)</i>
</p>
""" % globals()
label = QLabel(texte)
label.setOpenExternalLinks(True)
sizer.addWidget(label)
sizer.addStretch()
self.setLayout(sizer)
class OngletsAbout(QTabWidget):
def __init__(self, parent):
QTabWidget.__init__(self, parent)
self.addTab(APropos(parent), 'À propos')
self.addTab(Licence(parent), 'Licence')
self.addTab(Notes(parent), 'Notes de version')
self.addTab(Credits(parent), 'Crédits')
self.setTabPosition(QTabWidget.South)
self.setStyleSheet("""
QTabBar::tab:selected {
background: white;
border: 1px solid #C4C4C3;
border-top-color: white; /* same as the pane color */
border-bottom-left-radius: 4px;
border-bottom-right-radius: 4px;
border-top-left-radius: 0px;
border-top-right-radius: 0px;
min-width: 8ex;
padding: 7px;
}
QStackedWidget {background:white}
QTabBar QToolButton {
background:white;
border: 1px solid #C4C4C3;
border-top-color: white; /* same as the pane color */
border-bottom-left-radius: 4px;
border-bottom-right-radius: 4px;
border-top-left-radius: 0px;
border-top-right-radius: 0px;
}
""")
class About(QDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.setWindowTitle("A propos de " + NOMPROG)
self.setPalette(white_palette)
##self.setWindowFlags(Qt.Dialog|Qt.FramelessWindowHint|Qt.X11BypassWindowManagerHint)
sizer = QVBoxLayout()
sizer.addWidget(OngletsAbout(self))
self.setLayout(sizer)
##class WhiteScrolledMessageDialog(QDialog):
##def __init__(self, parent, title='', msg = '', width=None):
##QDialog.__init__(self, parent)
##self.setWindowTitle(title)
##self.setPalette(white_palette)
##
##sizer = QVBoxLayout()
##self.setLayout(sizer)
##
##texte = QTextEdit(self)
##texte.setPlainText(msg)
##texte.setMinimumHeight(500)
##texte.setReadOnly(True)
##if width is None:
##texte.setLineWrapMode(QTextEdit.NoWrap)
##doc = texte.document()
##width = doc.idealWidth() + 4*doc.documentMargin()
##texte.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
##texte.setMinimumWidth(width)
##sizer.addWidget(texte)
##
##boutons = QHBoxLayout()
##boutons.addStretch()
##ok = QPushButton('OK', clicked=self.close)
##boutons.addWidget(ok)
##boutons.addStretch()
##sizer.addLayout(boutons)
| gpl-2.0 |
kwecht/ML-with-Kaggle | code/Linear_Regression_Funcs.py | 1 | 6293 | # Functions to support the linear regression ipython notebook work.
import pandas as pd
import numpy as np
def add_dummy(df, categories, label, drop=False):#, interaction=None):
"""
df - dataframe in which to place new dummy variables
categories - categorical variable from which make dummy variables
label - string of how to label each dummy column.
drop - Boolean indicating whether to drop a column of dummies
"""
# Get dataframe of dummy variables from categories
dum = pd.get_dummies(categories)
# Set index to match that of new dataframe
dum = dum.set_index(df.index)
# Label columns of dummy variables
dum.columns = [label+'_'+str(val) for val in dum.columns]
# Drop one column of dummy variable so that no column is
# a linear combination of another. Do this when using
# a constant in the linear model.
if drop==True:
dum.drop(dum.columns[0],axis=1,inplace=True)
# Join new dummy dataframe to the dataframe of variables
# for the regression
df = df.join(dum)
# Return new updated dataframe to the calling program
return df
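# Illustrative call (a sketch; df is assumed to carry a datetime index, as in the
# accompanying notebook):
#   X = add_dummy(X, df.index.hour, 'Hour', drop=True)
# appends 0/1 indicator columns Hour_1 ... Hour_23 to X, dropping Hour_0 so the
# dummies are not collinear with a constant term.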
def add_interactions(df, variables):
"""
df - dataframe in which to place interaction terms
variables - list of names from which to create interaction terms
"""
# Enumerate all variables in each group
vardict = {}
for var in variables:
if var=='Hour':
vardict[var] = ['Hour_'+str(val) for val in range(1,24)]
if var=='Day':
vardict[var] = ['Day_'+str(val) for val in range(1,7)]
if var=='Season':
vardict[var] = ['Season_'+str(val) for val in range(1,4)]
if var=='Month':
vardict[var] = ['Month_'+str(val+1) for val in range(1,12)]
if var=='Weather':
vardict[var] = ['Weather_'+str(val+1) for val in range(1,4)]
if var=='days_elapsed':
vardict[var] = 'days_elapsed'
# Add interaction between all items in the variable dictionary
if len(variables)==2:
for value1 in vardict.values()[0]:
for value2 in vardict.values()[1]:
newname = value1+'_*_'+value2
df[newname] = df[value1]*df[value2]
if len(variables)==3:
for value1 in vardict.values()[0]:
for value2 in vardict.values()[1]:
for value3 in vardict.values()[2]:
newname = value1+'_*_'+value2+'_*_'+value3
df[newname] = df[value1]*df[value2]*df[value3]
if len(variables)==4:
for value1 in vardict.values()[0]:
for value2 in vardict.values()[1]:
for value3 in vardict.values()[2]:
for value4 in vardict.values()[3]:
newname = value1+'_*_'+value2+'_*_'+value3+'_*_'+value4
df[newname] = df[value1]*df[value2]*df[value3]*df[value4]
# Return dataframe to calling program
return df
def make_matrix(df, monthly_scale={}, weather_scale={}, constant=False):
"""
Function to build matrix for statsmodels regression.
    monthly_scale - dict mapping each month (timestamp) to the factor by which to scale that month's rows
    weather_scale - dict mapping each weather type to the factor by which to scale rows with that weather
constant - if True, adds constant to regression.
"""
# Define new dataframe to hold the matrix
X = pd.DataFrame(index=df.index)
# Add time variables to the predictor variables matrix
months_elapsed = []
for val in X.index:
yeardiff = val.year - X.index[0].year
monthdiff = val.month - X.index[0].month
months_elapsed.append(12*yeardiff + monthdiff)
X = add_dummy(X, months_elapsed, 'Months_Elapsed', drop=True)
X = add_dummy(X, df.index.hour, 'Hour', drop=True)
X = add_dummy(X, df.index.dayofweek, 'Day', drop=True)
X = add_dummy(X, df.weather, 'Weather', drop=True)
# Add holidays by forcing them to look like a Saturday
X['Day_5'][df.holiday==1] = 1
# Add interaction terms
# After some experimentation, the major interaction term is Day_of_week*Hour_of_Day
# This is the big one. Each day of the week has its own daily pattern of rides
X = add_interactions(X, ['Day','Hour'])
# Most of the weather data proves unreliable
#X['temp'] = df.temp
# Scale each row of X by the mean ridership that month.
# This lets us scale our model to mean ridership each month, so the
# months with fewer riders will have less pronounced daily cycles
if monthly_scale!={}:
for time,scale in monthly_scale.iteritems():
this = (df.index.month==time.month) & (df.index.year==time.year)
X[this] = scale*X[this]
# Scale each row of X by the mean weather during that weather type
# This lets us scale our model to mean ridership during different types
# of weather.
if weather_scale!={}:
for weather,scale in weather_scale.iteritems():
this = (df['weather']==weather)
X[this] = scale*X[this]
# Do not add constant because we already have mean offsets for each month of data.
# A constant would be a linear combination of the monthly indicator variables.
if constant==True:
X = sm.add_constant(X,prepend=True)
# Return dataframe to calling program
return X
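# Illustrative pipeline using the helpers in this module (a sketch; 'train' is a
# hypothetical dataframe with 'count' and 'weather' columns):
#   scales = get_scale(train, ['monthly', 'weather'])
#   X = make_matrix(train, monthly_scale=scales['monthly'],
#                   weather_scale=scales['weather'])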
def score( obs, predict ):
"""
Calculate score on the predictions (predict) given the observations (obs).
"""
rmsle = np.sqrt( np.sum( 1./len(obs) * (np.log(predict+1) - np.log(obs+1))**2 ) )
return rmsle
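# Quick sanity check (hypothetical numbers): perfect predictions give a score of
# 0, e.g. score(np.array([1., 2., 3.]), np.array([1., 2., 3.])) == 0.0, and the
# metric penalises relative (log-scale) rather than absolute errors.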
def get_scale(df,types):
"""
Return dictionary of scale factors for all types in types.
"""
outdict = {}
for tt in types:
if tt=='weather':
weather_scale = {}
weather_mean = df['count'].groupby(df['weather']).mean()
for ii in range(len(weather_mean)):
weather_scale[weather_mean.index[ii]] = weather_mean.iloc[ii] / df['count'].mean()
outdict[tt] = weather_scale
if tt=='monthly':
monthly_scale = {}
monthly_mean = df['count'].resample('1m',how=np.mean)
for ii in range(len(monthly_mean)):
monthly_scale[monthly_mean.index[ii]] = monthly_mean.iloc[ii] / df['count'].mean()
outdict[tt] = monthly_scale
return outdict
| mit |
alejob/mdanalysis | package/MDAnalysis/visualization/streamlines.py | 1 | 16103 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
'''
Multicore 2D streamplot Python library for MDAnalysis --- :mod:`MDAnalysis.visualization.streamlines`
=====================================================================================================
:Authors: Tyler Reddy and Matthieu Chavent
:Year: 2014
:Copyright: GNU Public License v3
:Citation: [Chavent2014]_
.. autofunction:: generate_streamlines
'''
try:
import matplotlib
import matplotlib.path
except ImportError:
raise ImportError(
'2d streamplot module requires: matplotlib.path for its path.Path.contains_points method. The installation '
'instructions for the matplotlib module can be found here: '
'http://matplotlib.org/faq/installing_faq.html?highlight=install')
import MDAnalysis
import multiprocessing
import numpy as np
import scipy
def produce_grid(tuple_of_limits, grid_spacing):
'''Produce a grid for the simulation system based on the tuple of Cartesian Coordinate limits calculated in an
earlier step.'''
x_min, x_max, y_min, y_max = tuple_of_limits
grid = np.mgrid[x_min:x_max:grid_spacing, y_min:y_max:grid_spacing]
return grid
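# Illustrative shapes (hypothetical limits): produce_grid((0.0, 100.0, 0.0, 100.0), 20.0)
# returns an array of shape (2, 5, 5) whose two leading slices hold the x and y
# coordinates of a 5 x 5 grid of vertices.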
def split_grid(grid, num_cores):
'''Take the overall grid for the system and split it into lists of square vertices that can be distributed to
each core. Limited to 2D for now'''
# produce an array containing the cartesian coordinates of all vertices in the grid:
x_array, y_array = grid
grid_vertex_cartesian_array = np.dstack((x_array, y_array))
#the grid_vertex_cartesian_array has N_rows, with each row corresponding to a column of coordinates in the grid (
# so a given row has shape N_rows, 2); overall shape (N_columns_in_grid, N_rows_in_a_column, 2)
#although I'll eventually want a pure numpy/scipy/vector-based solution, for now I'll allow loops to simplify the
# division of the cartesian coordinates into a list of the squares in the grid
list_all_squares_in_grid = [] # should eventually be a nested list of all the square vertices in the grid/system
list_parent_index_values = [] # want an ordered list of assignment indices for reconstructing the grid positions
# in the parent process
current_column = 0
while current_column < grid_vertex_cartesian_array.shape[0] - 1:
# go through all the columns except the last one and account for the square vertices (the last column
# has no 'right neighbour')
current_row = 0
while current_row < grid_vertex_cartesian_array.shape[1] - 1:
# all rows except the top row, which doesn't have a row above it for forming squares
bottom_left_vertex_current_square = grid_vertex_cartesian_array[current_column, current_row]
bottom_right_vertex_current_square = grid_vertex_cartesian_array[current_column + 1, current_row]
top_right_vertex_current_square = grid_vertex_cartesian_array[current_column + 1, current_row + 1]
top_left_vertex_current_square = grid_vertex_cartesian_array[current_column, current_row + 1]
#append the vertices of this square to the overall list of square vertices:
list_all_squares_in_grid.append(
[bottom_left_vertex_current_square, bottom_right_vertex_current_square, top_right_vertex_current_square,
top_left_vertex_current_square])
list_parent_index_values.append([current_row, current_column])
current_row += 1
current_column += 1
#split the list of square vertices [[v1,v2,v3,v4],[v1,v2,v3,v4],...,...] into roughly equally-sized sublists to
# be distributed over the available cores on the system:
list_square_vertex_arrays_per_core = np.array_split(list_all_squares_in_grid, num_cores)
list_parent_index_values = np.array_split(list_parent_index_values, num_cores)
return [list_square_vertex_arrays_per_core, list_parent_index_values, current_row, current_column]
def per_core_work(coordinate_file_path, trajectory_file_path, list_square_vertex_arrays_this_core, MDA_selection,
start_frame, end_frame, reconstruction_index_list, maximum_delta_magnitude):
'''The code to perform on a given core given the list of square vertices assigned to it.'''
# obtain the relevant coordinates for particles of interest
universe_object = MDAnalysis.Universe(coordinate_file_path, trajectory_file_path)
list_previous_frame_centroids = []
list_previous_frame_indices = []
#define some utility functions for trajectory iteration:
def produce_list_indices_point_in_polygon_this_frame(vertex_coord_list):
list_indices_point_in_polygon = []
for square_vertices in vertex_coord_list:
path_object = matplotlib.path.Path(square_vertices)
index_list_in_polygon = np.where(path_object.contains_points(relevant_particle_coordinate_array_xy))
list_indices_point_in_polygon.append(index_list_in_polygon)
return list_indices_point_in_polygon
def produce_list_centroids_this_frame(list_indices_in_polygon):
list_centroids_this_frame = []
for indices in list_indices_in_polygon:
if not indices[0].size > 0: # if there are no particles of interest in this particular square
list_centroids_this_frame.append('empty')
else:
current_coordinate_array_in_square = relevant_particle_coordinate_array_xy[indices]
current_square_indices_centroid = np.average(current_coordinate_array_in_square, axis=0)
list_centroids_this_frame.append(current_square_indices_centroid)
return list_centroids_this_frame # a list of numpy xy centroid arrays for this frame
for ts in universe_object.trajectory:
if ts.frame < start_frame: # don't start until first specified frame
continue
relevant_particle_coordinate_array_xy = universe_object.select_atoms(MDA_selection).coordinates()[..., :-1]
# only 2D / xy coords for now
#I will need a list of indices for relevant particles falling within each square in THIS frame:
list_indices_in_squares_this_frame = produce_list_indices_point_in_polygon_this_frame(
list_square_vertex_arrays_this_core)
#likewise, I will need a list of centroids of particles in each square (same order as above list):
list_centroids_in_squares_this_frame = produce_list_centroids_this_frame(list_indices_in_squares_this_frame)
if list_previous_frame_indices: # if the previous frame had indices in at least one square I will need to use
# those indices to generate the updates to the corresponding centroids in this frame:
list_centroids_this_frame_using_indices_from_last_frame = produce_list_centroids_this_frame(
list_previous_frame_indices)
#I need to write a velocity of zero if there are any 'empty' squares in either frame:
xy_deltas_to_write = []
for square_1_centroid, square_2_centroid in zip(list_centroids_this_frame_using_indices_from_last_frame,
list_previous_frame_centroids):
if square_1_centroid == 'empty' or square_2_centroid == 'empty':
xy_deltas_to_write.append([0, 0])
else:
xy_deltas_to_write.append(np.subtract(square_1_centroid, square_2_centroid).tolist())
#xy_deltas_to_write = np.subtract(np.array(
# list_centroids_this_frame_using_indices_from_last_frame),np.array(list_previous_frame_centroids))
xy_deltas_to_write = np.array(xy_deltas_to_write)
#now filter the array to only contain distances in the range [-8,8] as a placeholder for dealing with PBC
# issues (Matthieu seemed to use a limit of 8 as well);
xy_deltas_to_write = np.clip(xy_deltas_to_write, -maximum_delta_magnitude, maximum_delta_magnitude)
#with the xy and dx,dy values calculated I need to set the values from this frame to previous frame
# values in anticipation of the next frame:
list_previous_frame_centroids = list_centroids_in_squares_this_frame[:]
list_previous_frame_indices = list_indices_in_squares_this_frame[:]
else: # either no points in squares or after the first frame I'll just reset the 'previous' values so they
# can be used when consecutive frames have proper values
list_previous_frame_centroids = list_centroids_in_squares_this_frame[:]
list_previous_frame_indices = list_indices_in_squares_this_frame[:]
if ts.frame > end_frame:
break # stop here
return zip(reconstruction_index_list, xy_deltas_to_write.tolist())
def generate_streamlines(coordinate_file_path, trajectory_file_path, grid_spacing, MDA_selection, start_frame,
end_frame, xmin, xmax, ymin, ymax, maximum_delta_magnitude, num_cores='maximum'):
'''Produce the x and y components of a 2D streamplot data set.
:Parameters:
**coordinate_file_path** : str
Absolute path to the coordinate file
**trajectory_file_path** : str
Absolute path to the trajectory file. It will normally be desirable to filter the trajectory with a tool
such as GROMACS g_filter (see [Chavent2014]_)
**grid_spacing** : float
The spacing between grid lines (angstroms)
**MDA_selection** : str
MDAnalysis selection string
**start_frame** : int
First frame number to parse
**end_frame** : int
Last frame number to parse
**xmin** : float
Minimum coordinate boundary for x-axis (angstroms)
**xmax** : float
Maximum coordinate boundary for x-axis (angstroms)
**ymin** : float
Minimum coordinate boundary for y-axis (angstroms)
**ymax** : float
Maximum coordinate boundary for y-axis (angstroms)
**maximum_delta_magnitude** : float
Absolute value of the largest displacement tolerated for the centroid of a group of particles (
angstroms). Values above this displacement will not count in the streamplot (treated as excessively large
displacements crossing the periodic boundary)
**num_cores** : int, optional
The number of cores to use. (Default 'maximum' uses all available cores)
:Returns:
**dx_array** : array of floats
An array object containing the displacements in the x direction
**dy_array** : array of floats
An array object containing the displacements in the y direction
**average_displacement** : float
:math:`\\frac {\\sum \\sqrt[]{dx^2 + dy^2}} {N}`
**standard_deviation_of_displacement** : float
standard deviation of :math:`\\sqrt[]{dx^2 + dy^2}`
:Examples:
::
import matplotlib, matplotlib.pyplot, np
import MDAnalysis, MDAnalysis.visualization.streamlines
u1, v1, average_displacement,standard_deviation_of_displacement =
MDAnalysis.visualization.streamlines.generate_streamlines('testing.gro','testing_filtered.xtc',grid_spacing =
20, MDA_selection = 'name PO4',start_frame=2,end_frame=3,xmin=-8.73000049591,xmax= 1225.96008301,
ymin= -12.5799999237, ymax=1224.34008789,maximum_delta_magnitude = 1.0,num_cores=16)
x = np.linspace(0,1200,61)
y = np.linspace(0,1200,61)
speed = np.sqrt(u1*u1 + v1*v1)
fig = matplotlib.pyplot.figure()
ax = fig.add_subplot(111,aspect='equal')
ax.set_xlabel('x ($\AA$)')
ax.set_ylabel('y ($\AA$)')
ax.streamplot(x,y,u1,v1,density=(10,10),color=speed,linewidth=3*speed/speed.max())
fig.savefig('testing_streamline.png',dpi=300)
.. image:: testing_streamline.png
.. [Chavent2014] Chavent, M.\*, Reddy, T.\*, Dahl, C.E., Goose, J., Jobard, B., and Sansom, M.S.P. (2014)
Methodologies for the analysis of instantaneous lipid diffusion in MD simulations of large membrane systems.
*Faraday Discussions* **169**: **Accepted**
'''
# work out the number of cores to use:
if num_cores == 'maximum':
num_cores = multiprocessing.cpu_count() # use all available cores
else:
num_cores = num_cores # use the value specified by the user
#assert isinstance(num_cores,(int,long)), "The number of specified cores must (of course) be an integer."
np.seterr(all='warn', over='raise')
parent_list_deltas = [] # collect all data from child processes here
def log_result_to_parent(delta_array):
parent_list_deltas.extend(delta_array)
tuple_of_limits = (xmin, xmax, ymin, ymax)
grid = produce_grid(tuple_of_limits=tuple_of_limits, grid_spacing=grid_spacing)
list_square_vertex_arrays_per_core, list_parent_index_values, total_rows, total_columns = \
split_grid(grid=grid,
num_cores=num_cores)
pool = multiprocessing.Pool(num_cores)
for vertex_sublist, index_sublist in zip(list_square_vertex_arrays_per_core, list_parent_index_values):
pool.apply_async(per_core_work, args=(
coordinate_file_path, trajectory_file_path, vertex_sublist, MDA_selection, start_frame, end_frame,
index_sublist, maximum_delta_magnitude), callback=log_result_to_parent)
pool.close()
pool.join()
dx_array = np.zeros((total_rows, total_columns))
dy_array = np.zeros((total_rows, total_columns))
#the parent_list_deltas is shaped like this: [ ([row_index,column_index],[dx,dy]), ... (...),...,]
for index_array, delta_array in parent_list_deltas: # go through the list in the parent process and assign to the
# appropriate positions in the dx and dy matrices:
#build in a filter to replace all values at the cap (currently between -8,8) with 0 to match Matthieu's code
# (I think eventually we'll reduce the cap to a narrower boundary though)
index_1 = index_array.tolist()[0]
index_2 = index_array.tolist()[1]
if abs(delta_array[0]) == maximum_delta_magnitude:
dx_array[index_1, index_2] = 0
else:
dx_array[index_1, index_2] = delta_array[0]
if abs(delta_array[1]) == maximum_delta_magnitude:
dy_array[index_1, index_2] = 0
else:
dy_array[index_1, index_2] = delta_array[1]
#at Matthieu's request, we now want to calculate the average and standard deviation of the displacement values:
displacement_array = np.sqrt(dx_array ** 2 + dy_array ** 2)
average_displacement = np.average(displacement_array)
standard_deviation_of_displacement = np.std(displacement_array)
return (dx_array, dy_array, average_displacement, standard_deviation_of_displacement)
# if __name__ == '__main__': #execute the main control function only if this file is called as a top-level script
#will probably mostly use this for testing on a trajectory:
| gpl-2.0 |
OpenDroneMap/OpenDroneMap | opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py | 2 | 1694 | import numpy as np
from sklearn.linear_model import RANSACRegressor
from .dimension import Dimension
class DistanceDimension(Dimension):
"""Assign each point the distance to the estimated ground"""
def __init__(self):
super(DistanceDimension, self).__init__()
def assign_default(self, point_cloud):
default = np.full(point_cloud.len(), -1)
super(DistanceDimension, self)._set_values(point_cloud, default)
def assign(self, *point_clouds, **kwargs):
for point_cloud in point_clouds:
xy = point_cloud.get_xy()
            # Calculate RANSAC model
model = RANSACRegressor().fit(xy, point_cloud.get_z())
# Calculate angle between estimated plane and XY plane
angle = self.__calculate_angle(model)
if angle >= 45:
# If the angle is higher than 45 degrees, then don't calculate the difference, since it will probably be way off
diff = np.full(point_cloud.len(), 0)
else:
predicted = model.predict(xy)
diff = point_cloud.get_z() - predicted
# Ignore the diff when the diff is below the ground
diff[diff < 0] = 0
super(DistanceDimension, self)._set_values(point_cloud, diff)
def get_name(self):
return 'distance_to_ground'
def get_las_type(self):
return 10
def __calculate_angle(self, model):
"Calculate the angle between the estimated plane and the XY plane"
a = model.estimator_.coef_[0]
b = model.estimator_.coef_[1]
angle = np.arccos(1 / np.sqrt(a ** 2 + b ** 2 + 1))
return np.degrees(angle)
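# A minimal, self-contained sketch (illustrative only, not used by the class
# above, and assuming the module's imports are available): fit a RANSAC ground
# plane to synthetic points and reproduce the angle and distance-to-ground
# logic from DistanceDimension by hand. The synthetic data are assumptions.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    xy = rng.uniform(0, 10, size=(200, 2))
    # a gently sloped ground plane z = 0.1*x + 0.05*y plus a little noise
    z = 0.1 * xy[:, 0] + 0.05 * xy[:, 1] + rng.normal(0, 0.01, 200)
    model = RANSACRegressor().fit(xy, z)
    a, b = model.estimator_.coef_[0], model.estimator_.coef_[1]
    # same formula as __calculate_angle above
    angle = np.degrees(np.arccos(1 / np.sqrt(a ** 2 + b ** 2 + 1)))
    diff = z - model.predict(xy)
    diff[diff < 0] = 0  # points below the estimated ground get distance 0
    print("plane angle (deg):", angle)
    print("max distance above ground:", diff.max())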
| gpl-3.0 |
bikong2/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
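# Hedged usage sketch (not one of the original tests): the typical MinCovDet
# workflow exercised above, shown outside a test function. The synthetic data
# below are an assumption for illustration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    data = rng.randn(200, 3)
    data[:10] += 10.  # a few gross outliers
    mcd = MinCovDet(random_state=rng).fit(data)
    print("robust location:", mcd.location_)
    print("support size (inliers kept):", mcd.support_.sum())
    print("largest robust Mahalanobis distance:", mcd.mahalanobis(data).max())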
| bsd-3-clause |
SMTorg/smt | smt/applications/mfkpls.py | 2 | 2458 | # -*- coding: utf-8 -*-
"""
Created on Fri May 04 10:26:49 2018
@author: Mostafa Meliani <[email protected]>
Multi-Fidelity co-Kriging: recursive formulation with autoregressive model of order 1 (AR1)
Partial Least Square decomposition added on highest fidelity level
Adapted on March 2020 by Nathalie Bartoli to the new SMT version
Adapted on January 2021 by Andres Lopez-Lopera to the new SMT version
"""
import numpy as np
from packaging import version
from sklearn import __version__ as sklversion
if version.parse(sklversion) < version.parse("0.22"):
from sklearn.cross_decomposition.pls_ import PLSRegression as pls
else:
from sklearn.cross_decomposition import PLSRegression as pls
from sklearn.metrics.pairwise import manhattan_distances
from smt.applications import MFK
from smt.utils.kriging_utils import componentwise_distance_PLS
class MFKPLS(MFK):
"""
Multi-Fidelity model + PLS (done on the highest fidelity level)
"""
def _initialize(self):
super(MFKPLS, self)._initialize()
declare = self.options.declare
        # Like KPLS, MFKPLS is used only with "abs_exp" and "squar_exp" correlations
declare(
"corr",
"squar_exp",
values=("abs_exp", "squar_exp"),
desc="Correlation function type",
types=(str),
)
declare("n_comp", 1, types=int, desc="Number of principal components")
self.name = "MFKPLS"
def _differences(self, X, Y):
"""
Overrides differences function for MFK
Compute the manhattan_distances
"""
return manhattan_distances(X, Y, sum_over_features=False)
def _componentwise_distance(self, dx, opt=0):
d = componentwise_distance_PLS(
dx, self.options["corr"], self.options["n_comp"], self.coeff_pls
)
return d
def _compute_pls(self, X, y):
_pls = pls(self.options["n_comp"])
# As of sklearn 0.24.1 PLS with zeroed outputs raises an exception while sklearn 0.23 returns zeroed x_rotations
# For now the try/except below is a workaround to restore the 0.23 behaviour
try:
self.coeff_pls = _pls.fit(X.copy(), y.copy()).x_rotations_
except StopIteration:
self.coeff_pls = np.zeros((X.shape[1], self.options["n_comp"]))
return X, y
def _get_theta(self, i):
return np.sum(self.optimal_theta[i] * self.coeff_pls ** 2, axis=1)
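# Illustrative sketch (an assumption about usage, not part of the SMT API): what
# the overridden ``_differences`` returns. With ``sum_over_features=False``,
# ``manhattan_distances`` yields the componentwise absolute differences for every
# (row of X, row of Y) pair, which is what the PLS-weighted correlation consumes.
if __name__ == "__main__":
    X_demo = np.array([[0.0, 0.0], [1.0, 2.0]])
    Y_demo = np.array([[1.0, 1.0]])
    # expected for this sklearn version: [[1., 1.], [0., 1.]]
    print(manhattan_distances(X_demo, Y_demo, sum_over_features=False))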
| bsd-3-clause |
bert9bert/statsmodels | statsmodels/graphics/dotplots.py | 31 | 18190 | import numpy as np
from statsmodels.compat import range
from . import utils
def dot_plot(points, intervals=None, lines=None, sections=None,
styles=None, marker_props=None, line_props=None,
split_names=None, section_order=None, line_order=None,
stacked=False, styles_order=None, striped=False,
horizontal=True, show_names="both",
fmt_left_name=None, fmt_right_name=None,
show_section_titles=None, ax=None):
"""
Produce a dotplot similar in style to those in Cleveland's
"Visualizing Data" book. These are also known as "forest plots".
Parameters
----------
points : array_like
The quantitative values to be plotted as markers.
intervals : array_like
The intervals to be plotted around the points. The elements
of `intervals` are either scalars or sequences of length 2. A
scalar indicates the half width of a symmetric interval. A
sequence of length 2 contains the left and right half-widths
(respectively) of a nonsymmetric interval. If None, no
intervals are drawn.
lines : array_like
A grouping variable indicating which points/intervals are
drawn on a common line. If None, each point/interval appears
on its own line.
sections : array_like
A grouping variable indicating which lines are grouped into
sections. If None, everything is drawn in a single section.
styles : array_like
A grouping label defining the plotting style of the markers
and intervals.
marker_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting markers. Useful keyword
arguments are "color", "marker", and "ms" (marker size).
line_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting interval lines. Useful
keyword arguments are "color", "linestyle", "solid_capstyle",
and "linewidth".
split_names : string
If not None, this is used to split the values of `lines` into
substrings that are drawn in the left and right margins,
respectively. If None, the values of `lines` are drawn in the
left margin.
section_order : array_like
The section labels in the order in which they appear in the
dotplot.
line_order : array_like
The line labels in the order in which they appear in the
dotplot.
stacked : boolean
If True, when multiple points or intervals are drawn on the
same line, they are offset from each other.
styles_order : array_like
If stacked=True, this is the order in which the point styles
on a given line are drawn from top to bottom (if horizontal
        is True) or from left to right (if horizontal is False). If
None (default), the order is lexical.
striped : boolean
If True, every other line is enclosed in a shaded box.
horizontal : boolean
If True (default), the lines are drawn horizontally, otherwise
they are drawn vertically.
show_names : string
Determines whether labels (names) are shown in the left and/or
right margins (top/bottom margins if `horizontal` is True).
If `both`, labels are drawn in both margins, if 'left', labels
are drawn in the left or top margin. If `right`, labels are
drawn in the right or bottom margin.
fmt_left_name : function
The left/top margin names are passed through this function
before drawing on the plot.
fmt_right_name : function
        The right/bottom margin names are passed through this function
before drawing on the plot.
show_section_titles : bool or None
If None, section titles are drawn only if there is more than
one section. If False/True, section titles are never/always
drawn, respectively.
ax : matplotlib.axes
The axes on which the dotplot is drawn. If None, a new axes
is created.
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Notes
-----
`points`, `intervals`, `lines`, `sections`, `styles` must all have
the same length whenever present.
Examples
--------
This is a simple dotplot with one point per line:
>>> dot_plot(points=point_values)
This dotplot has labels on the lines (if elements in
`label_values` are repeated, the corresponding points appear on
the same line):
>>> dot_plot(points=point_values, lines=label_values)
References
----------
* Cleveland, William S. (1993). "Visualizing Data". Hobart
Press.
* Jacoby, William G. (2006) "The Dot Plot: A Graphical Display
for Labeled Quantitative Values." The Political Methodologist
14(1): 6-14.
"""
import matplotlib.transforms as transforms
fig, ax = utils.create_mpl_ax(ax)
# Convert to numpy arrays if that is not what we are given.
points = np.asarray(points)
asarray_or_none = lambda x : None if x is None else np.asarray(x)
intervals = asarray_or_none(intervals)
lines = asarray_or_none(lines)
sections = asarray_or_none(sections)
styles = asarray_or_none(styles)
# Total number of points
npoint = len(points)
# Set default line values if needed
if lines is None:
lines = np.arange(npoint)
# Set default section values if needed
if sections is None:
sections = np.zeros(npoint)
# Set default style values if needed
if styles is None:
styles = np.zeros(npoint)
# The vertical space (in inches) for a section title
section_title_space = 0.5
# The number of sections
nsect = len(set(sections))
if section_order is not None:
nsect = len(set(section_order))
# The number of section titles
if show_section_titles == False:
draw_section_titles = False
nsect_title = 0
elif show_section_titles == True:
draw_section_titles = True
nsect_title = nsect
else:
draw_section_titles = nsect > 1
nsect_title = nsect if nsect > 1 else 0
# The total vertical space devoted to section titles.
section_space_total = section_title_space * nsect_title
# Add a bit of room so that points that fall at the axis limits
# are not cut in half.
ax.set_xmargin(0.02)
ax.set_ymargin(0.02)
if section_order is None:
lines0 = list(set(sections))
lines0.sort()
else:
lines0 = section_order
if line_order is None:
lines1 = list(set(lines))
lines1.sort()
else:
lines1 = line_order
# A map from (section,line) codes to index positions.
lines_map = {}
for i in range(npoint):
if section_order is not None and sections[i] not in section_order:
continue
if line_order is not None and lines[i] not in line_order:
continue
ky = (sections[i], lines[i])
if ky not in lines_map:
lines_map[ky] = []
lines_map[ky].append(i)
# Get the size of the axes on the parent figure in inches
bbox = ax.get_window_extent().transformed(
fig.dpi_scale_trans.inverted())
awidth, aheight = bbox.width, bbox.height
# The number of lines in the plot.
nrows = len(lines_map)
# The positions of the lowest and highest guideline in axes
# coordinates (for horizontal dotplots), or the leftmost and
# rightmost guidelines (for vertical dotplots).
bottom, top = 0, 1
if horizontal:
# x coordinate is data, y coordinate is axes
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
else:
# x coordinate is axes, y coordinate is data
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
# Space used for a section title, in axes coordinates
title_space_axes = section_title_space / aheight
# Space between lines
if horizontal:
dpos = (top - bottom - nsect_title*title_space_axes) /\
float(nrows)
else:
dpos = (top - bottom) / float(nrows)
# Determine the spacing for stacked points
if styles_order is not None:
style_codes = styles_order
else:
style_codes = list(set(styles))
style_codes.sort()
# Order is top to bottom for horizontal plots, so need to
# flip.
if horizontal:
style_codes = style_codes[::-1]
# nval is the maximum number of points on one line.
nval = len(style_codes)
if nval > 1:
stackd = dpos / (2.5*(float(nval)-1))
else:
stackd = 0.
# Map from style code to its integer position
#style_codes_map = {x: style_codes.index(x) for x in style_codes}
# python 2.6 compat version:
style_codes_map = dict((x, style_codes.index(x)) for x in style_codes)
# Setup default marker styles
colors = ["r", "g", "b", "y", "k", "purple", "orange"]
if marker_props is None:
#marker_props = {x: {} for x in style_codes}
# python 2.6 compat version:
marker_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in marker_props[sc]:
marker_props[sc]["color"] = colors[j % len(colors)]
if "marker" not in marker_props[sc]:
marker_props[sc]["marker"] = "o"
if "ms" not in marker_props[sc]:
marker_props[sc]["ms"] = 10 if stackd == 0 else 6
# Setup default line styles
if line_props is None:
#line_props = {x: {} for x in style_codes}
# python 2.6 compat version:
line_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in line_props[sc]:
line_props[sc]["color"] = "grey"
if "linewidth" not in line_props[sc]:
line_props[sc]["linewidth"] = 2 if stackd > 0 else 8
if horizontal:
# The vertical position of the first line.
pos = top - dpos/2 if nsect == 1 else top
else:
# The horizontal position of the first line.
pos = bottom + dpos/2
# Points that have already been labeled
labeled = set()
# Positions of the y axis grid lines
ticks = []
# Loop through the sections
for k0 in lines0:
# Draw a section title
if draw_section_titles:
if horizontal:
y0 = pos + dpos/2 if k0 == lines0[0] else pos
ax.fill_between((0, 1), (y0,y0),
(pos-0.7*title_space_axes,
pos-0.7*title_space_axes),
color='darkgrey',
transform=ax.transAxes,
zorder=1)
txt = ax.text(0.5, pos - 0.35*title_space_axes, k0,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
txt.set_fontweight("bold")
pos -= title_space_axes
else:
m = len([k for k in lines_map if k[0] == k0])
ax.fill_between((pos-dpos/2+0.01,
pos+(m-1)*dpos+dpos/2-0.01),
(1.01,1.01), (1.06,1.06),
color='darkgrey',
transform=ax.transAxes,
zorder=1, clip_on=False)
txt = ax.text(pos + (m-1)*dpos/2, 1.02, k0,
horizontalalignment='center',
verticalalignment='bottom',
transform=ax.transAxes)
txt.set_fontweight("bold")
jrow = 0
for k1 in lines1:
# No data to plot
if (k0, k1) not in lines_map:
continue
# Draw the guideline
if horizontal:
ax.axhline(pos, color='grey')
else:
ax.axvline(pos, color='grey')
# Set up the labels
if split_names is not None:
us = k1.split(split_names)
if len(us) >= 2:
left_label, right_label = us[0], us[1]
else:
left_label, right_label = k1, None
else:
left_label, right_label = k1, None
if fmt_left_name is not None:
left_label = fmt_left_name(left_label)
if fmt_right_name is not None:
right_label = fmt_right_name(right_label)
# Draw the stripe
if striped and jrow % 2 == 0:
if horizontal:
ax.fill_between((0, 1), (pos-dpos/2, pos-dpos/2),
(pos+dpos/2, pos+dpos/2),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
else:
ax.fill_between((pos-dpos/2, pos+dpos/2),
(0, 0), (1, 1),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
jrow += 1
# Draw the left margin label
if show_names.lower() in ("left", "both"):
if horizontal:
ax.text(-0.1/awidth, pos, left_label,
horizontalalignment="right",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, -0.1/aheight, left_label,
horizontalalignment="center",
verticalalignment='top',
transform=ax.transAxes,
family='monospace')
# Draw the right margin label
if show_names.lower() in ("right", "both"):
if right_label is not None:
if horizontal:
ax.text(1 + 0.1/awidth, pos, right_label,
horizontalalignment="left",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, 1 + 0.1/aheight, right_label,
horizontalalignment="center",
verticalalignment='bottom',
transform=ax.transAxes,
family='monospace')
# Save the vertical position so that we can place the
# tick marks
ticks.append(pos)
# Loop over the points in one line
for ji,jp in enumerate(lines_map[(k0,k1)]):
# Calculate the vertical offset
yo = 0
if stacked:
yo = -dpos/5 + style_codes_map[styles[jp]]*stackd
pt = points[jp]
# Plot the interval
if intervals is not None:
# Symmetric interval
if np.isscalar(intervals[jp]):
lcb, ucb = pt - intervals[jp],\
pt + intervals[jp]
# Nonsymmetric interval
else:
lcb, ucb = pt - intervals[jp][0],\
pt + intervals[jp][1]
# Draw the interval
if horizontal:
ax.plot([lcb, ucb], [pos+yo, pos+yo], '-',
transform=trans,
**line_props[styles[jp]])
else:
ax.plot([pos+yo, pos+yo], [lcb, ucb], '-',
transform=trans,
**line_props[styles[jp]])
# Plot the point
sl = styles[jp]
sll = sl if sl not in labeled else None
labeled.add(sl)
if horizontal:
ax.plot([pt,], [pos+yo,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
else:
ax.plot([pos+yo,], [pt,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
if horizontal:
pos -= dpos
else:
pos += dpos
# Set up the axis
if horizontal:
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.set_ticks_position("none")
ax.set_yticklabels([])
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position(('axes', -0.1/aheight))
ax.set_ylim(0, 1)
ax.yaxis.set_ticks(ticks)
ax.autoscale_view(scaley=False, tight=True)
else:
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("none")
ax.set_xticklabels([])
ax.spines['bottom'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('axes', -0.1/awidth))
ax.set_xlim(0, 1)
ax.xaxis.set_ticks(ticks)
ax.autoscale_view(scalex=False, tight=True)
return fig
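# Hedged usage sketch (not part of statsmodels itself, and assuming the module's
# relative imports resolve): a small forest-style plot exercising the `points`,
# `intervals` and `lines` arguments documented above. The data are made up.
if __name__ == "__main__":
    point_values = np.array([1.5, 2.3, 0.8, 3.1, 2.0])
    half_widths = np.array([0.3, 0.5, 0.2, 0.6, 0.4])   # symmetric intervals
    label_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
    fig = dot_plot(points=point_values, intervals=half_widths,
                   lines=label_values, striped=True)
    fig.savefig("dot_plot_example.png")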
| bsd-3-clause |
MechCoder/scikit-learn | examples/plot_isotonic_regression.py | 55 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
pianomania/scikit-learn | sklearn/ensemble/__init__.py | 153 | 1382 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
btabibian/scikit-learn | benchmarks/bench_saga.py | 45 | 8474 | """Author: Arthur Mensch
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in term of learning time.
"""
import json
import time
from os.path import expanduser
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_rcv1, load_iris, load_digits, \
fetch_20newsgroups_vectorized
from sklearn.externals.joblib import delayed, Parallel, Memory
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
def fit_single(solver, X, y, penalty='l2', single_target=True, C=1,
max_iter=10, skip_slow=False):
if skip_slow and solver == 'lightning' and penalty == 'l1':
        print('skipping l1 logistic regression with solver lightning.')
return
print('Solving %s logistic regression with penalty %s, solver %s.'
% ('binary' if single_target else 'multinomial',
penalty, solver))
if solver == 'lightning':
from lightning.classification import SAGAClassifier
if single_target or solver not in ['sag', 'saga']:
multi_class = 'ovr'
else:
multi_class = 'multinomial'
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42,
stratify=y)
n_samples = X_train.shape[0]
n_classes = np.unique(y_train).shape[0]
test_scores = [1]
train_scores = [1]
accuracies = [1 / n_classes]
times = [0]
if penalty == 'l2':
alpha = 1. / (C * n_samples)
beta = 0
lightning_penalty = None
else:
alpha = 0.
beta = 1. / (C * n_samples)
lightning_penalty = 'l1'
for this_max_iter in range(1, max_iter + 1, 2):
print('[%s, %s, %s] Max iter: %s' %
('binary' if single_target else 'multinomial',
penalty, solver, this_max_iter))
if solver == 'lightning':
lr = SAGAClassifier(loss='log', alpha=alpha, beta=beta,
penalty=lightning_penalty,
tol=-1, max_iter=this_max_iter)
else:
lr = LogisticRegression(solver=solver,
multi_class=multi_class,
C=C,
penalty=penalty,
fit_intercept=False, tol=1e-24,
max_iter=this_max_iter,
random_state=42,
)
t0 = time.clock()
lr.fit(X_train, y_train)
train_time = time.clock() - t0
scores = []
for (X, y) in [(X_train, y_train), (X_test, y_test)]:
try:
y_pred = lr.predict_proba(X)
except NotImplementedError:
# Lightning predict_proba is not implemented for n_classes > 2
y_pred = _predict_proba(lr, X)
score = log_loss(y, y_pred, normalize=False) / n_samples
score += (0.5 * alpha * np.sum(lr.coef_ ** 2) +
beta * np.sum(np.abs(lr.coef_)))
scores.append(score)
train_score, test_score = tuple(scores)
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
test_scores.append(test_score)
train_scores.append(train_score)
accuracies.append(accuracy)
times.append(train_time)
return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
pred = safe_sparse_dot(X, lr.coef_.T)
if hasattr(lr, "intercept_"):
pred += lr.intercept_
return softmax(pred)
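def _demo_fit_single_on_digits():
    """Hedged example, not part of the original benchmark: fit one binary
    logistic regression on the digits data with the 'saga' solver and return
    the traced scores. The parameter choices here are illustrative assumptions."""
    digits = load_digits()
    X, y = digits.data, (digits.target < 5).astype(int)
    return fit_single('saga', X, y, penalty='l2', single_target=True,
                      C=1, max_iter=5)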
def exp(solvers, penalties, single_target, n_samples=30000, max_iter=20,
dataset='rcv1', n_jobs=1, skip_slow=False):
mem = Memory(cachedir=expanduser('~/cache'), verbose=0)
if dataset == 'rcv1':
rcv1 = fetch_rcv1()
lbin = LabelBinarizer()
lbin.fit(rcv1.target_names)
X = rcv1.data
y = rcv1.target
y = lbin.inverse_transform(y)
le = LabelEncoder()
y = le.fit_transform(y)
if single_target:
y_n = y.copy()
y_n[y > 16] = 1
y_n[y <= 16] = 0
y = y_n
elif dataset == 'digits':
digits = load_digits()
X, y = digits.data, digits.target
if single_target:
y_n = y.copy()
y_n[y < 5] = 1
y_n[y >= 5] = 0
y = y_n
elif dataset == 'iris':
iris = load_iris()
X, y = iris.data, iris.target
elif dataset == '20newspaper':
ng = fetch_20newsgroups_vectorized()
X = ng.data
y = ng.target
if single_target:
y_n = y.copy()
y_n[y > 4] = 1
            y_n[y <= 4] = 0
y = y_n
X = X[:n_samples]
y = y[:n_samples]
cached_fit = mem.cache(fit_single)
out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
delayed(cached_fit)(solver, X, y,
penalty=penalty, single_target=single_target,
C=1, max_iter=max_iter, skip_slow=skip_slow)
for solver in solvers
for penalty in penalties)
res = []
idx = 0
for solver in solvers:
for penalty in penalties:
if not (skip_slow and solver == 'lightning' and penalty == 'l1'):
lr, times, train_scores, test_scores, accuracies = out[idx]
this_res = dict(solver=solver, penalty=penalty,
single_target=single_target,
times=times, train_scores=train_scores,
test_scores=test_scores,
accuracies=accuracies)
res.append(this_res)
idx += 1
with open('bench_saga.json', 'w+') as f:
json.dump(res, f)
def plot():
import pandas as pd
with open('bench_saga.json', 'r') as f:
f = json.load(f)
res = pd.DataFrame(f)
res.set_index(['single_target', 'penalty'], inplace=True)
grouped = res.groupby(level=['single_target', 'penalty'])
colors = {'saga': 'blue', 'liblinear': 'orange', 'lightning': 'green'}
for idx, group in grouped:
single_target, penalty = idx
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(131)
train_scores = group['train_scores'].values
ref = np.min(np.concatenate(train_scores)) * 0.999
for scores, times, solver in zip(group['train_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Training objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(132)
test_scores = group['test_scores'].values
ref = np.min(np.concatenate(test_scores)) * 0.999
for scores, times, solver in zip(group['test_scores'], group['times'],
group['solver']):
scores = scores / ref - 1
ax.plot(times, scores, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test objective (relative to min)')
ax.set_yscale('log')
ax = fig.add_subplot(133)
for accuracy, times, solver in zip(group['accuracies'], group['times'],
group['solver']):
ax.plot(times, accuracy, label=solver, color=colors[solver])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
name = 'single_target' if single_target else 'multi_target'
name += '_%s' % penalty
plt.suptitle(name)
name += '.png'
fig.tight_layout()
fig.subplots_adjust(top=0.9)
plt.savefig(name)
plt.close(fig)
if __name__ == '__main__':
solvers = ['saga', 'liblinear', 'lightning']
penalties = ['l1', 'l2']
single_target = True
exp(solvers, penalties, single_target, n_samples=None, n_jobs=1,
dataset='20newspaper', max_iter=20)
plot()
| bsd-3-clause |
vybstat/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
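# Hedged illustration (not one of the original tests): spectral embeddings are
# only defined up to a sign flip of each component, which is why the tests above
# compare embeddings with _check_with_col_sign_flipping rather than plain
# equality. The data below are an arbitrary example.
if __name__ == '__main__':
    rng = np.random.RandomState(36)
    sims = rbf_kernel(rng.randn(10, 30))
    emb = spectral_embedding(sims, n_components=2, random_state=0)
    flipped = emb * np.array([1, -1])  # flip the sign of the second component
    print(_check_with_col_sign_flipping(emb, flipped, tol=1e-12))  # True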
| bsd-3-clause |
fspaolo/scikit-learn | examples/svm/plot_custom_kernel.py | 8 | 1525 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
(2 0)
k(x, y) = x ( ) y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.title('3-Class classification using Support Vector Machine with custom'
' kernel')
pl.axis('tight')
pl.show()
| bsd-3-clause |
sserkez/ocelot | demos/sr/phase_tune.py | 2 | 1506 | __author__ = 'Sergey Tomin'
from ocelot.rad import *
from ocelot import *
from ocelot.gui import *
font = {'size' : 14}
matplotlib.rc('font', **font)
beam = Beam()
beam.E = 17.5
beam.I = 0.1
beam.beta_x = 12.84
beam.beta_y = 6.11
beam.Dx = 0.526
und = Undulator(Kx = 4., nperiods=125, lperiod=0.04, eid= "und")
D = Drift(l=0.5, eid="D")
b1 = Hcor(l=0.1, angle = 5*-0.00001, eid="b1")
b2 = Hcor(l=0.2, angle = 5*0.00002, eid="b2")
b3 = Hcor(l=0.1, angle = 5*-0.00001, eid="b3")
phase_shift = (b1, b2, b3)
cell = (und, D, phase_shift, D, und)
lat = MagneticLattice(cell)
screen = Screen()
screen.z = 100.0
screen.size_x = 0.0
screen.size_y = 0.0
screen.nx = 1
screen.ny = 1
screen.start_energy = 7900 #eV
screen.end_energy = 8200 #eV
screen.num_energy = 1000
print_rad_props(beam, K=und.Kx, lu=und.lperiod, L=und.l, distance=screen.z)
screen = calculate_radiation(lat, screen, beam)
# trajectory
for u in screen.motion:
plt.plot(u[4::9], u[0::9], "r")
plt.show()
show_flux(screen, unit="mrad")
und = Undulator(Kx = 4., nperiods=125, lperiod=0.04, eid= "und")
D = Drift(l=0.5, eid="D")
b1 = Hcor(l=0.1, angle = 10*-0.00001, eid="b1")
b2 = Hcor(l=0.2, angle = 10*0.00002, eid="b2")
b3 = Hcor(l=0.1, angle = 10*-0.00001, eid="b3")
phase_shift = (b1, b2, b3)
cell = (und, D, phase_shift, D, und)
lat = MagneticLattice(cell)
screen = calculate_radiation(lat, screen, beam)
# trajectory
for u in screen.motion:
plt.plot(u[4::9], u[0::9], "r")
plt.show()
show_flux(screen, unit="mrad") | gpl-3.0 |
glouppe/scikit-learn | sklearn/linear_model/sag.py | 9 | 9905 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from .base import make_dataset
from .sgd_fast import Log, SquaredLoss
from .sag_fast import sag, get_max_squared_sum
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
"""
if loss == 'log':
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared'
Loss function that will be optimized.
'log' is used for classification, like in LogisticRegression.
'squared' is used for regression, like in Ridge.
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
    max_iter : int, optional
        The max number of passes over the training data if the stopping
        criterion is not reached. Defaults to 1000.
    tol : double, optional
        The stopping criterion for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol. Defaults to .001
    verbose : integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem: dict, optional
The initialization parameters used for warm starting. It is currently
not used in Ridge.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and eventually the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
coef_init = np.zeros(n_features, dtype=np.float64, order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.size == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1]
coef_init = coef_init[:-1]
else:
intercept_init = 0.0
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient_init = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient_init = 0.0
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros(n_samples, dtype=np.float64,
order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros(n_features, dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
if loss == 'log':
class_loss = Log()
elif loss == 'squared':
class_loss = SquaredLoss()
else:
raise ValueError("Invalid loss parameter: got %r instead of "
"one of ('log', 'squared')" % loss)
intercept_, num_seen, n_iter_, intercept_sum_gradient = \
sag(dataset, coef_init.ravel(),
intercept_init, n_samples,
n_features, tol,
max_iter,
class_loss,
step_size, alpha_scaled,
sum_gradient_init.ravel(),
gradient_memory_init.ravel(),
seen_init.ravel(),
num_seen_init,
fit_intercept,
intercept_sum_gradient_init,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
coef_ = coef_init
if fit_intercept:
coef_ = np.append(coef_, intercept_)
warm_start_mem = {'coef': coef_, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
return coef_, n_iter_, warm_start_mem
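# Hedged usage sketch (not part of scikit-learn's public examples): calling
# sag_solver directly on a small synthetic regression problem, plus a worked
# instance of the automatic step size. All numbers below are illustrative
# assumptions.
#
# For the squared loss, get_auto_step_size returns
#     1 / (max_squared_sum + int(fit_intercept) + alpha_scaled),
# e.g. with max_squared_sum=4.0, alpha_scaled=0.01 and an intercept this is
# 1 / (4 + 1 + 0.01) ~= 0.1996; for the log loss the analogous value is
# 4 / (4 + 1 + 4 * 0.01) ~= 0.7937.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 3)
    y_demo = X_demo.dot(np.array([1.0, 2.0, -1.0])) + 0.1 * rng.randn(50)
    coef, n_iter, mem = sag_solver(X_demo, y_demo, loss='squared', alpha=1.0)
    print("coefficients:", coef)
    print("passes over the data:", n_iter)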
| bsd-3-clause |
mila-udem/fuel | docs/conf.py | 4 | 10813 | # -*- coding: utf-8 -*-
#
# Fuel documentation build configuration file, created by
# sphinx-quickstart2 on Wed Oct 8 17:59:44 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
import sys
from mock import Mock as MagicMock
from sphinx.ext.autodoc import cut_lines
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.linkcode'
]
intersphinx_mapping = {
'theano': ('http://theano.readthedocs.org/en/latest/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'python': ('http://docs.python.org/3.4', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None)
}
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['h5py', 'zmq']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
graphviz_dot_args = ['-Gbgcolor=#fcfcfc']  # To match the RTD theme
# Render todo lists
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fuel'
copyright = u'2014, Université de Montréal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import fuel
version = '.'.join(fuel.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = fuel.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fueldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Fuel.tex', u'Fuel Documentation',
u'Université de Montréal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fuel', u'Fuel Documentation',
[u'Université de Montréal'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Fuel', u'Fuel Documentation',
u'Université de Montréal', 'Fuel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
def skip_abc(app, what, name, obj, skip, options):
return skip or name.startswith('_abc')
def setup(app):
app.connect('autodoc-process-docstring', cut_lines(2, what=['module']))
app.connect('autodoc-skip-member', skip_abc)
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
if hasattr(obj, '__wrapped__'):
obj = obj.__wrapped__
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
_, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = os.path.relpath(fn, start=os.path.dirname(fuel.__file__))
github = "https://github.com/mila-udem/fuel/blob/master/fuel/{}{}"
return github.format(fn, linespec)
| mit |
gaulinmp/panda_cub | panda_cub/pandas.py | 1 | 9985 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import numpy as np
import pandas as pd
import scipy.stats as stats
try:
import statsmodels.api as sm
except ImportError:
sm = None
__logger = logging.getLogger(__name__)
def _listify(obj):
if obj is None:
return None
if not isinstance(obj, (tuple, list, set)):
return [obj]
return list(obj)
def two_way_t_test(df, x_rank, y_rank, value):
"""
Create table of means by two rank variables, with differences
[max(x/y_rank)-min(x/y_rank)] and T-/P-values.
    If the `statsmodels` package is available, also use it to calculate the
    diff-in-diff estimator.
Example: df.two_way_t_test('age_decile', 'size_decile', 'ROA')
Args:
x_rank: Variable for separating groups in X-dimension
y_rank: Variable for separating groups in Y-dimension
value: Variable to t-test across groups.
Returns:
Data frame of the results, with:
Columns: X-rank unique values, and diff (+ t-stat/p-value) across min/max x-rank groups.
Rows: Y-rank unique values, and diff (+ t-stat/p-value) across min/max y-rank groups.
    Raises:
        ValueError: if any (x_rank, y_rank) intersection contains no observations.
    """
_cols = [x_rank, y_rank, value]
_df = df.loc[df[_cols].notnull().all(axis=1), _cols]
_xs = sorted(_df[x_rank].unique())
_ys = sorted(_df[y_rank].unique())
_ret = (_df.groupby([x_rank, y_rank])
.mean()
.reset_index()
.pivot(index=y_rank, columns=x_rank, values=value)
)
# Check that all intersections of x and y are non-empty
if _ret.isnull().any().any():
_ = _ret.loc[_ret.isnull().any(axis=1), _ret.isnull().any(axis=0)]
raise ValueError("One of the intersections was NaN:\n\n{}"
.format(_.fillna('NaN')[_.isnull()].fillna('')))
# Add difference column
_ret.loc[_ys, 'diff'] = _ret.loc[_ys, max(_xs)] - _ret.loc[_ys, min(_xs)]
# Add difference row
_ret.loc['diff', _xs] = _ret.loc[max(_ys), _xs] - _ret.loc[min(_ys), _xs]
for _x in _xs: # Iterate across X-values
sel = (_df[x_rank] == _x) & (_df[value].notnull())
test = stats.ttest_ind(_df.loc[(_df[y_rank] == max(_ys)) & sel, value],
_df.loc[(_df[y_rank] == min(_ys)) & sel, value])
_ret.loc['t-stat', _x] = test.statistic
_ret.loc['p-value', _x] = test.pvalue
for _y in _ys: # Iterate across Y-values
sel = (_df[y_rank] == _y) & (_df[value].notnull())
test = stats.ttest_ind(_df.loc[(_df[x_rank] == max(_xs)) & sel, value],
_df.loc[(_df[x_rank] == min(_xs)) & sel, value])
_ret.loc[_y, 't-stat'] = test.statistic
_ret.loc[_y, 'p-value'] = test.pvalue
# diff in diff estimator
if sm is not None:
_df[x_rank+'_max'] = (_df[x_rank] == max(_xs))+0
_df[x_rank+'_min'] = (_df[x_rank] == min(_xs))+0
_df[y_rank+'_max'] = (_df[y_rank] == max(_ys))+0
_df[y_rank+'_min'] = (_df[y_rank] == min(_ys))+0
# Diff-in-diff estimation is the interaction term in
# value = intercept + Y_max + X_max + Y_max*X_max
dind_name = '_rank_interaction__'
_df[dind_name] = _df[x_rank+'_max'] * _df[y_rank+'_max']
dd_axes = _df.columns[-5:]
sel = ( (_df[dd_axes[0:2]].sum(axis=1) > 0)
& (_df[dd_axes[2:4]].sum(axis=1) > 0)
& _df[value].notnull() )
# [0::2] grabs maxes and interaction
y, x = _df.loc[sel, value], _df.loc[sel, dd_axes[0::2]]
try:
# cov_type='HC1' uses the robust sandwich estimator
fit = sm.OLS(y, sm.add_constant(x)).fit(cov_type='HC1')
_ret.loc['diff', 'diff'] = fit.params[dind_name]
_ret.loc['t-stat', 't-stat'] = fit.tvalues[dind_name]
_ret.loc['p-value', 'p-value'] = fit.pvalues[dind_name]
        except:
            # OLS fit failed (e.g., singular design); leave the diff-in-diff cells blank
            pass
return _ret.fillna('')
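# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# package). It builds a small synthetic DataFrame with made-up column names
# ('size_rank', 'age_rank', 'roa') and runs two_way_t_test on it.
# ---------------------------------------------------------------------------
def _demo_two_way_t_test():
    """Hypothetical sketch: mean table, group differences and t-/p-values."""
    import numpy as np
    rng = np.random.RandomState(0)
    n = 400
    demo = pd.DataFrame({
        'size_rank': rng.randint(0, 2, n),   # two X groups: 0 and 1
        'age_rank': rng.randint(0, 2, n),    # two Y groups: 0 and 1
        'roa': rng.normal(0.0, 1.0, n),      # fake outcome variable
    })
    return two_way_t_test(demo, 'size_rank', 'age_rank', 'roa')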
def winsor(df, columns, p=0.01, inplace=False, prefix=None, suffix=None):
"""
Winsorize columns by setting values outside of the p and 1-p percentiles
equal to the p and 1-p percentiles respectively.
    Set inplace=True to add a new column named prefix+column+suffix to `df`.
    Set inplace=False to return a list of winsorized Series, one per entry in `columns`.
"""
new_cols = []
for column in _listify(columns):
new_col_name = '{}{}{}'.format(prefix or '', column, suffix or '')
if column not in df:
if inplace:
__logger.warning("Column %s not found in df.", column)
continue
else:
__logger.warning("Column %s not found in df.", column)
raise KeyError("Column {} not found in data frame".format(column))
p = max(0, min(.5, p))
low=df[column].quantile(p)
hi=df[column].quantile(1-p)
        if np.isnan(low) or np.isnan(hi):
__logger.warning("One of the quantiles is NAN! low: {}, high: {}"
.format(low, hi))
continue
__logger.info("{}: Num < {:0.2f}: {} ({:0.3f}), num > {:0.2f}: {} ({:0.3f})"
.format(column, low, sum(df[column]<low),
sum(df[column]<low)/len(df[column]), hi,
sum(df[column]>hi ),
sum(df[column]>hi )/len(df[column])))
if inplace:
df[new_col_name] = df[column].copy()
df.loc[df[new_col_name]>hi, new_col_name] = hi
df.loc[df[new_col_name]<low, new_col_name] = low
else:
_tmpcol = df[column].copy()
_tmpcol.loc[_tmpcol<low] = low
_tmpcol.loc[_tmpcol>hi] = hi
new_cols.append(_tmpcol)
if inplace:
return df
return new_cols
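# Illustrative usage sketch (added for exposition; the data and column name are
# made up): winsorize one heavy-tailed column at the 5th/95th percentiles; with
# inplace=False a list with one clipped Series per requested column is returned.
def _demo_winsor():
    """Hypothetical sketch of winsor() on one noisy column."""
    import numpy as np
    rng = np.random.RandomState(1)
    demo = pd.DataFrame({'x': rng.standard_t(df=3, size=1000)})  # heavy tails
    clipped, = winsor(demo, 'x', p=0.05)
    return clipped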
def normalize(df, columns, p=0, inplace=False, prefix=None, suffix=None):
"""
Normalize columns to have mean=0 and standard deviation = 1.
Mean and StdDev. are calculated excluding the <p and >1-p percentiles.
    Set inplace=True to add a new column named prefix+column+suffix to `df`.
    Set inplace=False to return a list of normalized Series, one per entry in `columns`.
"""
new_cols = []
for column in _listify(columns):
        if 0 < p < .5:
low=df[column].quantile(p)
hi=df[column].quantile(1-p)
sel = (df[column]>=low)&(df[column]<=hi)
else:
sel = df[column].notnull()
_mu = df.loc[sel, column].mean()
_rho = df.loc[sel,column].std()
if not _rho > 0:
raise ValueError('0 standard deviation found when normalizing '
'{} (mean={})'.format(column, _mu))
new_col_name = '{}{}{}'.format(prefix or '', column, suffix or '')
__logger.info('{} = ({} - {:.2f}) / {:.2f}'
.format(new_col_name, column, _mu, _rho))
if inplace:
df[new_col_name] = (df[column] - _mu) / _rho
else:
new_cols.append((df[column] - _mu) / _rho)
if inplace:
return df
return new_cols
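# Illustrative usage sketch (added for exposition; the column name is made up):
# z-score one synthetic column; with the default p=0 the mean and standard
# deviation are computed over all non-null values.
def _demo_normalize():
    """Hypothetical sketch of normalize() on one column."""
    import numpy as np
    rng = np.random.RandomState(2)
    demo = pd.DataFrame({'x': 10.0 + 3.0 * rng.normal(size=500)})
    z, = normalize(demo, 'x')
    return z  # roughly zero mean, unit standard deviation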
def coalesce(df, *cols, no_scalar=False):
"""Fills in missing values with subsequently defined columns.
Element-wise equivalent of: (col[0] or col[1] or ... or col[-1])
The last provided value in *cols is assumed to be a scalar,
and .fillna(col[-1]) is called unless `no_scalar` is set to True.
"""
if len(cols) < 1:
raise ValueError('must specify list of columns, got: {!r}'
.format(cols))
if len(cols) == 1:
return df[cols[0]].copy()
_cols = list(cols) if no_scalar else list(cols[:-1])
_return_column = df[_cols.pop(0)].copy()
for col in _cols:
if col in df:
_return_column = _return_column.fillna(df[col])
if not no_scalar:
_return_column = _return_column.fillna(cols[-1])
return _return_column
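# Illustrative usage sketch (added for exposition; columns 'a'/'b' are made up):
# fill missing values of 'a' from 'b', then fall back to the scalar 0.
def _demo_coalesce():
    """Hypothetical sketch of coalesce(): expected result is [1.0, 2.0, 0.0]."""
    import numpy as np
    demo = pd.DataFrame({'a': [1.0, np.nan, np.nan],
                         'b': [np.nan, 2.0, np.nan]})
    return coalesce(demo, 'a', 'b', 0)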
def get_duplicated(df, columns):
"""
Return dataframe of all rows which match duplicated criteria.
Returns `df[df.duplicated(cols, keep=False)]` and a sort on `cols`.
"""
_cols = _listify(columns) if columns else df.columns
return df[df.duplicated(_cols, keep=False)].sort_values(_cols)
def value_counts_full(series, normalize=False, sort=True, cumulative=True, **kwargs):
"""
Series.value_counts() gives a series with the counts OR frequencies (normalize=True),
but doesn't show both. Also doesn't show the cumulative frequency.
This method provides that in a pretty little table (DataFrame).
Monkey-patch onto pandas with pd.Series.value_counts_full = value_counts_full to
be able to call it like: ``df.column_to_count.value_counts_full()`` just like you
would the normal ``Series.value_counts()``.
"""
_v = series.value_counts(normalize=False, **kwargs)
_p = series.value_counts(normalize=True, **kwargs)*100
_ret = pd.merge(_v, _p, left_index=True,
right_index=True, suffixes=('', ' %'))
# Some cosmetics
_ret.columns = ('Count', 'Percent')
_ret.index.name = series.name
# sort=False doesn't seem to work as expected with dropna=False,
# so just force the index sort.
if not sort:
_ret.sort_index(inplace=True)
if cumulative:
_ret['Cumulative'] = _ret['Percent'].cumsum()
return _ret
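# Illustrative usage sketch (added for exposition; the Series content is made up):
# counts, percentages and the cumulative percentage of a toy categorical Series.
def _demo_value_counts_full():
    """Hypothetical sketch of value_counts_full() on a small Series."""
    s = pd.Series(list('aabbbcc'), name='letters')
    return value_counts_full(s)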
# Now monkey patch pandas.
__logger.info("Run monkey_patch_pandas() to monkey patch pandas.")
def monkey_patch_pandas():
pd.DataFrame.two_way_t_test = two_way_t_test
pd.DataFrame.normalize = normalize
pd.DataFrame.winsor = winsor
pd.DataFrame.coalesce = coalesce
pd.DataFrame.get_duplicated = get_duplicated
pd.Series.value_counts_full = value_counts_full
__logger.info("Added to DataFrame: two_way_t_test, normalize, winsor, coalesce, and get_duplicated.")
__logger.info("Added to Series: value_counts_full.")
| mit |
yugangzhang/chxanalys | chxanalys/XPCS_GiSAXS.py | 1 | 90446 | """
Dec 10, 2015 Developed by Y.G.@CHX
[email protected]
This module is for the GiSAXS XPCS analysis
"""
from chxanalys.chx_generic_functions import *
from chxanalys.chx_compress import ( compress_eigerdata, read_compressed_eigerdata,init_compress_eigerdata, get_avg_imgc,Multifile)
from chxanalys.chx_correlationc import ( cal_g2c )
from chxanalys.chx_libs import ( colors, markers, colors_, markers_)
def get_gisaxs_roi( Qr, Qz, qr_map, qz_map, mask=None, qval_dict=None ):
'''Y.G. 2016 Dec 31
Get xpcs roi of gisaxs
Parameters:
Qr: list, = [qr_start , qr_end, qr_width, qr_num], corresponding to qr start, qr end, qr width, qr number
Qz: list, = [qz_start , qz_end, qz_width, qz_num], corresponding to qz start, qz end, qz width, qz number
qr_map: two-d array, the same shape as gisaxs frame, a qr map
qz_map: two-d array, the same shape as gisaxs frame, a qz map
mask: array, the scattering mask
        qval_dict: a dict, each key (an integer) with value as qr or (qr,qz) or (q//, q|-)
if not None, the new returned qval_dict will include the old one
Return:
roi_mask: array, the same shape as gisaxs frame, the label array of roi
        qval_dict, a dict, each key (an integer) with value as qr or (qr,qz) or (q//, q|-)
'''
qr_edge, qr_center = get_qedge( *Qr )
qz_edge, qz_center = get_qedge( *Qz )
label_array_qz = get_qmap_label(qz_map, qz_edge)
label_array_qr = get_qmap_label(qr_map, qr_edge)
label_array_qzr, qzc, qrc = get_qzrmap(label_array_qz, label_array_qr,qz_center, qr_center)
labels_qzr, indices_qzr = roi.extract_label_indices(label_array_qzr)
labels_qz, indices_qz = roi.extract_label_indices(label_array_qz)
labels_qr, indices_qr = roi.extract_label_indices(label_array_qr)
if mask is None:
mask=1
roi_mask = label_array_qzr * mask
qval_dict = get_qval_dict( np.round(qr_center, 5) , np.round(qz_center,5), qval_dict = qval_dict )
return roi_mask, qval_dict
############
##developed at Octo 11, 2016
def get_qr( data, Qr, Qz, qr, qz, mask = None ):
'''Octo 12, 2016, Y.G.@CHX
    compute one-d I(q) as a function of qr for different qz
    data: an image/Eiger frame
Qr: info for qr, = qr_start , qr_end, qr_width, qr_num
Qz: info for qz, = qz_start, qz_end, qz_width , qz_num
qr: qr-map
qz: qz-map
mask: a mask for qr-1d integration, default is None
    Return: a 2-D array whose columns are [qr, I(qr)] pairs, one pair per qz (the DataFrame conversion below is commented out)
Examples:
#to make two-qz, from 0.018 to 0.046, width as 0.008,
qz_width = 0.008
qz_start = 0.018 + qz_width/2
qz_end = 0.046 - qz_width/2
qz_num= 2
#to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012
qr_width = 0.1-0.02
qr_start = 0.02 + qr_width /2
        qr_end = 0.1 - qr_width /2
qr_num = 1
Qr = [qr_start , qr_end, qr_width, qr_num]
Qz= [qz_start, qz_end, qz_width , qz_num ]
new_mask[ :, 1020:1045] =0
ticks = show_qzr_map( qr,qz, inc_x0, data = avg_imgmr, Nzline=10, Nrline=10 )
qx, qy, qr, qz = convert_gisaxs_pixel_to_q( inc_x0, inc_y0,refl_x0,refl_y0, lamda=lamda, Lsd=Lsd )
qr_1d = get_qr( avg_imgr, Qr, Qz, qr, qz, new_mask)
'''
qr_start , qr_end, qr_width, qr_num =Qr
qz_start, qz_end, qz_width , qz_num =Qz
qr_edge, qr_center = get_qedge(qr_start , qr_end, qr_width, qr_num )
qz_edge, qz_center = get_qedge( qz_start, qz_end, qz_width , qz_num )
label_array_qr = get_qmap_label( qr, qr_edge)
#qr_1d ={}
#columns=[]
for i,qzc_ in enumerate(qz_center):
#print (i,qzc_)
label_array_qz = get_qmap_label( qz, qz_edge[i*2:2*i+2])
#print (qzc_, qz_edge[i*2:2*i+2])
label_array_qzr,qzc,qrc = get_qzrmap(label_array_qz, label_array_qr,qz_center, qr_center )
#print (np.unique(label_array_qzr ))
if mask is not None:label_array_qzr *= mask
roi_pixel_num = np.sum( label_array_qzr, axis=0)
qr_ = qr *label_array_qzr
data_ = data*label_array_qzr
qr_ave = np.sum( qr_, axis=0)/roi_pixel_num
data_ave = np.sum( data_, axis=0)/roi_pixel_num
qr_ave,data_ave = zip(* sorted( zip( * [ qr_ave[~np.isnan(qr_ave)] , data_ave[~np.isnan( data_ave)] ]) ) )
if i==0:
N_interp = len( qr_ave )
qr_ave_intp = np.linspace( np.min( qr_ave ), np.max( qr_ave ), N_interp)
data_ave = np.interp( qr_ave_intp, qr_ave, data_ave)
#columns.append( ['qr%s'%i, str(round(qzc_,4))] )
if i==0:
df = np.hstack( [ (qr_ave_intp).reshape( N_interp,1) ,
data_ave.reshape( N_interp,1) ] )
else:
df = np.hstack( [ df, (qr_ave_intp).reshape( N_interp,1) ,
data_ave.reshape( N_interp,1) ] )
#df = DataFrame( df )
#df.columns = np.concatenate( columns )
return df
########################
# get one-d of I(q) as a function of qr for different qz
#####################
def cal_1d_qr( data, Qr,Qz, qr, qz, inc_x0=None, mask=None, path=None, uid=None, setup_pargs=None, save = True,
print_save_message=True):
''' Revised at July 18, 2017 by YG, to correct a divide by zero bug
Dec 16, 2016, Y.G.@CHX
calculate one-d of I(q) as a function of qr for different qz
    data: a 2-D image array
    Qr: info for qr, = qr_start , qr_end, qr_width, qr_num; Qr only defines the qr range (the qr number does not matter)
Qz: info for qz, = qz_start, qz_end, qz_width , qz_num
qr: qr-map
qz: qz-map
inc_x0: x-center of incident beam
mask: a mask for qr-1d integration
setup_pargs: gives path, filename...
    Return: qr_1d, a DataFrame with a 'qr' column and one intensity column per qz value
Examples:
#to make two-qz, from 0.018 to 0.046, width as 0.008,
qz_width = 0.008
qz_start = 0.018 + qz_width/2
qz_end = 0.046 - qz_width/2
qz_num= 2
#to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012
qr_width = 0.1-0.02
qr_start = 0.02 + qr_width /2
        qr_end = 0.1 - qr_width /2
qr_num = 1
Qr = [qr_start , qr_end, qr_width, qr_num]
Qz= [qz_start, qz_end, qz_width , qz_num ]
new_mask[ :, 1020:1045] =0
qx, qy, qr, qz = convert_gisaxs_pixel_to_q( inc_x0, inc_y0,refl_x0,refl_y0, lamda=lamda, Lsd=Lsd )
        qr_1d = cal_1d_qr( avg_imgr, Qr, Qz, qr, qz, mask=new_mask)
A plot example:
        plot1D( x= qr_1d['qr'], y = qr_1d['qz0=0.0367'], logxy=True )
'''
qr_start , qr_end, qr_width, qr_num =Qr
qz_start, qz_end, qz_width , qz_num =Qz
qr_edge, qr_center = get_qedge(qr_start , qr_end, qr_width, qr_num,verbose=False )
qz_edge, qz_center = get_qedge( qz_start, qz_end, qz_width , qz_num,verbose=False )
#print ('The qr_edge is: %s\nThe qr_center is: %s'%(qr_edge, qr_center))
#print ('The qz_edge is: %s\nThe qz_center is: %s'%(qz_edge, qz_center))
label_array_qr = get_qmap_label( qr, qr_edge)
#qr_1d ={}
columns=[]
for i,qzc_ in enumerate(qz_center):
#print (i,qzc_)
label_array_qz = get_qmap_label( qz, qz_edge[i*2:2*i+2])
#print (qzc_, qz_edge[i*2:2*i+2])
label_array_qzr,qzc,qrc = get_qzrmap(label_array_qz, label_array_qr,qz_center, qr_center )
#print (np.unique(label_array_qzr ))
if mask is not None:
label_array_qzr *= mask
roi_pixel_num = np.sum( label_array_qzr, axis=0)
#print( label_array_qzr )
qr_ = qr *label_array_qzr
data_ = data*label_array_qzr
w = np.where(roi_pixel_num)
qr_ave = np.zeros_like( roi_pixel_num, dtype= float )[w]
data_ave = np.zeros_like( roi_pixel_num, dtype= float )[w]
qr_ave = (np.sum( qr_, axis=0))[w]/roi_pixel_num[w]
data_ave = (np.sum( data_, axis=0))[w]/roi_pixel_num[w]
qr_ave, data_ave = zip(* sorted( zip( * [ qr_ave[~np.isnan(qr_ave)] , data_ave[~np.isnan( data_ave)] ]) ) )
if i==0:
N_interp = len( qr_ave )
columns.append( ['qr'] )
#qr_1d[i]= qr_ave_intp
qr_ave_intp = np.linspace( np.min( qr_ave ), np.max( qr_ave ), N_interp)
data_ave = np.interp( qr_ave_intp, qr_ave, data_ave)
#qr_1d[i]= [qr_ave_intp, data_ave]
columns.append( ['qz%s=%s'%( i, str(round(qzc_,4)) )] )
if i==0:
df = np.hstack( [ (qr_ave_intp).reshape( N_interp,1) ,
data_ave.reshape( N_interp,1) ] )
else:
df = np.hstack( [ df,
data_ave.reshape( N_interp,1) ] )
df = DataFrame( df )
df.columns = np.concatenate( columns )
if save:
if path is None:
path = setup_pargs['path']
if uid is None:
uid = setup_pargs['uid']
filename = os.path.join(path, '%s_qr_1d.csv'% (uid) )
df.to_csv(filename)
if print_save_message:
print( 'The qr_1d is saved in %s with filename as %s_qr_1d.csv'%(path, uid))
return df
def get_t_qrc( FD, frame_edge, Qr, Qz, qr, qz, mask=None, path=None, uid=None, save=True, *argv,**kwargs):
'''Get t-dependent qr
Parameters
----------
FD: a compressed imgs series handler
frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ]
mask: a image mask
Returns
---------
qrt_pds: dataframe, with columns as [qr, qz0_fra_from_beg1_to_end1, qz0_fra_from_beg2_to_end2, ...
qz1_fra_from_beg1_to_end1, qz1_fra_from_beg2_to_end2, ...
...
]
'''
Nt = len( frame_edge )
iqs = list( np.zeros( Nt ) )
qz_start, qz_end, qz_width , qz_num =Qz
qz_edge, qz_center = get_qedge( qz_start, qz_end, qz_width , qz_num, verbose=False )
#print('here')
#qr_1d = np.zeros( )
if uid is None:
uid = 'uid'
for i in range(Nt):
#str(round(qz_center[j], 4 )
t1,t2 = frame_edge[i]
avg_imgx = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, plot_ = False )
qrti = cal_1d_qr( avg_imgx, Qr, Qz, qr, qz, mask = mask, save=False )
if i == 0:
qrt_pds = np.zeros( [len(qrti), 1 + Nt * qz_num ] )
columns = np.zeros( 1 + Nt * qz_num, dtype=object )
columns[0] = 'qr'
qrt_pds[:,0] = qrti['qr']
for j in range(qz_num):
coli = qrti.columns[1+j]
qrt_pds[:, 1 + i + Nt*j] = qrti[ coli ]
columns[ 1 + i + Nt*j ] = coli + '_fra_%s_to_%s'%( t1, t2 )
qrt_pds = DataFrame( qrt_pds )
qrt_pds.columns = columns
if save:
if path is None:
path = setup_pargs['path']
if uid is None:
uid = setup_pargs['uid']
filename = os.path.join(path, '%s_qrt_pds.csv'% (uid) )
qrt_pds.to_csv(filename)
print( 'The qr~time is saved in %s with filename as %s_qrt_pds.csv'%(path, uid))
return qrt_pds
def plot_qrt_pds( qrt_pds, frame_edge, qz_index = 0, uid = 'uid', path = '',fontsize=8, *argv,**kwargs):
'''Y.G. Jan 04, 2017
plot t-dependent qr
Parameters
----------
qrt_pds: dataframe, with columns as [qr, qz0_fra_from_beg1_to_end1, qz0_fra_from_beg2_to_end2, ...
qz1_fra_from_beg1_to_end1, qz1_fra_from_beg2_to_end2, ...
...
]
frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ]
qz_index, if = integer, e.g. =0, only plot the qr~t for qz0
if None, plot all qzs
Returns
'''
fig,ax = plt.subplots(figsize=(8, 6))
cols = np.array( qrt_pds.columns )
Nt = len( frame_edge )
#num_qz = int( (len( cols ) -1 ) /Nt )
qr = qrt_pds['qr']
if qz_index is None:
r = range( 1, len(cols ) )
else:
r = range( 1 + qz_index*Nt, 1 + (1+qz_index) * Nt )
for i in r:
y = qrt_pds[ cols[i] ]
ax.semilogy(qr, y, label= cols[i], marker = markers[i], color=colors[i], ls='-')
#ax.set_xlabel("q in pixel")
ax.set_xlabel(r'$Q_r$' + r'($\AA^{-1}$)')
ax.set_ylabel("I(q)")
if 'xlim' in kwargs.keys():
ax.set_xlim( kwargs['xlim'] )
if 'ylim' in kwargs.keys():
ax.set_ylim( kwargs['ylim'] )
ax.legend(loc = 'best', fontsize=fontsize)
title = ax.set_title('%s_Iq_t'%uid)
title.set_y(1.01)
fp = path + '%s_Iq_t'%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
def plot_t_qrc( qr_1d, frame_edge, save=False, pargs=None,fontsize=8, *argv,**kwargs):
'''plot t-dependent qr
Parameters
----------
qr_1d: array, with shape as time length, frame_edge
frame_edge: list, the ROI frame regions, e.g., [ [0,100], [200,400] ]
    save: if True, save the plot; the following parameters are then taken from `pargs`:
        {
         'path':
         'uid': }
Returns
'''
fig,ax = plt.subplots(figsize=(8, 6))
Nt = qr_1d.shape[1]
q=qr_1d[:,0]
for i in range( Nt-1 ):
t1,t2 = frame_edge[i]
ax.semilogy(q, qr_1d[:,i+1], 'o-', label="frame: %s--%s"%( t1,t2) )
#ax.set_xlabel("q in pixel")
ax.set_xlabel(r'$Q_r$' + r'($\AA^{-1}$)')
ax.set_ylabel("I(q)")
if 'xlim' in kwargs.keys():
ax.set_xlim( kwargs['xlim'] )
if 'ylim' in kwargs.keys():
ax.set_ylim( kwargs['ylim'] )
ax.legend(loc = 'best', fontsize=fontsize)
uid = pargs['uid']
title = ax.set_title('uid= %s--t~I(q)'%uid)
title.set_y(1.01)
if save:
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = pargs['path']
uid = pargs['uid']
#fp = path + 'uid= %s--Iq~t-'%uid + CurTime + '.png'
fp = path + 'uid=%s--Iq-t-'%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
        save_arrays( qr_1d,
                     label= ['q_A-1']+ ['Fram-%s-%s'%(t[0],t[1]) for t in frame_edge],
                     filename='uid=%s-q-Iqt'%uid, path= path )
##########################################
###Functions for GiSAXS
##########################################
def make_gisaxs_grid( qr_w= 10, qz_w = 12, dim_r =100,dim_z=120):
''' Dec 16, 2015, Y.G.@CHX
'''
y, x = np.indices( [dim_z,dim_r] )
    Nr = int(dim_r/qr_w)
Nz = int(dim_z/qz_w)
noqs = Nr*Nz
ind = 1
for i in range(0,Nr):
for j in range(0,Nz):
            y[ qz_w*j:qz_w*(j+1), qr_w*i:qr_w*(i+1) ] = ind
ind += 1
return y
###########################################
#for Q-map, convert pixel to Q
###########################################
def get_incident_angles( inc_x0, inc_y0, refl_x0, refl_y0, pixelsize=[75,75], Lsd=5.0):
'''
Dec 16, 2015, Y.G.@CHX
giving: incident beam center: bcenx,bceny
reflected beam on detector: rcenx, rceny
sample to detector distance: Lsd, in meters
pixelsize: 75 um for Eiger4M detector
     get the incident angle (alphai) and the tilt angle (phi)
'''
if Lsd>=1000:
Lsd = Lsd/1000.
px,py = pixelsize
phi = np.arctan2( (-refl_x0 + inc_x0)*px *10**(-6), (refl_y0 - inc_y0)*py *10**(-6) )
alphai = np.arctan2( (refl_y0 -inc_y0)*py *10**(-6), Lsd ) /2.
#thetai = np.arctan2( (rcenx - bcenx)*px *10**(-6), Lsd ) /2. #??
return alphai,phi
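# Illustrative numeric sketch (added for exposition; the beam-center pixels and
# the 4810 mm sample-to-detector distance below are made-up values, not CHX
# defaults): convert direct/reflected beam positions into alphai and phi.
def _demo_incident_angles():
    '''Hypothetical sketch: incident and tilt angles in degrees.'''
    import numpy as np
    inc_x0, inc_y0 = 1040.0, 600.0      # direct-beam center (pixels), illustrative
    refl_x0, refl_y0 = 1040.0, 800.0    # reflected-beam center (pixels), illustrative
    alphai, phi = get_incident_angles(inc_x0, inc_y0, refl_x0, refl_y0,
                                      pixelsize=[75, 75], Lsd=4810)
    return np.degrees(alphai), np.degrees(phi)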
def get_reflected_angles(inc_x0, inc_y0, refl_x0, refl_y0, thetai=0.0,
pixelsize=[75,75], Lsd=5.0,dimx = 2070.,dimy=2167.):
''' Dec 16, 2015, Y.G.@CHX
giving: incident beam center: bcenx,bceny
reflected beam on detector: rcenx, rceny
sample to detector distance: Lsd, in mm
pixelsize: 75 um for Eiger4M detector
detector image size: dimx = 2070,dimy=2167 for Eiger4M detector
get reflected angle alphaf (outplane)
reflected angle thetaf (inplane )
'''
#if Lsd>=1000:#it should be something wrong and the unit should be meter
#convert Lsd from mm to m
if Lsd>=1000:
Lsd = Lsd/1000.
alphai, phi = get_incident_angles( inc_x0, inc_y0, refl_x0, refl_y0, pixelsize, Lsd)
print ('The incident_angle (alphai) is: %s'%(alphai* 180/np.pi))
px,py = pixelsize
y, x = np.indices( [int(dimy),int(dimx)] )
#alphaf = np.arctan2( (y-inc_y0)*py*10**(-6), Lsd )/2 - alphai
alphaf = np.arctan2( (y-inc_y0)*py*10**(-6), Lsd ) - alphai
thetaf = np.arctan2( (x-inc_x0)*px*10**(-6), Lsd )/2 - thetai
return alphaf,thetaf, alphai, phi
def convert_gisaxs_pixel_to_q( inc_x0, inc_y0, refl_x0, refl_y0,
pixelsize=[75,75], Lsd=5.0,dimx = 2070.,dimy=2167.,
thetai=0.0, lamda=1.0 ):
'''
Dec 16, 2015, Y.G.@CHX
giving: incident beam center: bcenx,bceny
reflected beam on detector: rcenx, rceny
sample to detector distance: Lsd, in meters
pixelsize: 75 um for Eiger4M detector
detector image size: dimx = 2070,dimy=2167 for Eiger4M detector
    wavelength: lamda, in angstrom
get: q_parallel (qp), q_direction_z (qz)
'''
alphaf,thetaf,alphai, phi = get_reflected_angles( inc_x0, inc_y0, refl_x0, refl_y0, thetai, pixelsize, Lsd,dimx,dimy)
pref = 2*np.pi/lamda
qx = np.cos( alphaf)*np.cos( 2*thetaf) - np.cos( alphai )*np.cos( 2*thetai)
qy_ = np.cos( alphaf)*np.sin( 2*thetaf) - np.cos( alphai )*np.sin ( 2*thetai)
qz_ = np.sin(alphaf) + np.sin(alphai)
qy = qz_* np.sin( phi) + qy_*np.cos(phi)
qz = qz_* np.cos( phi) - qy_*np.sin(phi)
qr = np.sqrt( qx**2 + qy**2 )
return qx*pref , qy*pref , qr*pref , qz*pref
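# Illustrative sketch (added for exposition; all numbers are made up and the
# detector is shrunk to 200x220 pixels to keep the arrays small): build the
# qx/qy/qr/qz maps for a hypothetical geometry.
def _demo_pixel_to_q_maps():
    '''Hypothetical sketch: q-maps on a small fake detector.'''
    qx, qy, qr, qz = convert_gisaxs_pixel_to_q(inc_x0=100, inc_y0=50,
                                               refl_x0=100, refl_y0=90,
                                               pixelsize=[75, 75], Lsd=4810,
                                               dimx=200., dimy=220., lamda=1.28)
    # every map has shape (dimy, dimx) = (220, 200), in inverse angstrom
    return qr.shape, qz.shape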
def get_qedge( qstart,qend,qwidth,noqs,verbose=True ):
''' July 18, 2017 Revised by Y.G.@CHX,
Add print info for noqs=1
Dec 16, 2015, Y.G.@CHX
DOCUMENT get_qedge( )
give qstart,qend,qwidth,noqs
return a qedge by giving the noqs, qstart,qend,qwidth.
a qcenter, which is center of each qedge
KEYWORD: None '''
import numpy as np
if noqs!=1:
spacing = (qend - qstart - noqs* qwidth )/(noqs-1) # spacing between rings
qedges = (roi.ring_edges(qstart,qwidth,spacing, noqs)).ravel()
qcenter = ( qedges[::2] + qedges[1::2] )/2
else:
spacing = 0
qedges = (roi.ring_edges(qstart,qwidth,spacing, noqs)).ravel()
#qedges = np.array( [qstart, qend] )
qcenter = [( qedges[1] + qedges[0] )/2]
if verbose:
print("Since noqs=1, the qend is actually defined by qstart + qwidth.")
return qedges, qcenter
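# Illustrative sketch (added for exposition; the q values are made up). It
# assumes scikit-beam's `roi` module is available through the star import at
# the top of this file, since get_qedge relies on roi.ring_edges.
def _demo_get_qedge():
    '''Hypothetical sketch: five qr bins of width 0.01 between 0.02 and 0.10 A^-1.'''
    qr_edge, qr_center = get_qedge(0.02, 0.10, 0.01, 5, verbose=False)
    # qr_edge holds 2*noqs alternating lower/upper edges; qr_center holds noqs centers
    return qr_edge, qr_center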
def get_qedge2( qstart,qend,qwidth,noqs, ):
''' DOCUMENT make_qlist( )
give qstart,qend,qwidth,noqs
return a qedge by giving the noqs, qstart,qend,qwidth.
a qcenter, which is center of each qedge
KEYWORD: None '''
import numpy as np
qcenter = np.linspace(qstart,qend,noqs)
#print ('the qcenter is: %s'%qcenter )
qedge=np.zeros(2*noqs)
qedge[::2]= ( qcenter- (qwidth/2) ) #+1 #render even value
qedge[1::2]= ( qcenter+ qwidth/2) #render odd value
return qedge, qcenter
###########################################
#for plot Q-map
###########################################
def get_qmap_label( qmap, qedge ):
    '''
    April 20, 2016, Y.G.@CHX
    give a qmap and qedge to bin the qmap into a label array
    '''
    import numpy as np
edges = np.atleast_2d(np.asarray(qedge)).ravel()
label_array = np.digitize(qmap.ravel(), edges, right=False)
label_array = np.int_(label_array)
label_array = (np.where(label_array % 2 != 0, label_array, 0) + 1) // 2
label_array = label_array.reshape( qmap.shape )
return label_array
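# Illustrative sketch (added for exposition; the synthetic q map and edges are
# made up): bin a tiny fake qr map into two labelled q bands with get_qmap_label.
def _demo_get_qmap_label():
    '''Hypothetical sketch: expected labels are 0 (outside), 1 and 2 (the two bands).'''
    import numpy as np
    qmap = np.tile(np.linspace(0.0, 0.1, 50), (20, 1))   # fake 20x50 qr map
    qedge = [0.02, 0.04, 0.06, 0.08]                     # bands 0.02-0.04 and 0.06-0.08
    labels = get_qmap_label(qmap, qedge)
    return np.unique(labels)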
def get_qzrmap(label_array_qz, label_array_qr, qz_center, qr_center ):
'''April 20, 2016, Y.G.@CHX, get qzrmap '''
qzmax = label_array_qz.max()
label_array_qr_ = np.zeros( label_array_qr.shape )
ind = np.where(label_array_qr!=0)
label_array_qr_[ind ] = label_array_qr[ ind ] + 1E4 #add some large number to qr
label_array_qzr = label_array_qz * label_array_qr_
#convert label_array_qzr to [1,2,3,...]
uqzr = np.unique( label_array_qzr )[1:]
uqz = np.unique( label_array_qz )[1:]
uqr = np.unique( label_array_qr )[1:]
#print (uqzr)
label_array_qzr_ = np.zeros_like( label_array_qzr )
newl = np.arange( 1, len(uqzr)+1)
qzc =list(qz_center) * len( uqr )
qrc= [ [qr_center[i]]*len( uqz ) for i in range(len( uqr )) ]
for i, label in enumerate(uqzr):
#print (i, label)
label_array_qzr_.ravel()[ np.where( label_array_qzr.ravel() == label)[0] ] = newl[i]
return np.int_(label_array_qzr_), np.array( qzc ), np.concatenate(np.array(qrc ))
def show_label_array_on_image(ax, image, label_array, cmap=None,norm=None, log_img=True,alpha=0.3,
imshow_cmap='gray', **kwargs): #norm=LogNorm(),
"""
This will plot the required ROI's(labeled array) on the image
Additional kwargs are passed through to `ax.imshow`.
If `vmin` is in kwargs, it is clipped to minimum of 0.5.
Parameters
----------
ax : Axes
        The `Axes` object to add the artist to
image : array
The image array
label_array : array
Expected to be an unsigned integer array. 0 is background,
positive integers label region of interest
cmap : str or colormap, optional
Color map to use for plotting the label_array, defaults to 'None'
imshow_cmap : str or colormap, optional
Color map to use for plotting the image, defaults to 'gray'
norm : str, optional
Normalize scale data, defaults to 'Lognorm()'
Returns
-------
im : AxesImage
The artist added to the axes
im_label : AxesImage
The artist added to the axes
"""
ax.set_aspect('equal')
if log_img:
im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',norm=LogNorm(norm),**kwargs) #norm=norm,
else:
im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',norm=norm,**kwargs) #norm=norm,
im_label = mpl_plot.show_label_array(ax, label_array, cmap=cmap, norm=norm, alpha=alpha,
**kwargs) # norm=norm,
return im, im_label
def show_qz(qz):
'''Dec 16, 2015, Y.G.@CHX
    plot qz map
'''
fig, ax = plt.subplots()
im=ax.imshow(qz, origin='lower' ,cmap='viridis',vmin=qz.min(),vmax= qz.max() )
fig.colorbar(im)
ax.set_title( 'Q-z')
#plt.show()
def show_qr(qr):
'''Dec 16, 2015, Y.G.@CHX
    plot qr map
'''
fig, ax = plt.subplots()
im=ax.imshow(qr, origin='lower' ,cmap='viridis',vmin=qr.min(),vmax= qr.max() )
fig.colorbar(im)
ax.set_title( 'Q-r')
#plt.show()
def show_alphaf(alphaf,):
'''Dec 16, 2015, Y.G.@CHX
    plot alphaf map
'''
fig, ax = plt.subplots()
im=ax.imshow(alphaf*180/np.pi, origin='lower' ,cmap='viridis',vmin=-1,vmax= 1.5 )
#im=ax.imshow(alphaf, origin='lower' ,cmap='viridis',norm= LogNorm(vmin=0.0001,vmax=2.00))
fig.colorbar(im)
ax.set_title( 'alphaf')
#plt.show()
def get_1d_qr( data, Qr,Qz, qr, qz, inc_x0, mask=None, show_roi=True,
ticks=None, alpha=0.3, loglog=False, save=True, setup_pargs=None ):
'''Dec 16, 2015, Y.G.@CHX
plot one-d of I(q) as a function of qr for different qz
    data: a 2-D image array
Qr: info for qr, = qr_start , qr_end, qr_width, qr_num
Qz: info for qz, = qz_start, qz_end, qz_width , qz_num
qr: qr-map
qz: qz-map
inc_x0: x-center of incident beam
mask: a mask for qr-1d integration
show_roi: boolean, if ture, show the interest ROI
ticks: ticks for the plot, = zticks, zticks_label, rticks, rticks_label
alpha: transparency of ROI
loglog: if True, plot in log-log scale
setup_pargs: gives path, filename...
Return: qr_1d, a dataframe, with columns as qr1, qz1 (float value), qr2, qz2,....
    Plots the 1D curve as a function of Qr for each Qz
Examples:
#to make two-qz, from 0.018 to 0.046, width as 0.008,
qz_width = 0.008
qz_start = 0.018 + qz_width/2
qz_end = 0.046 - qz_width/2
qz_num= 2
#to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012
qr_width = 0.1-0.02
qr_start = 0.02 + qr_width /2
        qr_end = 0.1 - qr_width /2
qr_num = 1
Qr = [qr_start , qr_end, qr_width, qr_num]
Qz= [qz_start, qz_end, qz_width , qz_num ]
new_mask[ :, 1020:1045] =0
ticks = show_qzr_map( qr,qz, inc_x0, data = avg_imgmr, Nzline=10, Nrline=10 )
qx, qy, qr, qz = convert_gisaxs_pixel_to_q( inc_x0, inc_y0,refl_x0,refl_y0, lamda=lamda, Lsd=Lsd )
qr_1d = get_1d_qr( avg_imgr, Qr, Qz, qr, qz, inc_x0, new_mask, True, ticks, .8)
A plot example:
plot1D( x= qr_1d['qr1'], y = qr_1d['0.0367'], logxy=True )
'''
qr_start , qr_end, qr_width, qr_num =Qr
qz_start, qz_end, qz_width , qz_num =Qz
qr_edge, qr_center = get_qedge(qr_start , qr_end, qr_width, qr_num )
qz_edge, qz_center = get_qedge( qz_start, qz_end, qz_width , qz_num )
print ('The qr_edge is: %s\nThe qr_center is: %s'%(qr_edge, qr_center))
print ('The qz_edge is: %s\nThe qz_center is: %s'%(qz_edge, qz_center))
label_array_qr = get_qmap_label( qr, qr_edge)
if show_roi:
label_array_qz0 = get_qmap_label( qz , qz_edge)
label_array_qzr0,qzc0,qrc0 = get_qzrmap(label_array_qz0, label_array_qr,qz_center, qr_center )
if mask is not None:label_array_qzr0 *= mask
#data_ = data*label_array_qzr0
show_qzr_roi( data,label_array_qzr0, inc_x0, ticks, alpha)
fig, ax = plt.subplots()
qr_1d ={}
columns=[]
for i,qzc_ in enumerate(qz_center):
#print (i,qzc_)
label_array_qz = get_qmap_label( qz, qz_edge[i*2:2*i+2])
#print (qzc_, qz_edge[i*2:2*i+2])
label_array_qzr,qzc,qrc = get_qzrmap(label_array_qz, label_array_qr,qz_center, qr_center )
#print (np.unique(label_array_qzr ))
if mask is not None:label_array_qzr *= mask
roi_pixel_num = np.sum( label_array_qzr, axis=0)
qr_ = qr *label_array_qzr
data_ = data*label_array_qzr
qr_ave = np.sum( qr_, axis=0)/roi_pixel_num
data_ave = np.sum( data_, axis=0)/roi_pixel_num
qr_ave,data_ave = zip(* sorted( zip( * [ qr_ave[~np.isnan(qr_ave)] , data_ave[~np.isnan( data_ave)] ]) ) )
if i==0:
N_interp = len( qr_ave )
qr_ave_intp = np.linspace( np.min( qr_ave ), np.max( qr_ave ), N_interp)
data_ave = np.interp( qr_ave_intp, qr_ave, data_ave)
qr_1d[i]= [qr_ave_intp, data_ave]
columns.append( ['qr%s'%i, str(round(qzc_,4))] )
if loglog:
ax.loglog(qr_ave_intp, data_ave, '--o', label= 'qz= %f'%qzc_, markersize=1)
else:
ax.plot( qr_ave_intp, data_ave, '--o', label= 'qz= %f'%qzc_)
if i==0:
df = np.hstack( [ (qr_ave_intp).reshape( N_interp,1) ,
data_ave.reshape( N_interp,1) ] )
else:
df = np.hstack( [ df, (qr_ave_intp).reshape( N_interp,1) ,
data_ave.reshape( N_interp,1) ] )
#ax.set_xlabel( r'$q_r$', fontsize=15)
ax.set_xlabel(r'$q_r$'r'($\AA^{-1}$)', fontsize=18)
ax.set_ylabel('$Intensity (a.u.)$', fontsize=18)
ax.set_yscale('log')
#ax.set_xscale('log')
ax.set_xlim( qr.max(),qr.min() )
ax.legend(loc='best')
df = DataFrame( df )
df.columns = np.concatenate( columns )
if save:
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = setup_pargs['path']
uid = setup_pargs['uid']
#filename = os.path.join(path, 'qr_1d-%s-%s.csv' % (uid,CurTime))
filename = os.path.join(path, 'uid=%s--qr_1d.csv'% (uid) )
df.to_csv(filename)
print( 'The qr_1d is saved in %s with filename as uid=%s--qr_1d.csv'%(path, uid))
#fp = path + 'Uid= %s--Circular Average'%uid + CurTime + '.png'
fp = path + 'uid=%s--qr_1d-'%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
#plt.show()
return df
def plot_qr_1d_with_ROI( qr_1d, qr_center, loglog=False, save=True, uid='uid', path='' ):
'''Dec 16, 2015, Y.G.@CHX
plot one-d of I(q) as a function of qr with ROI
qr_1d: a dataframe for qr_1d
qr_center: the center of qr
loglog: if True, plot in log-log scale
Return:
        Plot the 1D curve with the ROI positions marked
A plot example:
        plot_qr_1d_with_ROI( qr_1d, qr_center, loglog=False, save=True )
'''
fig, ax = plt.subplots()
Ncol = len( qr_1d.columns )
Nqr = Ncol%2
qz_center = qr_1d.columns[1::1]#qr_1d.columns[1::2]
Nqz = len(qz_center)
for i,qzc_ in enumerate(qz_center):
x= qr_1d[ qr_1d.columns[0] ]
y= qr_1d[qzc_]
if loglog:
ax.loglog(x,y, '--o', label= 'qz= %s'%qzc_, markersize=1)
else:
ax.plot( x,y, '--o', label= 'qz= %s'%qzc_)
for qrc in qr_center:
ax.axvline( qrc )#, linewidth = 5 )
#ax.set_xlabel( r'$q_r$', fontsize=15)
ax.set_xlabel(r'$q_r$'r'($\AA^{-1}$)', fontsize=18)
ax.set_ylabel('$Intensity (a.u.)$', fontsize=18)
ax.set_yscale('log')
#ax.set_xscale('log')
ax.set_xlim( x.max(), x.min() )
ax.legend(loc='best')
ax.set_title( '%s_Qr_ROI'%uid)
if save:
fp = path + '%s_Qr_ROI'%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
#plt.show()
def interp_zeros( data ):
from scipy.interpolate import interp1d
gf = data.ravel()
indice, = gf.nonzero()
start, stop = indice[0], indice[-1]+1
dx,dy = data.shape
x=np.arange( dx*dy )
f = interp1d(x[indice], gf[indice])
gf[start:stop] = f(x[start:stop])
return gf.reshape([dx,dy])
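# Illustrative sketch (added for exposition; the array values are made up):
# interp_zeros linearly fills zeros that lie between the first and the last
# non-zero entry of the flattened array (it also modifies the input in place).
def _demo_interp_zeros():
    '''Hypothetical sketch: the interior zeros become 2, 3 and 6.'''
    import numpy as np
    data = np.array([[1.0, 0.0, 0.0, 4.0],
                     [5.0, 0.0, 7.0, 8.0]])
    return interp_zeros(data)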
def get_qr_tick_label( qr, label_array_qr, inc_x0, interp=True):
'''
Dec 16, 2015, Y.G.@CHX
get zticks,zticks_label
Parameters:
qr: 2-D array, qr of a gisaxs image (data)
label_array_qr: a labelled array of qr map, get by:
label_array_qr = get_qmap_label( qr, qz_edge)
Options:
interp: if True, make qz label round by np.round(data, 2)
inc_x0: x-center of incident beam
Return:
rticks: list, r-tick positions in unit of pixel
rticks_label: list, r-tick positions in unit of real space
Examples:
rticks,rticks_label = get_qr_tick_label( qr, label_array_qr)
'''
rticks =[]
rticks_label = []
num = len( np.unique( label_array_qr ) )
for i in range( 1, num ):
ind = np.sort( np.where( label_array_qr==i )[1] )
#tick = round( qr[label_array_qr==i].mean(),2)
tick = qr[label_array_qr==i].mean()
if ind[0] < inc_x0 and ind[-1]>inc_x0: #
#mean1 = int( (ind[np.where(ind < inc_x0)[0]]).mean() )
#mean2 = int( (ind[np.where(ind > inc_x0)[0]]).mean() )
mean1 = int( (ind[np.where(ind < inc_x0)[0]])[0] )
mean2 = int( (ind[np.where(ind > inc_x0)[0]])[0] )
rticks.append( mean1)
rticks.append(mean2)
rticks_label.append( tick )
rticks_label.append( tick )
else:
#print('here')
#mean = int( ind.mean() )
mean = int( ind[0] )
#mean = int( (ind[0] +ind[-1])/2 )
rticks.append(mean)
rticks_label.append( tick )
#print (rticks)
#print (mean, tick)
n= len(rticks)
for i, rt in enumerate( rticks):
if rt==0:
rticks[i] = n- i
if interp:
rticks = np.array(rticks)
rticks_label = np.array( rticks_label)
try:
w= np.where( rticks <= inc_x0)[0]
rticks1 = np.int_(np.interp( np.round( rticks_label[w], 3), rticks_label[w], rticks[w] ))
rticks_label1 = np.round( rticks_label[w], 3)
except:
rticks_label1 = []
try:
w= np.where( rticks > inc_x0)[0]
rticks2 = np.int_(np.interp( np.round( rticks_label[w], 3), rticks_label[w], rticks[w] ))
rticks = np.append( rticks1, rticks2)
rticks_label2 = np.round( rticks_label[w], 3)
except:
rticks_label2 = []
rticks_label = np.append( rticks_label1, rticks_label2)
return rticks, rticks_label
def get_qz_tick_label( qz, label_array_qz,interp=True):
'''
Dec 16, 2015, Y.G.@CHX
get zticks,zticks_label
Parameters:
qz: 2-D array, qz of a gisaxs image (data)
label_array_qz: a labelled array of qz map, get by:
label_array_qz = get_qmap_label( qz, qz_edge)
interp: if True, make qz label round by np.round(data, 2)
Return:
zticks: list, z-tick positions in unit of pixel
zticks_label: list, z-tick positions in unit of real space
Examples:
zticks,zticks_label = get_qz_tick_label( qz, label_array_qz)
'''
num = len( np.unique( label_array_qz ) )
#zticks = np.array( [ int( np.where( label_array_qz==i )[0].mean() ) for i in range( 1,num ) ])
zticks = np.array( [ int( np.where( label_array_qz==i )[0][0] ) for i in range( 1,num ) ])
#zticks_label = np.array( [ round( qz[label_array_qz==i].mean(),4) for i in range( 1, num ) ])
#zticks_label = np.array( [ qz[label_array_qz==i].mean() for i in range( 1, num ) ])
zticks_label = np.array( [ qz[label_array_qz==i][0] for i in range( 1, num ) ])
if interp:
zticks = np.int_(np.interp( np.round( zticks_label, 3), zticks_label, zticks ))
zticks_label = np.round( zticks_label, 3)
return zticks,zticks_label
def get_qzr_map( qr, qz, inc_x0, Nzline=10,Nrline=10, interp = True,
return_qrz_label= True, *argv,**kwargs):
'''
Dec 31, 2016, Y.G.@CHX
Calculate a qzr map of a gisaxs image (data) without plot
Parameters:
qr: 2-D array, qr of a gisaxs image (data)
qz: 2-D array, qz of a gisaxs image (data)
inc_x0: the incident beam center x
Options:
Nzline: int, z-line number
Nrline: int, r-line number
Return:
        zticks: list, z-tick positions in unit of pixel
        zticks_label: list, z-tick positions in unit of real space
        rticks: list, r-tick positions in unit of pixel
        rticks_label: list, r-tick positions in unit of real space
        if return_qrz_label is True, additionally return:
            label_array_qr: qr label array with the same shape as the gisaxs image
            label_array_qz: qz label array with the same shape as the gisaxs image
Examples:
ticks = get_qzr_map( qr, qz, inc_x0 )
'''
qr_start, qr_end, qr_num = qr.min(),qr.max(), Nrline
qz_start, qz_end, qz_num = qz.min(),qz.max(), Nzline
qr_edge, qr_center = get_qedge(qr_start , qr_end, ( qr_end- qr_start)/(qr_num+100), qr_num )
qz_edge, qz_center = get_qedge( qz_start, qz_end, (qz_end - qz_start)/(qz_num+100 ) , qz_num )
label_array_qz = get_qmap_label( qz, qz_edge)
label_array_qr = get_qmap_label( qr, qr_edge)
labels_qz, indices_qz = roi.extract_label_indices( label_array_qz )
labels_qr, indices_qr = roi.extract_label_indices( label_array_qr )
num_qz = len(np.unique( labels_qz ))
num_qr = len(np.unique( labels_qr ))
zticks,zticks_label = get_qz_tick_label(qz,label_array_qz)
#rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0)
try:
rticks,rticks_label = zip(*np.sort( zip( *get_qr_tick_label( qr, label_array_qr, inc_x0,interp=interp) )) )
except:
rticks,rticks_label = zip(* sorted( zip( *get_qr_tick_label( qr, label_array_qr, inc_x0,interp=interp) )) )
#stride = int(len(zticks)/10)
ticks=[ zticks,zticks_label,rticks,rticks_label ]
if return_qrz_label:
return zticks,zticks_label,rticks,rticks_label, label_array_qr, label_array_qz
else:
return zticks,zticks_label,rticks,rticks_label
def plot_qzr_map( qr, qz, inc_x0, ticks = None, data=None,
uid='uid', path ='', *argv,**kwargs):
'''
Dec 31, 2016, Y.G.@CHX
plot a qzr map of a gisaxs image (data)
Parameters:
qr: 2-D array, qr of a gisaxs image (data)
qz: 2-D array, qz of a gisaxs image (data)
inc_x0: the incident beam center x
ticks = [ zticks,zticks_label,rticks,rticks_label ], use ticks = get_qzr_map( qr, qz, inc_x0 ) to get
zticks: list, z-tick positions in unit of pixel
zticks_label: list, z-tick positions in unit of real space
rticks: list, r-tick positions in unit of pixel
rticks_label: list, r-tick positions in unit of real space
        label_array_qr: qr label array with the same shape as the gisaxs image
        label_array_qz: qz label array with the same shape as the gisaxs image
inc_x0: the incident beam center x
Options:
data: 2-D array, a gisaxs image, if None, =qr+qz
Nzline: int, z-line number
Nrline: int, r-line number
Return:
None
Examples:
        ticks = get_qzr_map( qr, qz, inc_x0 )
        plot_qzr_map( qr, qz, inc_x0, ticks=ticks, data=avg_imgmr )
'''
import matplotlib.pyplot as plt
import copy
import matplotlib.cm as mcm
if ticks is None:
zticks,zticks_label,rticks,rticks_label, label_array_qr, label_array_qz = get_qzr_map(
qr, qz, inc_x0, return_qrz_label=True )
else:
zticks,zticks_label,rticks,rticks_label, label_array_qr, label_array_qz = ticks
cmap='viridis'
_cmap = copy.copy((mcm.get_cmap(cmap)))
_cmap.set_under('w', 0)
fig, ax = plt.subplots( )
if data is None:
data=qr+qz
im = ax.imshow(data, cmap='viridis',origin='lower')
else:
im = ax.imshow(data, cmap='viridis',origin='lower', norm= LogNorm(vmin=0.001, vmax=1e1))
imr=ax.imshow(label_array_qr, origin='lower' ,cmap='viridis', vmin=0.5,vmax= None )#,interpolation='nearest',)
imz=ax.imshow(label_array_qz, origin='lower' ,cmap='viridis', vmin=0.5,vmax= None )#,interpolation='nearest',)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
ax.set_xlabel(r'$q_r$', fontsize=18)
ax.set_ylabel(r'$q_z$',fontsize=18)
stride = 1
ax.set_yticks( zticks[::stride] )
yticks = zticks_label[::stride]
ax.set_yticklabels(yticks, fontsize=7)
#stride = int(len(rticks)/10)
stride = 1
ax.set_xticks( rticks[::stride] )
xticks = rticks_label[::stride]
ax.set_xticklabels(xticks, fontsize=7)
ax.set_title( '%s_Qr_Qz_Map'%uid, y=1.03,fontsize=18)
fp = path + '%s_Qr_Qz_Map'%(uid) + '.png'
fig.savefig( fp, dpi=fig.dpi)
def show_qzr_map( qr, qz, inc_x0, data=None, Nzline=10,Nrline=10 ,
interp=True, *argv,**kwargs):
'''
Dec 16, 2015, Y.G.@CHX
plot a qzr map of a gisaxs image (data)
Parameters:
qr: 2-D array, qr of a gisaxs image (data)
qz: 2-D array, qz of a gisaxs image (data)
inc_x0: the incident beam center x
Options:
data: 2-D array, a gisaxs image, if None, =qr+qz
Nzline: int, z-line number
Nrline: int, r-line number
Return:
zticks: list, z-tick positions in unit of pixel
zticks_label: list, z-tick positions in unit of real space
rticks: list, r-tick positions in unit of pixel
rticks_label: list, r-tick positions in unit of real space
Examples:
ticks = show_qzr_map( qr, qz, inc_x0, data = None, Nzline=10, Nrline= 10 )
ticks = show_qzr_map( qr,qz, inc_x0, data = avg_imgmr, Nzline=10, Nrline=10 )
'''
import matplotlib.pyplot as plt
import copy
import matplotlib.cm as mcm
cmap='viridis'
_cmap = copy.copy((mcm.get_cmap(cmap)))
_cmap.set_under('w', 0)
qr_start, qr_end, qr_num = qr.min(),qr.max(), Nrline
qz_start, qz_end, qz_num = qz.min(),qz.max(), Nzline
qr_edge, qr_center = get_qedge(qr_start , qr_end, ( qr_end- qr_start)/(qr_num+100), qr_num )
qz_edge, qz_center = get_qedge( qz_start, qz_end, (qz_end - qz_start)/(qz_num+100 ) , qz_num )
label_array_qz = get_qmap_label( qz, qz_edge)
label_array_qr = get_qmap_label( qr, qr_edge)
labels_qz, indices_qz = roi.extract_label_indices( label_array_qz )
labels_qr, indices_qr = roi.extract_label_indices( label_array_qr )
num_qz = len(np.unique( labels_qz ))
num_qr = len(np.unique( labels_qr ))
fig, ax = plt.subplots( figsize=(8,14) )
if data is None:
data=qr+qz
im = ax.imshow(data, cmap='viridis',origin='lower')
else:
im = ax.imshow(data, cmap='viridis',origin='lower', norm= LogNorm(vmin=0.001, vmax=1e1))
imr=ax.imshow(label_array_qr, origin='lower' ,cmap='viridis', vmin=0.5,vmax= None )#,interpolation='nearest',)
imz=ax.imshow(label_array_qz, origin='lower' ,cmap='viridis', vmin=0.5,vmax= None )#,interpolation='nearest',)
#caxr = fig.add_axes([0.88, 0.2, 0.03, .7]) #x,y, width, heigth
#cba = fig.colorbar(im, cax=caxr )
#cba = fig.colorbar(im, fraction=0.046, pad=0.04)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
#fig.colorbar(im, shrink =.82)
#cba = fig.colorbar(im)
ax.set_xlabel(r'$q_r$', fontsize=18)
ax.set_ylabel(r'$q_z$',fontsize=18)
zticks,zticks_label = get_qz_tick_label(qz,label_array_qz)
#rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0)
try:
rticks,rticks_label = zip(*np.sort( zip( *get_qr_tick_label( qr, label_array_qr, inc_x0,interp=interp) )) )
except:
rticks,rticks_label = zip(* sorted( zip( *get_qr_tick_label( qr, label_array_qr, inc_x0,interp=interp) )) )
#stride = int(len(zticks)/10)
stride = 1
ax.set_yticks( zticks[::stride] )
yticks = zticks_label[::stride]
ax.set_yticklabels(yticks, fontsize=7)
#stride = int(len(rticks)/10)
stride = 1
ax.set_xticks( rticks[::stride] )
xticks = rticks_label[::stride]
ax.set_xticklabels(xticks, fontsize=7)
if 'uid' in kwargs:
uid=kwargs['uid']
else:
uid='uid'
ax.set_title( '%s_Qr_Qz_Map'%uid, y=1.03,fontsize=18)
save=False
if 'save' in kwargs:
save=kwargs['save']
if save:
path=kwargs['path']
fp = path + '%s_Qr_Qz_Map'%(uid) + '.png'
fig.savefig( fp, dpi=fig.dpi)
#plt.show()
return zticks,zticks_label,rticks,rticks_label
def show_qzr_roi( data, rois, inc_x0, ticks, alpha=0.3, uid='uid', path = '', save=False, return_fig=False, *argv,**kwargs):
'''
Dec 16, 2015, Y.G.@CHX
plot a qzr map of a gisaxs image with rois( a label array)
Parameters:
data: 2-D array, a gisaxs image
rois: 2-D array, a label array
inc_x0: the incident beam center x
ticks: zticks, zticks_label, rticks, rticks_label = ticks
zticks: list, z-tick positions in unit of pixel
zticks_label: list, z-tick positions in unit of real space
rticks: list, r-tick positions in unit of pixel
rticks_label: list, r-tick positions in unit of real space
Options:
alpha: transparency of the label array on top of data
Return:
a plot of a qzr map of a gisaxs image with rois( a label array)
Examples:
show_qzr_roi( avg_imgr, box_maskr, inc_x0, ticks)
'''
zticks, zticks_label, rticks, rticks_label = ticks
avg_imgr, box_maskr = data, rois
num_qzr = len(np.unique( box_maskr)) -1
#fig, ax = plt.subplots(figsize=(8,12))
fig, ax = plt.subplots(figsize=(8,8))
ax.set_title("%s_ROI--Labeled Array on Data"%uid)
im,im_label = show_label_array_on_image(ax, avg_imgr, box_maskr, imshow_cmap='viridis',
cmap='Paired', alpha=alpha,
vmin=0.01, vmax=30. , origin="lower")
for i in range( 1, num_qzr+1 ):
ind = np.where( box_maskr == i)[1]
indz = np.where( box_maskr == i)[0]
c = '%i'%i
y_val = int( indz.mean() )
#print (ind[0], ind[-1], inc_x0 )
M,m = max( ind ), min( ind )
#if ind[0] < inc_x0 and ind[-1]>inc_x0:
if m < inc_x0 and M > inc_x0:
x_val1 = int( (ind[np.where(ind < inc_x0)[0]]).mean() )
x_val2 = int( (ind[np.where(ind > inc_x0)[0]]).mean() )
ax.text(x_val1, y_val, c, va='center', ha='center')
ax.text(x_val2, y_val, c, va='center', ha='center')
else:
x_val = int( ind.mean() )
#print (xval, y)
ax.text(x_val, y_val, c, va='center', ha='center')
#print (x_val1,x_val2)
#stride = int(len(zticks)/3)
stride = 1
ax.set_yticks( zticks[::stride] )
yticks = zticks_label[::stride]
ax.set_yticklabels(yticks, fontsize=9)
#stride = int(len(rticks)/3)
stride = 1
ax.set_xticks( rticks[::stride] )
xticks = rticks_label[::stride]
ax.set_xticklabels(xticks, fontsize=9)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
ax.set_xlabel(r'$q_r$', fontsize=22)
ax.set_ylabel(r'$q_z$',fontsize=22)
fp = path + '%s_ROI_on_Image'%(uid) + '.png'
if save:
fig.savefig( fp, dpi=fig.dpi)
if return_fig:
return fig, ax
#plot g2 results
def plot_gisaxs_g2( g2, taus, res_pargs=None, one_plot = False, *argv,**kwargs):
'''Dec 16, 2015, Y.G.@CHX
plot g2 results,
g2: one-time correlation function
taus: the time delays
    res_pargs: a dict, which can contain
uid/path/qr_center/qz_center/
one_plot: if True, show all qz in one plot
    kwargs: can contain
        vlim: [vmin, vmax]: the y-limit of the plot will be [vmin * min(y), vmax * max(y)]
ylim/xlim: the limit of y and x
e.g.
        plot_gisaxs_g2( g2b, taus= np.arange( g2b.shape[0]) *timeperframe, res_pargs=res_pargs, vlim=[.99, 1.01] )
'''
if res_pargs is not None:
uid = res_pargs['uid']
path = res_pargs['path']
qz_center = res_pargs[ 'qz_center']
num_qz = len( qz_center)
qr_center = res_pargs[ 'qr_center']
num_qr = len( qr_center)
else:
if 'uid' in kwargs.keys():
uid = kwargs['uid']
else:
uid = 'uid'
if 'path' in kwargs.keys():
path = kwargs['path']
else:
path = ''
if 'qz_center' in kwargs.keys():
qz_center = kwargs[ 'qz_center']
num_qz = len( qz_center)
else:
print( 'Please give qz_center')
if 'qr_center' in kwargs.keys():
qr_center = kwargs[ 'qr_center']
num_qr = len( qr_center)
else:
print( 'Please give qr_center')
if not one_plot:
for qz_ind in range(num_qz):
fig = plt.figure(figsize=(10, 12))
#fig = plt.figure()
title_qz = ' Qz= %.5f '%( qz_center[qz_ind]) + r'$\AA^{-1}$'
plt.title('uid= %s:--->'%uid + title_qz,fontsize=20, y =1.1)
#print (qz_ind,title_qz)
if num_qz!=1:
if num_qr!=1:
plt.axis('off')
sx = int(round(np.sqrt(num_qr)) )
if num_qr%sx == 0:
sy = int(num_qr/sx)
else:
sy=int(num_qr/sx+1)
for sn in range(num_qr):
ax = fig.add_subplot(sx,sy,sn+1 )
ax.set_ylabel("g2")
ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16)
title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$'
if num_qz==1:
title = 'uid= %s:--->'%uid + title_qz + '__' + title_qr
else:
title = title_qr
ax.set_title( title )
y=g2[:, sn + qz_ind * num_qr]
ax.semilogx(taus, y, '-o', markersize=6)
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
fp = path + 'uid=%s--g2-qz=%s'%(uid,qz_center[qz_ind]) + '.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
else:
if num_qz==1:
if num_qr==1:
fig = plt.figure(figsize=(8,8))
else:
fig = plt.figure(figsize=(10, 12))
else:
fig = plt.figure(figsize=(10, 12))
plt.title('uid= %s'%uid,fontsize=20, y =1.05)
if num_qz!=1:
if num_qr!=1:
plt.axis('off')
if num_qz==1:
if num_qr!=1:
plt.axis('off')
sx = int(round(np.sqrt(num_qr)) )
if num_qr%sx == 0:
sy = int(num_qr/sx)
else:
sy=int(num_qr/sx+1)
for sn in range(num_qr):
ax = fig.add_subplot(sx,sy,sn+1 )
ax.set_ylabel("g2")
ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16)
#title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$'
title_qr = " Qr= " + '%.5s '%( qr_center[sn]) + r'$\AA^{-1}$'
title = title_qr
ax.set_title( title )
for qz_ind in range(num_qz):
y=g2[:, sn + qz_ind * num_qr]
if sn ==0:
title_qz = ' Qz= %.5f '%( qz_center[qz_ind]) + r'$\AA^{-1}$'
ax.semilogx(taus, y, '-o', markersize=6, label = title_qz )
else:
ax.semilogx(taus, y, '-o', markersize=6, label='' )
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
if sn ==0:
ax.legend(loc='best', fontsize = 6)
fp = path + 'uid=%s--g2'%(uid) + '.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
#plot g2 results
def plot_gisaxs_two_g2( g2, taus, g2b, tausb,res_pargs=None,one_plot=False, *argv,**kwargs):
'''Dec 16, 2015, Y.G.@CHX
plot g2 results,
g2: one-time correlation function from a multi-tau method
g2b: another g2 from a two-time method
taus: the time delays
    kwargs: can contain
        vlim: [vmin, vmax]: the y-limit of the plot will be [vmin * min(y), vmax * max(y)]
ylim/xlim: the limit of y and x
e.g.
        plot_gisaxs_two_g2( g2, taus, g2b, tausb, res_pargs=res_pargs, vlim=[.99, 1.01] )
'''
if res_pargs is not None:
uid = res_pargs['uid']
path = res_pargs['path']
qz_center = res_pargs[ 'qz_center']
num_qz = len( qz_center)
qr_center = res_pargs[ 'qr_center']
num_qr = len( qr_center)
else:
if 'uid' in kwargs.keys():
uid = kwargs['uid']
else:
uid = 'uid'
if 'path' in kwargs.keys():
path = kwargs['path']
else:
path = ''
if 'qz_center' in kwargs.keys():
qz_center = kwargs[ 'qz_center']
num_qz = len( qz_center)
else:
print( 'Please give qz_center')
if 'qr_center' in kwargs.keys():
qr_center = kwargs[ 'qr_center']
num_qr = len( qr_center)
else:
print( 'Please give qr_center')
if not one_plot:
for qz_ind in range(num_qz):
fig = plt.figure(figsize=(12, 10))
#fig = plt.figure()
title_qz = ' Qz= %.5f '%( qz_center[qz_ind]) + r'$\AA^{-1}$'
plt.title('uid= %s:--->'%uid + title_qz,fontsize=20, y =1.1)
#print (qz_ind,title_qz)
if num_qz!=1:plt.axis('off')
sx = int(round(np.sqrt(num_qr)) )
if num_qr%sx == 0:
sy = int(num_qr/sx)
else:
sy=int(num_qr/sx+1)
for sn in range(num_qr):
ax = fig.add_subplot(sx,sy,sn+1 )
ax.set_ylabel("g2")
ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16)
title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$'
if num_qz==1:
title = 'uid= %s:--->'%uid + title_qz + '__' + title_qr
else:
title = title_qr
ax.set_title( title )
y=g2b[:, sn + qz_ind * num_qr]
ax.semilogx( tausb, y, '--r', markersize=6,label= 'by-two-time')
#y2=g2[:, sn]
y2=g2[:, sn + qz_ind * num_qr]
ax.semilogx(taus, y2, 'o', markersize=6, label= 'by-multi-tau')
if sn + qz_ind * num_qr==0:
ax.legend(loc='best')
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
fp = path + 'uid=%s--two-g2-qz=%s'%(uid,qz_center[qz_ind]) + '.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
else:
fig = plt.figure(figsize=(12, 10))
plt.title('uid= %s'%uid,fontsize=20, y =1.05)
if num_qz!=1:
if num_qr!=1:
plt.axis('off')
if num_qz==1:
if num_qr!=1:
plt.axis('off')
sx = int(round(np.sqrt(num_qr)) )
if num_qr%sx == 0:
sy = int(num_qr/sx)
else:
sy=int(num_qr/sx+1)
for sn in range(num_qr):
ax = fig.add_subplot(sx,sy,sn+1 )
ax.set_ylabel("g2")
ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16)
title_qr = " Qr= " + '%.5s '%( qr_center[sn]) + r'$\AA^{-1}$'
#title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$'
title = title_qr
ax.set_title( title )
for qz_ind in range(num_qz):
y=g2b[:, sn + qz_ind * num_qr]
y2=g2[:, sn + qz_ind * num_qr]
title_qz = ' Qz= %.5f '%( qz_center[qz_ind]) + r'$\AA^{-1}$'
label1 = ''
label2 =''
if sn ==0:
label2 = title_qz
elif sn==1:
if qz_ind ==0:
label1= 'by-two-time'
label2= 'by-multi-tau'
ax.semilogx(tausb, y, '-r', markersize=6, linewidth=4, label=label1)
ax.semilogx(taus, y2, 'o', markersize=6, label=label2)
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
if (sn ==0) or (sn==1):
ax.legend(loc='best', fontsize = 6)
fp = path + 'uid=%s--g2--two-g2-'%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
def save_gisaxs_g2( g2, res_pargs, time_label= False, taus=None, filename=None, *argv,**kwargs):
'''
Aug 8, 2016, Y.G.@CHX
save g2 results,
res_pargs should contain
g2: one-time correlation function
    res_pargs: contains taus, q_ring_center values
path:
uid:
'''
if taus is None:
taus = res_pargs[ 'taus']
try:
qz_center = res_pargs['qz_center']
qr_center = res_pargs['qr_center']
except:
roi_label= res_pargs['roi_label']
path = res_pargs['path']
uid = res_pargs['uid']
df = DataFrame( np.hstack( [ (taus).reshape( len(g2),1) , g2] ) )
columns=[]
columns.append('tau')
try:
for qz in qz_center:
for qr in qr_center:
columns.append( [str(qz),str(qr)] )
except:
columns.append( [ v for (k,v) in roi_label.items()] )
df.columns = columns
if filename is None:
if time_label:
dt =datetime.now()
CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
filename = os.path.join(path, 'g2-%s-%s.csv' %(uid,CurTime))
else:
filename = os.path.join(path, 'uid=%s--g2.csv' % (uid))
else:
filename = os.path.join(path, filename)
df.to_csv(filename)
print( 'The correlation function of uid= %s is saved with filename as %s'%(uid, filename))
def stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1):
return beta * (np.exp(-2 * relaxation_rate * x))**alpha + baseline
def simple_exponential(x, beta, relaxation_rate, baseline=1):
return beta * np.exp(-2 * relaxation_rate * x) + baseline
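# Illustrative sketch (added for exposition; beta, the relaxation rate and the
# delay grid are made-up numbers): with alpha=1 the stretched form reduces
# exactly to the simple exponential, which is what fit_gisaxs_g2 below exploits.
def _demo_g2_model_functions():
    '''Hypothetical sketch: evaluate both g2 model forms on synthetic delays.'''
    import numpy as np
    taus = np.logspace(-4, 1, 50)   # fake delay times in seconds
    g2_simple = simple_exponential(taus, beta=0.2, relaxation_rate=10.0, baseline=1.0)
    g2_stretch = stretched_auto_corr_scat_factor(taus, beta=0.2, relaxation_rate=10.0,
                                                 alpha=1.0, baseline=1.0)
    assert np.allclose(g2_simple, g2_stretch)
    return taus, g2_simple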
def fit_gisaxs_g2( g2, res_pargs, function='simple_exponential', one_plot=False, *argv,**kwargs):
'''
July 20,2016, Y.G.@CHX
Fit one-time correlation function
    The supported functions include simple exponential and stretched/compressed exponential
Parameters
----------
g2: one-time correlation function for fit, with shape as [taus, qs]
res_pargs: a dict, contains keys
taus: the time delay, with the same length as g2
q_ring_center: the center of q rings, for the title of each sub-plot
uid: unique id, for the title of plot
kwargs:
        fit_variables: if given, should be a dict flagging which parameters are varied
                       in the fit (True = vary, False = hold fixed), e.g.
                       { 'beta': True,              # usually True
                         'relaxation_rate': True,   # usually True
                         'alpha': False,            # False for simple exponential, True for stretched/compressed
                         'baseline': True           # sometimes False, then kept at 1
                       }
function:
'simple_exponential': fit by a simple exponential function, defined as
beta * np.exp(-2 * relaxation_rate * lags) + baseline
        'stretched_exponential': fit by a stretched exponential function, defined as
beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline
Returns
-------
    fit results:
a dict, with keys as
'baseline':
'beta':
'relaxation_rate':
an example:
        result = fit_gisaxs_g2( g2, res_pargs, function = 'simple')
        result = fit_gisaxs_g2( g2, res_pargs, function = 'stretched')
TO DO:
add variables to options
'''
taus = res_pargs[ 'taus']
qz_center = res_pargs[ 'qz_center']
num_qz = len( qz_center)
qr_center = res_pargs[ 'qr_center']
num_qr = len( qr_center)
uid=res_pargs['uid']
path=res_pargs['path']
#uid=res_pargs['uid']
num_rings = g2.shape[1]
beta = np.zeros( num_rings ) # contrast factor
rate = np.zeros( num_rings ) # relaxation rate
alpha = np.zeros( num_rings ) # alpha
baseline = np.zeros( num_rings ) # baseline
    # Determine which parameters the user wants to hold fixed: in 'fit_variables'
    # a value of False means "do not vary this parameter in the fit".
    if 'fit_variables' in kwargs:
        additional_var = kwargs['fit_variables']
        #print ( additional_var )
        _vars = [ k for k in list( additional_var.keys()) if additional_var[k] is False]
    else:
        _vars = []
    if function=='simple_exponential' or function=='simple':
        _vars = np.unique( list(_vars) + ['alpha'] )   # alpha is fixed at 1 for a simple exponential
        mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= list( _vars) )
    elif function=='stretched_exponential' or function=='stretched':
        mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= _vars)
    else:
        print ("The %s is not supported. The supported functions include simple_exponential and stretched_exponential"%function)
    #mod.set_param_hint( 'beta', value = 0.05 )
    #mod.set_param_hint( 'alpha', value = 1.0 )
    #mod.set_param_hint( 'relaxation_rate', value = 0.005 )
    #mod.set_param_hint( 'baseline', value = 1.0, min=0.5, max= 1.5 )
    mod.set_param_hint( 'baseline', min=0.5, max= 2.5 )
    mod.set_param_hint( 'beta', min=0.0 )
    mod.set_param_hint( 'alpha', min=0.0 )
    mod.set_param_hint( 'relaxation_rate', min=0.0 )
if 'guess_values' in kwargs:
if 'beta' in list(kwargs['guess_values'].keys()):
beta_ = kwargs['guess_values']['beta']
else:
beta_=0.05
if 'alpha' in list(kwargs['guess_values'].keys()):
alpha_= kwargs['guess_values']['alpha']
else:
alpha_=1.0
if 'relaxation_rate' in list(kwargs['guess_values'].keys()):
relaxation_rate_= kwargs['guess_values']['relaxation_rate']
else:
relaxation_rate_=0.005
if 'baseline' in list(kwargs['guess_values'].keys()):
baseline_= kwargs['guess_values']['baseline']
else:
baseline_=1.0
pars = mod.make_params( beta=beta_, alpha=alpha_, relaxation_rate = relaxation_rate_, baseline=baseline_)
else:
pars = mod.make_params( beta=.05, alpha=1.0, relaxation_rate =0.005, baseline=1.0)
for v in _vars:
pars['%s'%v].vary = False
#print ( pars['%s'%v], pars['%s'%v].vary )
result = {}
if not one_plot:
for qz_ind in range(num_qz):
#fig = plt.figure(figsize=(10, 12))
fig = plt.figure(figsize=(12, 10))
#fig = plt.figure()
title_qz = ' Qz= %.5f '%( qz_center[qz_ind]) + r'$\AA^{-1}$'
plt.title('uid= %s:--->'%uid + title_qz,fontsize=20, y =1.1)
#print (qz_ind,title_qz)
if num_qz!=1:plt.axis('off')
sx = int(round(np.sqrt(num_qr)) )
if num_qr%sx == 0:
sy = int(num_qr/sx)
else:
sy=int(num_qr/sx+1)
for sn in range(num_qr):
ax = fig.add_subplot(sx,sy,sn+1 )
ax.set_ylabel("g2")
ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16)
title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$'
if num_qz==1:
title = 'uid= %s:--->'%uid + title_qz + '__' + title_qr
else:
title = title_qr
ax.set_title( title )
i = sn + qz_ind * num_qr
y=g2[1:, i]
result1 = mod.fit(y, pars, x = taus[1:] )
#print ( result1.best_values)
rate[i] = result1.best_values['relaxation_rate']
#rate[i] = 1e-16
beta[i] = result1.best_values['beta']
#baseline[i] = 1.0
baseline[i] = result1.best_values['baseline']
if function=='simple_exponential' or function=='simple':
alpha[i] =1.0
elif function=='stretched_exponential' or function=='stretched':
alpha[i] = result1.best_values['alpha']
ax.semilogx(taus[1:], y, 'bo')
ax.semilogx(taus[1:], result1.best_fit, '-r')
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
txts = r'$\tau$' + r'$ = %.3f$'%(1/rate[i]) + r'$ s$'
ax.text(x =0.02, y=.55 +.3, s=txts, fontsize=14, transform=ax.transAxes)
txts = r'$\alpha$' + r'$ = %.3f$'%(alpha[i])
#txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$'
ax.text(x =0.02, y=.45+.3, s=txts, fontsize=14, transform=ax.transAxes)
txts = r'$baseline$' + r'$ = %.3f$'%( baseline[i])
ax.text(x =0.02, y=.35 + .3, s=txts, fontsize=14, transform=ax.transAxes)
result = dict( beta=beta, rate=rate, alpha=alpha, baseline=baseline )
fp = path + 'uid=%s--g2-qz=%s--fit'%(uid,qz_center[qz_ind]) + '.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
else:
#fig = plt.figure(figsize=(10, 12))
#fig = plt.figure(figsize=(12, 10))
if num_qz==1:
if num_qr==1:
fig = plt.figure(figsize=(8,8))
else:
fig = plt.figure(figsize=(10, 12))
else:
fig = plt.figure(figsize=(10, 12))
plt.title('uid= %s'%uid,fontsize=20, y =1.05)
if num_qz!=1:
if num_qr!=1:
plt.axis('off')
sx = int(round(np.sqrt(num_qr)) )
if num_qr%sx == 0:
sy = int(num_qr/sx)
else:
sy=int(num_qr/sx+1)
for sn in range(num_qr):
ax = fig.add_subplot(sx,sy,sn+1 )
ax.set_ylabel("g2")
ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16)
#title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$'
title_qr = " Qr= " + '%.5s '%( qr_center[sn]) + r'$\AA^{-1}$'
title = title_qr
ax.set_title( title )
for qz_ind in range(num_qz):
i = sn + qz_ind * num_qr
y=g2[1:, i]
result1 = mod.fit(y, pars, x = taus[1:] )
#print ( result1.best_values)
rate[i] = result1.best_values['relaxation_rate']
#rate[i] = 1e-16
beta[i] = result1.best_values['beta']
#baseline[i] = 1.0
baseline[i] = result1.best_values['baseline']
if function=='simple_exponential' or function=='simple':
alpha[i] =1.0
elif function=='stretched_exponential' or function=='stretched':
alpha[i] = result1.best_values['alpha']
if sn ==0:
title_qz = ' Qz= %.5f '%( qz_center[qz_ind]) + r'$\AA^{-1}$'
ax.semilogx(taus[1:], y, 'o', markersize=6, label = title_qz )
else:
ax.semilogx(taus[1:], y, 'o', markersize=6, label='' )
ax.semilogx(taus[1:], result1.best_fit, '-r')
#print( result1.best_values['relaxation_rate'], result1.best_values['beta'] )
txts = r'$q_z$' + r'$_%s$'%qz_ind + r'$\tau$' + r'$ = %.3f$'%(1/rate[i]) + r'$ s$'
ax.text(x =0.02, y=.55 +.3 - 0.1*qz_ind, s=txts, fontsize=14, transform=ax.transAxes)
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
if sn ==0:
ax.legend(loc='best', fontsize = 6)
result = dict( beta=beta, rate=rate, alpha=alpha, baseline=baseline )
fp = path + 'uid=%s--g2--fit-'%(uid) + '.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
#fp = path + 'g2--uid=%s-qz=%s-fit'%(uid,qz_center[qz_ind]) + CurTime + '.png'
#fig.savefig( fp, dpi=fig.dpi)
#result = dict( beta=beta, rate=rate, alpha=alpha, baseline=baseline )
#fp = path + 'uid=%s--g2--fit-'%(uid) + '.png'
#fig.savefig( fp, dpi=fig.dpi)
#fig.tight_layout()
#plt.show()
return result
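# Example call (hypothetical sketch; mirrors the call made in multi_uids_gisaxs_xpcs_analysis
# further below -- inputs are illustrative):
#   fit_result = fit_gisaxs_g2(g2, res_pargs, function='stretched', vlim=[0.95, 1.1],
#                              fit_variables={'baseline': True, 'beta': True,
#                                             'alpha': False, 'relaxation_rate': True},
#                              guess_values={'baseline': 1.0, 'beta': 0.05,
#                                            'alpha': 1.0, 'relaxation_rate': 0.01})
#   rates = fit_result['rate']    # one relaxation rate per (qz, qr) combination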
#GiSAXS End
###############################
def get_each_box_mean_intensity( data_series, box_mask, sampling, timeperframe, plot_ = True , *argv,**kwargs):
'''Dec 16, 2015, Y.G.@CHX
get each box (ROI) mean intensity as a function of time
'''
mean_int_sets, index_list = roi.mean_intensity(np.array( data_series[::sampling]), box_mask)
try:
N = len(data_series)
except:
N = data_series.length
times = np.arange( N )*timeperframe # get the time for each frame
num_rings = len( np.unique( box_mask)[1:] )
if plot_:
fig, ax = plt.subplots(figsize=(8, 8))
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
ax.set_title("uid= %s--Mean intensity of each box"%uid)
for i in range(num_rings):
ax.plot( times[::sampling], mean_int_sets[:,i], label="Box "+str(i+1),marker = 'o', ls='-')
ax.set_xlabel("Time")
ax.set_ylabel("Mean Intensity")
ax.legend()
#fp = path + 'uid=%s--Mean intensity of each box-'%(uid) + '.png'
if 'path' not in kwargs.keys():
path=''
else:
path = kwargs['path']
fp = path + 'uid=%s--Mean-intensity-of-each-ROI-'%(uid) + '.png'
fig.savefig( fp, dpi=fig.dpi)
#plt.show()
return times, mean_int_sets
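# Example (hypothetical usage sketch; `imgs`, `box_mask` and `timeperframe` are assumed to
# exist from the setup above -- values are illustrative):
#   times, mean_int = get_each_box_mean_intensity(imgs, box_mask, sampling=10,
#                                                 timeperframe=timeperframe, uid='uid_0001')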
def power_func(x, D0, power=2):
return D0 * x**power
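# e.g. with the default exponent, power_func(q, D0) == D0 * q**2 -- the diffusive q^2 scaling
# of the relaxation rate that fit_qr_qz_rate (below) assumes when power_variable is False.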
def fit_qr_qz_rate( qr, qz, rate, plot_=True, *argv,**kwargs):
'''
    Fit the relaxation rate vs. q as rate = D0 * q**power.
    Option:
        if power_variable is False, the exponent is fixed at power=2 (fit rate ~ D0*q^2);
        otherwise, the exponent is also fitted.
'''
power_variable=False
x=qr
if 'fit_range' in kwargs.keys():
fit_range = kwargs['fit_range']
else:
fit_range= None
if 'uid' in kwargs.keys():
uid = kwargs['uid']
else:
uid = 'uid'
if 'path' in kwargs.keys():
path = kwargs['path']
else:
path = ''
if fit_range is not None:
y=rate[fit_range[0]:fit_range[1]]
        x = x[fit_range[0]:fit_range[1]]
mod = Model( power_func )
#mod.set_param_hint( 'power', min=0.5, max= 10 )
#mod.set_param_hint( 'D0', min=0 )
    pars = mod.make_params( power = 2, D0 = 1e-5 )
if power_variable:
pars['power'].vary = True
else:
pars['power'].vary = False
Nqr = len( qr)
Nqz = len( qz)
D0= np.zeros( Nqz )
power= 2 #np.zeros( Nqz )
    res = []
    ys = []   # keep each qz's rate values so the data points can be re-plotted below
for i, qz_ in enumerate(qz):
try:
y = np.array( rate['rate'][ i*Nqr : (i+1)*Nqr ] )
except:
y = np.array( rate[ i*Nqr : (i+1)*Nqr ] )
#print( len(x), len(y) )
_result = mod.fit(y, pars, x = x )
        res.append( _result )
        ys.append( y )
D0[i] = _result.best_values['D0']
#power[i] = _result.best_values['power']
print ('The fitted diffusion coefficient D0 is: %.3e A^2S-1'%D0[i])
if plot_:
fig,ax = plt.subplots()
plt.title('Q%s-Rate--uid= %s_Fit'%(power,uid),fontsize=20, y =1.06)
for i, qz_ in enumerate(qz):
            ax.plot(x**power, ys[i], marker = 'o',
                    label=r'$q_z=%.5f$'%qz_)
ax.plot(x**power, res[i].best_fit, '-r')
txts = r'$D0: %.3e$'%D0[i] + r' $A^2$' + r'$s^{-1}$'
dy=0.1
ax.text(x =0.15, y=.65 -dy *i, s=txts, fontsize=14, transform=ax.transAxes)
legend = ax.legend(loc='best')
ax.set_ylabel('Relaxation rate 'r'$\gamma$'"($s^{-1}$)")
ax.set_xlabel("$q^%s$"r'($\AA^{-2}$)'%power)
dt =datetime.now()
CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
#fp = path + 'Q%s-Rate--uid=%s'%(power,uid) + CurTime + '--Fit.png'
fp = path + 'uid=%s--Q-Rate'%(uid) + '--fit-.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
return D0
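# Example (hypothetical sketch; `fit_result` as returned by fit_gisaxs_g2 above):
#   D0 = fit_qr_qz_rate(qr_center, qz_center, fit_result['rate'],
#                       uid='uid_0001', path=data_dir)
# Passing the full fit_result dict also works, since the function falls back to
# rate['rate'] inside a try/except.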
#plot g4 results
def plot_gisaxs_g4( g4, taus, res_pargs=None, one_plot=False, *argv,**kwargs):
'''Dec 16, 2015, Y.G.@CHX
plot g4 results,
g4: four-time correlation function
taus: the time delays
res_pargs, a dict, can contains
uid/path/qr_center/qz_center/
kwargs: can contains
vlim: [vmin,vmax]: for the plot limit of y, the y-limit will be [vmin * min(y), vmx*max(y)]
ylim/xlim: the limit of y and x
    e.g.
    plot_gisaxs_g4( g4, taus= np.arange( g4.shape[0]) *timeperframe, qz_center=qz_center, qr_center=qr_center, vlim=[.99, 1.01] )
'''
if res_pargs is not None:
uid = res_pargs['uid']
path = res_pargs['path']
qz_center = res_pargs[ 'qz_center']
num_qz = len( qz_center)
qr_center = res_pargs[ 'qr_center']
num_qr = len( qr_center)
else:
if 'uid' in kwargs.keys():
uid = kwargs['uid']
else:
uid = 'uid'
if 'path' in kwargs.keys():
path = kwargs['path']
else:
path = ''
if 'qz_center' in kwargs.keys():
qz_center = kwargs[ 'qz_center']
num_qz = len( qz_center)
else:
print( 'Please give qz_center')
if 'qr_center' in kwargs.keys():
qr_center = kwargs[ 'qr_center']
num_qr = len( qr_center)
else:
print( 'Please give qr_center')
if not one_plot:
for qz_ind in range(num_qz):
fig = plt.figure(figsize=(12, 10))
#fig = plt.figure()
title_qz = ' Qz= %.5f '%( qz_center[qz_ind]) + r'$\AA^{-1}$'
plt.title('uid= %s:--->'%uid + title_qz,fontsize=20, y =1.1)
#print (qz_ind,title_qz)
if num_qz!=1:
if num_qr!=1:
plt.axis('off')
sx = int(round(np.sqrt(num_qr)) )
if num_qr%sx == 0:
sy = int(num_qr/sx)
else:
sy=int(num_qr/sx+1)
for sn in range(num_qr):
ax = fig.add_subplot(sx,sy,sn+1 )
ax.set_ylabel("g4")
ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16)
title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$'
if num_qz==1:
title = 'uid= %s:--->'%uid + title_qz + '__' + title_qr
else:
title = title_qr
ax.set_title( title )
y=g4[:, sn + qz_ind * num_qr]
ax.semilogx(taus, y, '-o', markersize=6)
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
fp = path + 'uid=%s--g4-qz=%s'%(uid,qz_center[qz_ind]) + '.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
else:
fig = plt.figure(figsize=(12, 10))
plt.title('uid= %s'%uid,fontsize=20, y =1.05)
if num_qz!=1:
if num_qr!=1:
plt.axis('off')
sx = int(round(np.sqrt(num_qr)) )
if num_qr%sx == 0:
sy = int(num_qr/sx)
else:
sy=int(num_qr/sx+1)
for sn in range(num_qr):
ax = fig.add_subplot(sx,sy,sn+1 )
ax.set_ylabel("g4")
ax.set_xlabel(r"$\tau $ $(s)$", fontsize=16)
title_qr = " Qr= " + '%.5f '%( qr_center[sn]) + r'$\AA^{-1}$'
title = title_qr
ax.set_title( title )
for qz_ind in range(num_qz):
y=g4[:, sn + qz_ind * num_qr]
if sn ==0:
title_qz = ' Qz= %.5f '%( qz_center[qz_ind]) + r'$\AA^{-1}$'
ax.semilogx(taus, y, '-o', markersize=6, label = title_qz )
else:
ax.semilogx(taus, y, '-o', markersize=6, label='' )
if 'ylim' in kwargs:
ax.set_ylim( kwargs['ylim'])
elif 'vlim' in kwargs:
vmin, vmax =kwargs['vlim']
ax.set_ylim([min(y)*vmin, max(y[1:])*vmax ])
else:
pass
if 'xlim' in kwargs:
ax.set_xlim( kwargs['xlim'])
if sn ==0:
ax.legend(loc='best', fontsize = 6)
fp = path + 'uid=%s--g4-'%(uid) + '.png'
fig.savefig( fp, dpi=fig.dpi)
fig.tight_layout()
#plt.show()
def multi_uids_gisaxs_xpcs_analysis( uids, md, run_num=1, sub_num=None,good_start=10, good_end= None,
force_compress=False,
fit = True, compress=True, para_run=False ):
    '''Sep 16, 2016, YG@CHX-NSLS2
    Do GiSAXS-XPCS analysis for multiple uid data sets
uids: a list of uids to be analyzed
md: metadata, should at least include
mask: array, mask data
data_dir: the path to save data, the result will be saved in data_dir/uid/...
dpix:
Ldet:
lambda:
timeperframe:
center
run_num: the run number
sub_num: the number in each sub-run
fit: if fit, do fit for g2 and show/save all fit plots
compress: apply a compress algorithm
Save g2/metadata/g2-fit plot/g2 q-rate plot/ of each uid in data_dir/uid/...
return:
g2s: a dictionary, {run_num: sub_num: g2_of_each_uid}
taus,
use_uids: return the valid uids
'''
g2s = {} # g2s[run_number][sub_seq] = g2 of each uid
lag_steps = [0]
useful_uids = {}
if sub_num is None:
sub_num = len( uids )//run_num
mask = md['mask']
maskr = mask[::-1,:]
data_dir = md['data_dir']
box_maskr = md['ring_mask']
qz_center= md['qz_center']
qr_center= md['qr_center']
for run_seq in range(run_num):
g2s[ run_seq + 1] = {}
useful_uids[ run_seq + 1] = {}
i=0
for sub_seq in range( 0, sub_num ):
uid = uids[ sub_seq + run_seq * sub_num ]
print( 'The %i--th uid to be analyzed is : %s'%(i, uid) )
try:
detector = get_detector( db[uid ] )
imgs = load_data( uid, detector )
except:
print( 'The %i--th uid: %s can not load data'%(i, uid) )
imgs=0
data_dir_ = os.path.join( data_dir, '%s/'%uid)
os.makedirs(data_dir_, exist_ok=True)
i +=1
if imgs !=0:
Nimg = len(imgs)
md_ = imgs.md
useful_uids[ run_seq + 1][i] = uid
imgsr = reverse_updown( imgs )
imgsra = apply_mask( imgsr, maskr )
if compress:
filename = '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%uid
maskr, avg_imgr, imgsum, bad_frame_list = compress_eigerdata(imgsr, maskr, md_, filename,
force_compress= force_compress, bad_pixel_threshold= 5e9,nobytes=4,
para_compress=True, num_sub= 100)
try:
md['Measurement']= db[uid]['start']['Measurement']
#md['sample']=db[uid]['start']['sample']
#print( md['Measurement'] )
except:
md['Measurement']= 'Measurement'
md['sample']='sample'
dpix = md['x_pixel_size'] * 1000. #in mm, eiger 4m is 0.075 mm
lambda_ =md['incident_wavelength'] # wavelegth of the X-rays in Angstroms
Ldet = md['detector_distance']
# detector to sample distance (mm), currently, *1000 for saxs, *1 for gisaxs
exposuretime= md['count_time']
acquisition_period = md['frame_time']
timeperframe = acquisition_period#for g2
#timeperframe = exposuretime#for visiblitly
#timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata....
setup_pargs=dict(uid=uid, dpix= dpix, Ldet=Ldet, lambda_= lambda_,
timeperframe=timeperframe, path= data_dir)
md['avg_img'] = avg_imgr
min_inten = 0
#good_start = np.where( np.array(imgsum) > min_inten )[0][0]
#good_start = 0
#good_start = max(good_start, np.where( np.array(imgsum) > min_inten )[0][0] )
good_start = good_start
if good_end is None:
good_end_ = len(imgs)
else:
good_end_= good_end
FD = Multifile(filename, good_start, good_end_ )
good_start = max(good_start, np.where( np.array(imgsum) > min_inten )[0][0] )
print ('With compression, the good_start frame number is: %s '%good_start)
print ('The good_end frame number is: %s '%good_end_)
if not para_run:
g2, lag_steps_ =cal_g2c( FD, box_maskr, bad_frame_list,good_start, num_buf = 8,
imgsum= None, norm= None )
else:
g2, lag_steps_ =cal_g2p( FD, box_maskr, bad_frame_list,good_start, num_buf = 8,
imgsum= None, norm= None )
if len( lag_steps) < len(lag_steps_):
lag_steps = lag_steps_
else:
sampling = 1000 #sampling should be one
#good_start = check_shutter_open( imgsra, min_inten=5, time_edge = [0,10], plot_ = False )
good_start = 0
                    good_series = apply_mask( imgsra[good_start: ], maskr )
imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = sampling,
bad_pixel_threshold=1.2e8, plot_ = False, uid=uid)
bad_image_process = False
if len(bad_frame_list):
bad_image_process = True
print( bad_image_process )
g2, lag_steps_ =cal_g2( good_series, box_maskr, bad_image_process,
bad_frame_list, good_start, num_buf = 8 )
if len( lag_steps) < len(lag_steps_):
                        lag_steps = lag_steps_
taus_ = lag_steps_ * timeperframe
taus = lag_steps * timeperframe
res_pargs = dict(taus=taus_, qz_center=qz_center, qr_center=qr_center, path=data_dir_, uid=uid )
save_gisaxs_g2( g2, res_pargs )
#plot_gisaxs_g2( g2, taus, vlim=[0.95, 1.1], res_pargs=res_pargs, one_plot=True)
if fit:
fit_result = fit_gisaxs_g2( g2, res_pargs, function = 'stretched', vlim=[0.95, 1.1],
fit_variables={'baseline':True, 'beta':True, 'alpha':False,'relaxation_rate':True},
guess_values={'baseline':1.229,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01},
one_plot= True)
fit_qr_qz_rate( qr_center, qz_center, fit_result, power_variable= False,
uid=uid, path= data_dir_ )
psave_obj( md, data_dir_ + 'uid=%s-md'%uid ) #save the setup parameters
g2s[run_seq + 1][i] = g2
print ('*'*40)
print()
return g2s, taus, useful_uids
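# Example (hypothetical usage sketch; uids and paths are illustrative. Besides the keys shown
# here, md must also carry the detector metadata read inside the function, e.g.
# 'x_pixel_size', 'incident_wavelength', 'detector_distance', 'count_time', 'frame_time'):
#   md = dict(mask=mask, ring_mask=box_maskr, data_dir='/XF11ID/analysis/results/',
#             qz_center=qz_center, qr_center=qr_center)
#   g2s, taus, used_uids = multi_uids_gisaxs_xpcs_analysis(['uid_0001', 'uid_0002'], md,
#                                                          run_num=1, good_start=10)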
| bsd-3-clause |
SparkFreedom/DataDive | getting_started_with_d3/cleaning_code/plaza_traffic.py | 1 | 1350 | import pandas
import json
import numpy as np
# import the data into a pandas table
df = pandas.read_csv('TBTA_DAILY_PLAZA_TRAFFIC.csv')
# make a little function that takes the terrible string
# in the CASH and ETC columns and converts them to an int
toint = lambda x: int(x.replace(',',''))
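# e.g. toint('1,234') -> 1234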
# convert both columns
df['ETC'] = df['ETC'].apply(toint)
df['CASH'] = df['CASH'].apply(toint)
# calculate the mean number of people paying cash
mean_cash = df.groupby("PLAZAID")['CASH'].aggregate(np.mean)
mean_etc = df.groupby("PLAZAID")['ETC'].aggregate(np.mean)
# build the key
key = {
1 : "Robert F. Kennedy Bridge Bronx Plaza",
2 : "Robert F. Kennedy Bridge Manhattan Plaza",
3 : "Bronx-Whitestone Bridge",
4 : "Henry Hudson Bridge",
5 : "Marine Parkway-Gil Hodges Memorial Bridge",
6 : "Cross Bay Veterans Memorial Bridge",
7 : "Queens Midtown Tunnel",
8 : "Brooklyn-Battery Tunnel",
9 : "Throgs Neck Bridge",
11 : "Verrazano-Narrows Bridge"
}
# output to JSON we can use in d3
cash = [
{"id":d[0], "count":d[1], "name":key[d[0]]}
for d in mean_cash.to_dict().items()
]
electronic = [
{"id":d[0], "count":d[1], "name":key[d[0]]}
for d in mean_etc.to_dict().items()
]
out = {
"cash": cash,
"electronic": electronic
}
json.dump(out, open('../viz/data/plaza_traffic.json', 'w'))
| mit |
mavrix93/LightCurvesClassifier | lcc_web/web/interface/lcc_views/jobs.py | 1 | 6507 | import glob
import json
import os
from wsgiref.util import FileWrapper
import shutil
import pandas as pd
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponse
from django.shortcuts import render
from interface.models import DbQuery
from interface.models import StarsFilter
@login_required(login_url='login/')
def all_filters(request):
stars_filters_path = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters")
header = ["Job id", "Status", "Start date",
"Finish date", "Descriptors", "Deciders", "Link"]
dat = []
for star_filt in StarsFilter.objects.filter(user=request.user):
row = [star_filt.id,
star_filt.status,
str(star_filt.start_date),
str(star_filt.finish_date),
star_filt.descriptors.replace(";", "<br>"),
star_filt.deciders,
str(star_filt.id)]
dat.append(row)
table = pd.DataFrame(
dat, columns=["fold_name", "status", "start", "stop", "descr", "decid", "job_id"])
table["start"] = pd.to_datetime(table["start"])
table.sort_values(by="start", ascending=False, inplace=True)
job_ids = table["job_id"].values.tolist()
table = table.drop('job_id', 1)
return render(request, 'interface/jobs.html', {"page_title": "Star filter jobs",
"header": header,
"stars_filter": True,
"delete_prefix" : '"../{}/delete/"'.format(os.environ.get(
"DOCKYARD_APP_CONTEXT"), ""),
"table": zip(table.values.tolist(), job_ids)})
@login_required(login_url='login/')
def _all_filters(request):
stars_filters_path = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters")
header = ["Job id", "Status", "Date", "Descriptors", "Deciders", "Link"]
dat = []
for folder_name in os.listdir(stars_filters_path):
try:
with open(os.path.join(stars_filters_path, folder_name, "status.json"), 'r') as status_file:
status = json.load(status_file)
row = [folder_name,
status.get("status", ""),
status.get("start", ""),
status.get("descriptors", ""),
status.get("deciders", ""),
str(folder_name)]
dat.append(row)
except:
pass
table = pd.DataFrame(
dat, columns=["fold_name", "status", "start", "descr", "decid", "job_id"])
table["start"] = pd.to_datetime(table["start"])
table.sort_values(by="start", ascending=False, inplace=True)
job_ids = table["job_id"].values.tolist()
table = table.drop('job_id', 1)
return render(request, 'interface/jobs.html', {"page_title": "Star filter jobs",
"header": header,
"stars_filter": True,
"table": zip(table.values.tolist(), job_ids)})
@login_required(login_url='login/')
def all_results(request):
queries_path = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "query_results")
header = ["Job id", "Status", "Started",
"Finished", "Queries", "Connectors", "Link"]
dat = []
for query in DbQuery.objects.filter(user=request.user):
row = [query.id,
query.status,
str(query.start_date),
str(query.finish_date),
str(query.queries),
query.connectors,
str(query.id)]
dat.append(row)
table = pd.DataFrame(
dat, columns=["fold_name", "status", "started", "finished", "queries", "conn", "job_id"])
table["started"] = pd.to_datetime(table["started"])
table.sort_values(by="started", ascending=False, inplace=True)
job_ids = table["job_id"].values.tolist()
table = table.drop('job_id', 1)
return render(request, 'interface/jobs.html', {"page_title": "Queries jobs",
"stars_filter": False,
"header": header,
"delete_prefix": '"../{}/delete/"'.format(os.environ.get(
"DOCKYARD_APP_CONTEXT")),
"table": zip(table.values.tolist(), job_ids)})
def download_file(request, file_name):
"""
Send a file through Django without loading the whole file into
memory at once. The FileWrapper will turn the file object into an
iterator for chunks of 8KB.
"""
if file_name.startswith("estim"):
file_type = "estim"
file_name = file_name[9:]
filename = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters", file_name, "estimator")
elif not file_name.startswith("filt"):
file_type = "query"
filename = os.path.join(
settings.MEDIA_ROOT, str(request.user.id), "query_results", file_name + ".zip")
else:
file_type = "filter"
file_name = file_name[4:]
pa = os.path.join(settings.MEDIA_ROOT, str(request.user.id), "stars_filters", file_name)
filter_names = glob.glob(pa + "/*.filter")
if filter_names:
filter_name = os.path.basename(filter_names[0])
filename = os.path.join(
settings.MEDIA_ROOT, str(request.user.id), "stars_filters", file_name, filter_name)
else:
return render(request, 'interface/error_page.html', {"error_m": "There is no filter in %s" % file_name})
wrapper = FileWrapper(open(filename, 'rb'))
response = HttpResponse(wrapper, content_type='text/plain')
response['Content-Length'] = os.path.getsize(filename)
if file_type == "filter":
response[
'Content-Disposition'] = 'attachment; filename="%s.filter"' % filter_name
elif file_type == "estim":
response[
'Content-Disposition'] = 'attachment; filename="estimator"'
else:
response[
'Content-Disposition'] = 'attachment; filename="results_%s.zip"' % file_name
return response
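# Hypothetical wiring sketch (not taken from this app's actual urls.py): the view above expects
# the requested file name as a URL argument, e.g.
#   url(r'^download/(?P<file_name>.+)$', views.download_file, name='download_file')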
| mit |
iamkingmaker/zipline | zipline/data/ffc/synthetic.py | 5 | 8325 | """
Synthetic data loaders for testing.
"""
from bcolz import ctable
from numpy import (
arange,
array,
float64,
full,
iinfo,
uint32,
)
from pandas import (
DataFrame,
Timestamp,
)
from sqlite3 import connect as sqlite3_connect
from six import iteritems
from zipline.data.ffc.base import FFCLoader
from zipline.data.ffc.frame import DataFrameFFCLoader
from zipline.data.ffc.loaders.us_equity_pricing import (
BcolzDailyBarWriter,
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
UINT_32_MAX = iinfo(uint32).max
def nanos_to_seconds(nanos):
return nanos / (1000 * 1000 * 1000)
class MultiColumnLoader(FFCLoader):
"""
FFCLoader that can delegate to sub-loaders.
Parameters
----------
loaders : dict
Dictionary mapping columns -> loader
"""
def __init__(self, loaders):
self._loaders = loaders
def load_adjusted_array(self, columns, mask):
"""
Load by delegating to sub-loaders.
"""
out = []
for column in columns:
try:
loader = self._loaders[column]
except KeyError:
raise ValueError("Couldn't find loader for %s" % column)
out.append(loader.load_adjusted_array([column], mask))
return out
class ConstantLoader(MultiColumnLoader):
"""
Synthetic FFCLoader that returns a constant value for each column.
Parameters
----------
constants : dict
Map from column to value(s) to use for that column.
Values can be anything that can be passed as the first positional
argument to a DataFrame of the same shape as `mask`.
mask : pandas.DataFrame
Mask indicating when assets existed.
Indices of this frame are used to align input queries.
Notes
-----
Adjustments are unsupported with ConstantLoader.
"""
def __init__(self, constants, dates, assets):
loaders = {}
for column, const in iteritems(constants):
frame = DataFrame(
const,
index=dates,
columns=assets,
dtype=column.dtype,
)
loaders[column] = DataFrameFFCLoader(
column=column,
baseline=frame,
adjustments=None,
)
super(ConstantLoader, self).__init__(loaders)
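# Minimal usage sketch (assumptions: `USEquityPricing.close` is a valid column in this code
# base and `dates`/`assets`/`mask` already exist -- all names here are illustrative):
#   loader = ConstantLoader(constants={USEquityPricing.close: 123.0},
#                           dates=dates, assets=assets)
#   adjusted_arrays = loader.load_adjusted_array([USEquityPricing.close], mask)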
class SyntheticDailyBarWriter(BcolzDailyBarWriter):
"""
Bcolz writer that creates synthetic data based on asset lifetime metadata.
For a given asset/date/column combination, we generate a corresponding raw
value using the following formula for OHLCV columns:
data(asset, date, column) = (100,000 * asset_id)
+ (10,000 * column_num)
+ (date - Jan 1 2000).days # ~6000 for 2015
where:
column_num('open') = 0
column_num('high') = 1
column_num('low') = 2
column_num('close') = 3
column_num('volume') = 4
We use days since Jan 1, 2000 to guarantee that there are no collisions
    while also keeping the produced values smaller than UINT32_MAX / 1000.
For 'day' and 'id', we use the standard format expected by the base class.
Parameters
----------
asset_info : DataFrame
DataFrame with asset_id as index and 'start_date'/'end_date' columns.
calendar : DatetimeIndex
Calendar to use for constructing asset lifetimes.
"""
OHLCV = ('open', 'high', 'low', 'close', 'volume')
OHLC = ('open', 'high', 'low', 'close')
PSEUDO_EPOCH = Timestamp('2000-01-01', tz='UTC')
def __init__(self, asset_info, calendar):
super(SyntheticDailyBarWriter, self).__init__()
assert (
# Using .value here to avoid having to care about UTC-aware dates.
self.PSEUDO_EPOCH.value <
calendar.min().value <=
asset_info['start_date'].min().value
)
assert (asset_info['start_date'] < asset_info['end_date']).all()
self._asset_info = asset_info
self._calendar = calendar
def _raw_data_for_asset(self, asset_id):
"""
Generate 'raw' data that encodes information about the asset.
See class docstring for a description of the data format.
"""
# Get the dates for which this asset existed according to our asset
# info.
dates = self._calendar[
self._calendar.slice_indexer(
self.asset_start(asset_id), self.asset_end(asset_id)
)
]
data = full(
(len(dates), len(US_EQUITY_PRICING_BCOLZ_COLUMNS)),
asset_id * (100 * 1000),
dtype=uint32,
)
# Add 10,000 * column-index to OHLCV columns
data[:, :5] += arange(5) * (10 * 1000)
        # Add days since Jan 1 2000 for OHLCV columns.
data[:, :5] += (dates - self.PSEUDO_EPOCH).days[:, None]
frame = DataFrame(
data,
index=dates,
columns=US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
frame['day'] = nanos_to_seconds(dates.asi8)
frame['id'] = asset_id
return ctable.fromdataframe(frame)
def asset_start(self, asset):
ret = self._asset_info.loc[asset]['start_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
def asset_end(self, asset):
ret = self._asset_info.loc[asset]['end_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
@classmethod
def expected_value(cls, asset_id, date, colname):
"""
        Return the expected raw value for an asset/date/column triple.
        Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100 * 1000
from_colname = cls.OHLCV.index(colname) * (10 * 1000)
from_date = (date - cls.PSEUDO_EPOCH).days
return from_asset + from_colname + from_date
def expected_values_2d(self, dates, assets, colname):
"""
        Return a 2D array containing cls.expected_value(asset_id, date,
        colname) for each date/asset pair in the inputs.
        Values before/after an asset's lifetime are filled with 0 for volume and
NaN for price columns.
"""
if colname == 'volume':
dtype = uint32
missing = 0
else:
dtype = float64
missing = float('nan')
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
start, end = self.asset_start(asset), self.asset_end(asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
data[i, j] = self.expected_value(asset, date, colname)
return data
# BEGIN SUPERCLASS INTERFACE
def gen_tables(self, assets):
for asset in assets:
yield asset, self._raw_data_for_asset(asset)
def to_uint32(self, array, colname):
if colname in {'open', 'high', 'low', 'close'}:
# Data is stored as 1000 * raw value.
assert array.max() < (UINT_32_MAX / 1000), "Test data overflow!"
return array * 1000
else:
assert colname in ('volume', 'day'), "Unknown column: %s" % colname
return array
# END SUPERCLASS INTERFACE
class NullAdjustmentReader(SQLiteAdjustmentReader):
"""
A SQLiteAdjustmentReader that stores no adjustments and uses in-memory
SQLite.
"""
def __init__(self):
conn = sqlite3_connect(':memory:')
writer = SQLiteAdjustmentWriter(conn)
empty = DataFrame({
'sid': array([], dtype=uint32),
'effective_date': array([], dtype=uint32),
'ratio': array([], dtype=float),
})
writer.write(splits=empty, mergers=empty, dividends=empty)
super(NullAdjustmentReader, self).__init__(conn)
| apache-2.0 |
devanshdalal/scikit-learn | examples/svm/plot_svm_kernels.py | 96 | 2019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
wlamond/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
krbeverx/Firmware | src/modules/sensors/vehicle_magnetometer/mag_compensation/python/mag_compensation.py | 2 | 10043 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: mag_compensation.py
Author: Tanja Baumann
Email: [email protected]
Github: https://github.com/baumanta
Description:
Computes linear coefficients for mag compensation from thrust and current
Usage:
python mag_compensation.py /path/to/log/logfile.ulg current --instance 1
Remark:
If your logfile does not contain some of the topics, e.g.battery_status/current_a
you will have to comment out the corresponding parts in the script
"""
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from pyulog import ULog
from pyulog.px4 import PX4ULog
from pylab import *
import numpy as np
import textwrap as tw
import argparse
#arguments
parser = argparse.ArgumentParser(description='Calculate compensation parameters from ulog')
parser.add_argument('logfile', type=str, nargs='?', default=[],
help='full path to ulog file')
parser.add_argument('type', type=str, nargs='?', choices=['current', 'thrust'], default=[],
help='Power signal used for compensation, supported is "current" or "thrust".')
parser.add_argument('--instance', type=int, nargs='?', default=0,
help='instance of the current or thrust signal to use (0 or 1)')
args = parser.parse_args()
log_name = args.logfile
comp_type = args.type
comp_instance = args.instance
#Load the log data (produced by pyulog)
log = ULog(log_name)
pxlog = PX4ULog(log)
def get_data(topic_name, variable_name, index):
try:
dataset = log.get_dataset(topic_name, index)
return dataset.data[variable_name]
except:
return []
def ms2s_list(time_ms_list):
if len(time_ms_list) > 0:
return 1e-6 * time_ms_list
else:
return time_ms_list
# Select msgs and copy into arrays
armed = get_data('vehicle_status', 'arming_state', 0)
t_armed = ms2s_list(get_data('vehicle_status', 'timestamp', 0))
if comp_type == "thrust":
power = get_data('vehicle_rates_setpoint', 'thrust_body[2]', comp_instance)
power_t = ms2s_list(get_data('vehicle_rates_setpoint', 'timestamp', comp_instance))
comp_type_param = 1
factor = 1
unit = "[G]"
elif comp_type == "current":
power = get_data('battery_status', 'current_a', comp_instance)
power = np.true_divide(power, 1000) #kA
power_t = ms2s_list(get_data('battery_status', 'timestamp', comp_instance))
comp_type_param = 2 + comp_instance
factor = -1
unit = "[G/kA]"
else:
print("unknown compensation type {}. Supported is either 'thrust' or 'current'.".format(comp_type))
sys.exit(1)
if len(power) == 0:
print("could not retrieve power signal from log, zero data points")
sys.exit(1)
mag0X_body = get_data('sensor_mag', 'x', 0)
mag0Y_body = get_data('sensor_mag', 'y', 0)
mag0Z_body = get_data('sensor_mag', 'z', 0)
t_mag0 = ms2s_list(get_data('sensor_mag', 'timestamp', 0))
mag0_ID = get_data('sensor_mag', 'device_id', 0)
mag1X_body = get_data('sensor_mag', 'x', 1)
mag1Y_body = get_data('sensor_mag', 'y', 1)
mag1Z_body = get_data('sensor_mag', 'z', 1)
t_mag1 = ms2s_list(get_data('sensor_mag', 'timestamp', 1))
mag1_ID = get_data('sensor_mag', 'device_id', 1)
mag2X_body = get_data('sensor_mag', 'x', 2)
mag2Y_body = get_data('sensor_mag', 'y', 2)
mag2Z_body = get_data('sensor_mag', 'z', 2)
t_mag2 = ms2s_list(get_data('sensor_mag', 'timestamp', 2))
mag2_ID = get_data('sensor_mag', 'device_id', 2)
mag3X_body = get_data('sensor_mag', 'x', 3)
mag3Y_body = get_data('sensor_mag', 'y', 3)
mag3Z_body = get_data('sensor_mag', 'z', 3)
t_mag3 = ms2s_list(get_data('sensor_mag', 'timestamp', 3))
mag3_ID = get_data('sensor_mag', 'device_id', 3)
magX_body = []
magY_body = []
magZ_body = []
mag_id = []
t_mag = []
if len(mag0X_body) > 0:
magX_body.append(mag0X_body)
magY_body.append(mag0Y_body)
magZ_body.append(mag0Z_body)
t_mag.append(t_mag0)
mag_id.append(mag0_ID[0])
if len(mag1X_body) > 0:
magX_body.append(mag1X_body)
magY_body.append(mag1Y_body)
magZ_body.append(mag1Z_body)
t_mag.append(t_mag1)
mag_id.append(mag1_ID[0])
if len(mag2X_body) > 0:
magX_body.append(mag2X_body)
magY_body.append(mag2Y_body)
magZ_body.append(mag2Z_body)
t_mag.append(t_mag2)
mag_id.append(mag2_ID[0])
if len(mag3X_body) > 0:
magX_body.append(mag3X_body)
magY_body.append(mag3Y_body)
magZ_body.append(mag3Z_body)
t_mag.append(t_mag3)
mag_id.append(mag3_ID[0])
n_mag = len(magX_body)
#log index does not necessarily match mag calibration instance number
calibration_instance = []
instance_found = False
for idx in range(n_mag):
instance_found = False
for j in range(4):
if mag_id[idx] == log.initial_parameters["CAL_MAG{}_ID".format(j)]:
calibration_instance.append(j)
instance_found = True
if not instance_found:
print('Mag {} calibration instance not found, run compass calibration first.'.format(mag_id[idx]))
#get first arming sequence from data
start_time = 0
stop_time = 0
for i in range(len(armed)-1):
if armed[i] == 1 and armed[i+1] == 2:
start_time = t_armed[i+1]
if armed[i] == 2 and armed[i+1] == 1:
stop_time = t_armed[i+1]
break
#cut unarmed sequences from mag data
index_start = 0
index_stop = 0
for idx in range(n_mag):
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > start_time:
index_start = i
break
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > stop_time:
index_stop = i -1
break
t_mag[idx] = t_mag[idx][index_start:index_stop]
magX_body[idx] = magX_body[idx][index_start:index_stop]
magY_body[idx] = magY_body[idx][index_start:index_stop]
magZ_body[idx] = magZ_body[idx][index_start:index_stop]
#resample data
power_resampled = []
for idx in range(n_mag):
power_resampled.append(interp(t_mag[idx], power_t, power))
#fit linear to get coefficients
px = []
py = []
pz = []
for idx in range(n_mag):
px_temp, res_x, _, _, _ = polyfit(power_resampled[idx], magX_body[idx], 1,full = True)
py_temp, res_y, _, _, _ = polyfit(power_resampled[idx], magY_body[idx], 1,full = True)
pz_temp, res_z, _, _, _ = polyfit(power_resampled[idx], magZ_body[idx], 1, full = True)
px.append(px_temp)
py.append(py_temp)
pz.append(pz_temp)
#print to console
for idx in range(n_mag):
print('Mag{} device ID {} (calibration instance {})'.format(idx, mag_id[idx], calibration_instance[idx]))
print('\033[91m \n{}-based compensation: \033[0m'.format(comp_type))
print('\nparam set CAL_MAG_COMP_TYP {}'.format(comp_type_param))
for idx in range(n_mag):
print('\nparam set CAL_MAG{}_XCOMP {:.3f}'.format(calibration_instance[idx], factor * px[idx][0]))
print('param set CAL_MAG{}_YCOMP {:.3f}'.format(calibration_instance[idx], factor * py[idx][0]))
print('param set CAL_MAG{}_ZCOMP {:.3f}'.format(calibration_instance[idx], factor * pz[idx][0]))
#plot data
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Compensation Parameter Fit \n{} \nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(1,3,1)
plt.plot(power_resampled[idx], magX_body[idx], 'yo', power_resampled[idx], px[idx][0]*power_resampled[idx]+px[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag X [G]')
plt.subplot(1,3,2)
plt.plot(power_resampled[idx], magY_body[idx], 'yo', power_resampled[idx], py[idx][0]*power_resampled[idx]+py[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Y [G]')
plt.subplot(1,3,3)
plt.plot(power_resampled[idx], magZ_body[idx], 'yo', power_resampled[idx], pz[idx][0]*power_resampled[idx]+pz[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Z [G]')
# display results
plt.figtext(0.24, 0.03, 'CAL_MAG{}_XCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * px[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.51, 0.03, 'CAL_MAG{}_YCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * py[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.79, 0.03, 'CAL_MAG{}_ZCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * pz[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
#compensation comparison plots
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Original Data vs. Compensation \n{}\nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(3,1,1)
original_x, = plt.plot(t_mag[idx], magX_body[idx], label='original')
power_x, = plt.plot(t_mag[idx],magX_body[idx] - px[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_x, power_x])
plt.xlabel('Time [s]')
plt.ylabel('Mag X corrected[G]')
plt.subplot(3,1,2)
original_y, = plt.plot(t_mag[idx], magY_body[idx], label='original')
power_y, = plt.plot(t_mag[idx],magY_body[idx] - py[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_y, power_y])
plt.xlabel('Time [s]')
plt.ylabel('Mag Y corrected[G]')
plt.subplot(3,1,3)
original_z, = plt.plot(t_mag[idx], magZ_body[idx], label='original')
power_z, = plt.plot(t_mag[idx],magZ_body[idx] - pz[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_z, power_z])
plt.xlabel('Time [s]')
plt.ylabel('Mag Z corrected[G]')
plt.show()
| bsd-3-clause |
gimli-org/gimli | pygimli/viewer/mpl/overlayimage.py | 1 | 13613 | # -*- coding: utf-8 -*-
"""Overlay / Underlay an image or a geo referenced map to mpl.ax."""
import os
import math
import numpy as np
import matplotlib.image as mpimg
from PIL import Image  # OverlayImageMPL relies on the PIL image API (e.g. .size)
import pygimli as pg
class OverlayImageMPL(object):
"""TODO Documentme."""
def __init__(self, imageFileName, ax):
"""Constructor."""
self.ax = ax
self.imAxes = None
        self.image = Image.open(imageFileName)
self.figure = self.ax.get_figure()
self.dx = 0
self.dy = 0
def clear(self):
"""TODO Documentme."""
        if self.imAxes in self.figure.axes:
            self.figure.delaxes(self.imAxes)
def setPosition(self, posX, posY, ax=None):
"""TODO Documentme."""
if ax is not None:
self.ax = ax
self.dx = float(self.image.size[0]) / \
self.figure.get_dpi() / self.figure.get_size_inches()[0]
self.dy = float(self.image.size[0]) / \
self.figure.get_dpi() / self.figure.get_size_inches()[1]
xRange = self.ax.get_xlim()[1] - self.ax.get_xlim()[0]
yRange = self.ax.get_ylim()[1] - self.ax.get_ylim()[0]
x = (posX - self.ax.get_xlim()[0]) / xRange
y = (posY - self.ax.get_ylim()[0]) / yRange
x *= (self.ax.get_position().x1 - self.ax.get_position().x0)
y *= (self.ax.get_position().y1 - self.ax.get_position().y0)
# print self.imAxes
# print self.figure.ax
        if self.imAxes not in self.figure.axes:
if (x + self.ax.get_position().x0) > 10:
print(("overlay size out of range",
(x + self.ax.get_position().x0)))
print((posX, posY))
print((xRange, yRange))
print((x, y))
print((self.ax.get_position().x0,
self.ax.get_position().x1))
print((self.figure.get_size_inches()))
print(("add ax",
[x + self.ax.get_position().x0 - self.dx / 6.0,
y + self.ax.get_position().y0, self.dx, self.dy]))
# hackish
return
            self.imAxes = self.figure.add_axes([
x + self.ax.get_position().x0 - self.dx / 6.0,
y + self.ax.get_position().y0,
self.dx, self.dy], frameon=False, axisbg='y')
else:
self.imAxes.set_position([
x + self.ax.get_position().x0 - self.dx / 6.0,
y + self.ax.get_position().y0,
self.dx, self.dy])
if len(self.imAxes.get_xticks()) > 0:
print("overlay imshow")
self.imAxes.imshow(self.image, origin='lower')
self.imAxes.set_xticks([])
self.imAxes.set_yticks([])
class MapTilesCacheSingleton(object):
__instance = None
_tilesCache = dict()
def __new__(cls):
if MapTilesCacheSingleton.__instance is None:
MapTilesCacheSingleton.__instance = object.__new__(cls)
return MapTilesCacheSingleton.__instance
def add(self, key, tile):
self._tilesCache[key] = tile
def get(self, key):
if key in self._tilesCache:
return self._tilesCache[key]
return None
# We only want one instance of this global cache so its a singleton class
__MatTilesCache__ = MapTilesCacheSingleton()
def deg2MapTile(lon_deg, lat_deg, zoom):
"""TODO Documentme."""
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) +
(1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
#correct the latitude to go from 0 (north) to 180 (south),
#instead of 90(north) to -90(south)
latitude=90 - lat_deg;
#//correct the longitude to go from 0 to 360
longitude=180 + lon_deg;
#//find tile size from zoom level
latTileSize = 180/(pow(2,(17-zoom)))
longTileSize = 360/(pow(2,(17-zoom)))
#//find the tile coordinates
tilex=(int)(longitude/longTileSize)
tiley=(int)(latitude/latTileSize)
return (xtile, ytile)
def mapTile2deg(xtile, ytile, zoom):
"""Calculate the NW-corner of the square.
Use the function with xtile+1 and/or ytile+1 to get the other corners.
With xtile+0.5 ytile+0.5 it will return the center of the tile.
"""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return (lon_deg, lat_deg)
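# Quick sketch of how the two helpers relate (tile indices are approximate / illustrative):
#   x, y = deg2MapTile(13.4, 52.5, zoom=10)        # lon/lat of central Berlin -> OSM tile index
#   lon, lat = mapTile2deg(x + 0.5, y + 0.5, 10)   # back to the lon/lat of that tile's centre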
def cacheFileName(fullname, vendor):
"""Createfilename and path to cache download data."""
(dirName, fileName) = os.path.split(fullname)
#os.path.joint(pg.getConfigPath(), fileName)
path = os.path.join(pg.getConfigPath(), vendor, dirName)
try:
os.makedirs(path)
except OSError:
pass
return os.path.join(path, fileName)
def getMapTile(xtile, ytile, zoom, vendor='OSM', verbose=False):
"""Get a map tile from public mapping server.
Its not recommended to use the google maps tile server without
the google maps api so if you use it to often your IP will be blacklisted
TODO: look here for more vendors https://github.com/Leaflet/Leaflet
TODO: Try http://scitools.org.uk/cartopy/docs/v0.14/index.html
TODO: Try https://github.com/jwass/mplleaflet
Parameters
----------
xtile : int
ytile : int
zoom : int
vendor : str
. 'OSM' or 'Open Street Map' (tile.openstreetmap.org)
. 'GM' or 'Google Maps' (mt.google.com) (do not use it to much)
verbose : bool [false]
be verbose
"""
imagename = str(zoom) + '/' + str(xtile) + '/' + str(ytile)
if vendor == 'OSM' or vendor == 'Open Street Map':
# http://[abc].tile.openstreetmap.org
serverName = 'tile.openstreetmap.org'
url = 'http://a.' + serverName + '/' + imagename + '.png'
imFormat = '.png'
elif vendor == 'GM' or vendor == 'Google Maps':
# its not recommended to use the google maps tile server without
# google maps api .. if you use it to often your IP will be blacklisted
# mt.google.com will balance itself
serverName = 'http://mt.google.com'
#nr = random.randint(1, 4)
#serverName = 'http://mt' + str(nr) + '.google.com'
# LAYERS:
#h = roads only
#m = standard roadmap
#p = terrain
#r = somehow altered roadmap
#s = satellite only
#t = terrain only
#y = hybrid
#,transit
url = serverName + '/vt/lyrs=m' + \
'&x=' + str(xtile) + '&y=' + str(ytile) + \
'&hl=en' + '&z=' + str(zoom)
imFormat = '.png'
else:
raise "Vendor: " + vendor + \
" not supported (currently only OSM (Open Street Map))"
filename = cacheFileName(imagename, serverName) + imFormat
image = __MatTilesCache__.get(filename)
if image is None:
if os.path.exists(filename):
if verbose:
print(("Read image from disk", filename))
image = mpimg.imread(filename)
image = image[:, :, 0:3]
else:
if verbose:
print(("Get map from url maps", url))
image = mpimg.imread(url)
if verbose:
print(imagename)
mpimg.imsave(filename, image)
if imFormat == '.jpeg':
image = image[::-1, ...] / 256.
__MatTilesCache__.add(filename, image)
else:
if verbose:
print(("Took image from cache", filename))
return image
# def getMapTile(...)
def underlayMap(ax, proj, vendor='OSM', zoom=-1, pixelLimit=None,
verbose=False, fitMap=False):
"""Get a map from public mapping server and underlay it on the given ax.
Parameters
----------
ax : matplotlib.ax
    proj : pyproj
Proj Projection
vendor : str
. 'OSM' or 'Open Street Map' (tile.openstreetmap.org)
. 'GM' or 'Google Maps' (mt.google.com)
zoom : int [-1]
Zoom level. If zoom is set to -1, the pixel size of the resulting
image is lower than pixelLimit.
pixelLimit : [int,int]
verbose : bool [false]
be verbose
fitMap : bool
The ax is resized to fit the whole map.
"""
if pixelLimit is None:
pixelLimit = [1024, 1024]
origXLimits = ax.get_xlim()
origYLimits = ax.get_ylim()
ul = proj(ax.get_xlim()[0], ax.get_ylim()[1], inverse=True)
lr = proj(ax.get_xlim()[1], ax.get_ylim()[0], inverse=True)
if zoom == -1:
nXtiles = 1e99
nYtiles = 1e99
zoom = 19
while ((nYtiles * 256) > pixelLimit[0] or
(nXtiles * 256) > pixelLimit[1]):
zoom = zoom - 1
startTile = deg2MapTile(ul[0], ul[1], zoom)
endTile = deg2MapTile(lr[0], lr[1], zoom)
nXtiles = (endTile[0] - startTile[0]) + 1
nYtiles = (endTile[1] - startTile[1]) + 1
if verbose:
print(("tiles: ", zoom, nYtiles, nXtiles))
if nXtiles == 1 and nYtiles == 1:
break
if verbose:
print(("zoom set to ", zoom))
startTile = deg2MapTile(ul[0], ul[1], zoom)
endTile = deg2MapTile(lr[0], lr[1], zoom)
nXtiles = (endTile[0] - startTile[0]) + 1
nYtiles = (endTile[1] - startTile[1]) + 1
image = np.zeros(shape=(256 * nYtiles, 256 * nXtiles, 3))
if verbose:
print(("Mapimage size:", image.shape))
for i in range(nXtiles):
for j in range(nYtiles):
im = getMapTile(startTile[0] + i, startTile[1] + j,
zoom, vendor, verbose=verbose)
image[(j * 256): ((j + 1) * 256),
(i * 256): ((i + 1) * 256)] = im
lonLatStart = mapTile2deg(startTile[0], startTile[1], zoom)
lonLatEnd = mapTile2deg(endTile[0] + 1, endTile[1] + 1, zoom)
imUL = proj(lonLatStart[0], lonLatStart[1])
imLR = proj(lonLatEnd[0], lonLatEnd[1])
extent = np.asarray([imUL[0], imLR[0], imLR[1], imUL[1]])
print(extent)
gci = ax.imshow(image, extent=extent)
if not fitMap:
ax.set_xlim(origXLimits)
ax.set_ylim(origYLimits)
else:
ax.set_xlim(extent[0], extent[1])
ax.set_ylim(extent[2], extent[3])
return gci
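# Example (hypothetical sketch; assumes a pyproj UTM projection and an axes object `ax` whose
# x/y limits are already given in that projection's coordinates):
#   import pyproj
#   proj = pyproj.Proj(proj='utm', zone=32, ellps='WGS84')
#   underlayMap(ax, proj, vendor='OSM', zoom=-1, verbose=True)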
def getBKGaddress(xlim, ylim, imsize=1000, zone=32, service='dop40',
usetls=False, epsg=0, uuid='', fmt='image/jpeg',
layer='rgb'):
"""Generate address for rendering web service image from BKG.
Assumes UTM in given zone.
"""
url = 'https://sg.geodatenzentrum.de/wms_' + service
if usetls:
url = 'https://sgtls12.geodatenzentrum.de/wms_' + service # new
stdarg = '&SERVICE=WMS&VERSION=1.1.0&LAYERS=' + layer
stdarg += '&STYLES=default&FORMAT=' + fmt
if epsg == 0:
epsg = 32600 + zone # WGS 84 / UTM zone 32N
# epsg = 25800 + zone # ETRS89 / UTM zone 32N
srsstr = 'SRS=EPSG:' + str(epsg) # EPSG definition of UTM
if imsize is None or imsize <= 1:
imsize = int((xlim[1] - xlim[0])/0.4) + 1 # take 40cm DOP resolution
print('choose image size ', imsize)
box = ','.join(str(int(v)) for v in [xlim[0], ylim[0], xlim[1], ylim[1]])
ysize = int((imsize - 1.) * (ylim[1] - ylim[0]) / (xlim[1] - xlim[0])) + 1
sizestr = 'WIDTH=' + str(imsize) + '&HEIGHT=' + '%d' % ysize
if uuid:
url += '__' + uuid
addr = url + '?REQUEST=GetMap' + stdarg + '&' + srsstr + \
'&' + 'BBOX=' + box + '&' + sizestr
return addr, box
def underlayBKGMap(ax, mode='DOP', utmzone=32, epsg=0, imsize=2500, uuid='',
usetls=False):
"""Underlay digital orthophoto or topographic (mode='DTK') map under axes.
First accessed, the image is obtained from BKG, saved and later loaded.
Parameters
----------
mode : str
'DOP' (digital orthophoto 40cm) or
'DTK' (digital topo map 1:25000)
imsize : int
image width in pixels (height will be automatically determined
"""
try:
import urllib.request as urllib2
except ImportError:
import urllib2
ext = {'DOP': '.jpg', 'DTK': '.png'} # extensions for different map types
wms = {'DOP': 'dop40', 'DTK': 'dtk25'} # wms service name for map types
fmt = {'DOP': 'image/jpeg', 'DTK': 'image/png'} # format
lay = {'DOP': 'rgb', 'DTK': '0'}
if imsize < 1: # 0, -1 or 0.4 could be reasonable parameters
xl = ax.get_xlim()  # keep ax intact; it is still needed below
imsize = int((xl[1] - xl[0]) / 0.4)  # use original 40cm pixel size
if imsize > 5000: # limit overly sized images
imsize = 2500 # default value
ad, box = getBKGaddress(ax.get_xlim(), ax.get_ylim(), imsize, zone=utmzone,
service=wms[mode.upper()], usetls=usetls,
uuid=uuid, epsg=epsg,
fmt=fmt[mode.upper()], layer=lay[mode.upper()])
imname = mode + box + ext[mode]
if not os.path.isfile(imname): # not already existing
print('Retrieving file from geodatenzentrum.de using URL: ' + ad)
req = urllib2.Request(ad)
response = urllib2.urlopen(req)
with open(imname, 'wb') as output:
output.write(response.read())
im = mpimg.imread(imname)
bb = [int(bi) for bi in box.split(',')] # bounding box
ax.imshow(im, extent=[bb[0], bb[2], bb[1], bb[3]],
interpolation='nearest')
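# Illustrative sketch (not part of the original module): underlay an orthophoto
# below data plotted in UTM zone 32 coordinates. Requires network access and a
# valid BKG UUID; the coordinate window and 'YOUR-UUID' are assumptions.
def _example_underlay_bkg_map():  # pragma: no cover - illustrative only
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.set_xlim(500000, 501000)    # UTM easting (assumed values)
    ax.set_ylim(5600000, 5601000)  # UTM northing (assumed values)
    underlayBKGMap(ax, mode='DOP', utmzone=32, imsize=1000, uuid='YOUR-UUID')
    plt.show()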
| apache-2.0 |
open-craft/edx-analytics-pipeline | edx/analytics/tasks/tests/test_course_information.py | 1 | 9841 | """
Test the course structure processing task which extracts the information needed for the internal reporting course
warehouse table.
Testing strategy:
Empty course structure json (expect empty output)
Course structure json with one course listed which is missing some fields (expect Hive nulls for those fields)
Course structure json with one course listed which has all the fields needed
Course structure json with multiple courses listed
Course structure json with a malformed course (like a list) inside
Course structure json with a course with a unicode-containing name inside
"""
import tempfile
import os
import shutil
import datetime
import pandas
import json
from edx.analytics.tasks.load_internal_reporting_course import ProcessCourseStructureAPIData
from edx.analytics.tasks.tests import unittest
from edx.analytics.tasks.tests.target import FakeTarget
from mock import MagicMock
# pylint: disable-msg=anomalous-unicode-escape-in-string
class TestCourseInformation(unittest.TestCase):
"""Tests for the task parsing the course structure information for use in the internal reporting course table."""
TEST_DATE = '2015-08-25'
def setUp(self):
self.temp_rootdir = tempfile.mkdtemp()
self.input_dir = os.path.join(self.temp_rootdir, "input")
os.mkdir(self.input_dir)
self.input_file = os.path.join(self.input_dir, "courses_raw", "dt=" + self.TEST_DATE, "course_structure.json")
self.addCleanup(self.cleanup, self.temp_rootdir)
def cleanup(self, dirname):
"""Remove the temp directory only if it exists."""
if os.path.exists(dirname):
shutil.rmtree(dirname)
def run_task(self, source):
"""Helper utility for running task under test"""
self.input_file = "course_structure.json"
with open(self.input_file, 'w') as fle:
fle.write(source.encode('utf-8'))
fake_warehouse_path = self.input_dir
task = ProcessCourseStructureAPIData(warehouse_path=fake_warehouse_path, run_date=datetime.date(2015, 8, 25))
output_target = FakeTarget()
task.output = MagicMock(return_value=output_target)
class DummyInput(object):
"""A dummy input object to imitate the input to a luigi task."""
def __init__(self, filename):
self.filename = filename
def open(self, mode):
"""Opens the file this object is mocking a past task as having output."""
return open(self.filename, mode)
input_dummy = DummyInput(self.input_file)
task.input = MagicMock(return_value=input_dummy)
task.run()
results = pandas.read_table(output_target.buffer, sep='\t', header=None,
names=['course_id', 'course_org_id', 'course_number', 'course_run',
'course_start', 'course_end', 'course_name'])
return results
def check_structure_entry(self, data, row_num, expected):
"""
Checks if the entries in a row of the data are what we expected, and returns whether this is true.
Args:
data is a pandas data frame representing the output of a run of the task.
row_num is the row number to check, starting from 0.
expected is a dictionary of values we expect to see.
"""
self.assertGreater(data.shape[0], row_num)
row = data.iloc[row_num]
self.assertEqual(dict(row), expected)
def test_empty_structure(self):
"""With an empty structure information json, we expect no rows of data."""
data = self.run_task("{}")
self.assertEquals(data.shape[0], 0)
def test_course_missing_data(self):
"""With a course with some data missing, we expect a row with null values in some columns."""
course_with_missing_data = {"results": [{"id": "foo", "org": "bar"}]}
data = self.run_task(json.dumps(course_with_missing_data))
# We expect an entry in the list of courses, since there is a course in the list.
self.assertEquals(data.shape[0], 1)
# We expect nulls for the columns aside from course id and course org id.
expected = {'course_id': 'foo',
'course_org_id': 'bar',
'course_number': '\N',
'course_run': '\N',
'course_start': '\N',
'course_end': '\N',
'course_name': '\N'}
self.check_structure_entry(data, 0, expected)
def test_single_course(self):
"""With a course with one all the necessary information, we expect to see that course."""
input_data = {"results":
[{"id": "foo",
"name": "Foo",
"org": "bar",
"course": "Baz",
"run": "2T2015",
"start": "2015-08-24T00:00:00Z",
"end": "2016-08-25T00:00:00Z"
}
]
}
data = self.run_task(json.dumps(input_data))
# We expect to see this course with the mock structure information.
self.assertEquals(data.shape[0], 1)
expected = {'course_id': 'foo', 'course_name': 'Foo', 'course_org_id': 'bar', 'course_number': 'Baz',
'course_run': '2T2015', 'course_start': '2015-08-24T00:00:00+00:00',
'course_end': '2016-08-25T00:00:00+00:00'}
self.check_structure_entry(data, 0, expected)
def test_multiple_courses(self):
"""With two courses, we expect to see both of them."""
input_data = {"results":
[{"id": "foo",
"name": "Foo",
"org": "bar",
"course": "Baz",
"run": "2T2015",
"start": "2015-08-24T00:00:00Z",
"end": "2016-08-25T00:00:00Z"
},
{"id": "foo2",
"name": "Foo2",
"org": "bar2",
"course": "Baz",
"run": "2T2015",
"start": "2015-08-24T00:00:00Z"
}
]
}
data = self.run_task(json.dumps(input_data))
# We expect to see two courses.
self.assertEquals(data.shape[0], 2)
course1 = {'course_id': 'foo', 'course_name': 'Foo', 'course_org_id': 'bar', 'course_number': 'Baz',
'course_run': '2T2015', 'course_start': '2015-08-24T00:00:00+00:00', 'course_end':
'2016-08-25T00:00:00+00:00'}
course2 = {'course_id': 'foo2', 'course_name': 'Foo2', 'course_org_id': 'bar2', 'course_number': 'Baz',
'course_run': '2T2015', 'course_start': '2015-08-24T00:00:00+00:00',
'course_end': '\N'}
self.check_structure_entry(data, 0, course1)
self.check_structure_entry(data, 1, course2)
def test_malformed_course(self):
"""
If a single course in the API response is malformed, we want to skip over the malformed course without
throwing an error and load the rest of the courses.
"""
input_data = {"results":
[[],
{"id": "foo2",
"name": "Foo2",
"org": "bar2",
"course": "Baz",
"run": "2T2015",
"start": "2015-08-24T00:00:00Z"
}
]
}
data = self.run_task(json.dumps(input_data))
# We expect to see the second course, which is well-formed, but nothing from the first.
self.assertEquals(data.shape[0], 1)
expected = {'course_id': 'foo2', 'course_name': 'Foo2', 'course_org_id': 'bar2', 'course_number': 'Baz',
'course_run': '2T2015', 'course_start': '2015-08-24T00:00:00+00:00',
'course_end': '\N'}
self.check_structure_entry(data, 0, expected)
def test_unicode_course_name(self):
"""Unicode course names should be handled properly, so that they appear correctly in the database."""
input_data = {"results":
[{"id": "foo",
"name": u"Fo\u263a",
"org": "bar",
"course": "Baz",
"run": "2T2015",
"start": "2015-08-24T00:00:00Z",
"end": "2016-08-25T00:00:00Z"
}
]
}
data = self.run_task(json.dumps(input_data))
# We expect to see this course with the mock structure information.
# NB: if the test fails, you may get an error from nose about "'ascii' codec can't decode byte 0xe2";
# this arises from the fact that nose is trying to print the difference between the result data and expected
# but can't handle the unicode strings. This "error" really indicates a test failure.
self.assertEquals(data.shape[0], 1)
expected = {'course_id': 'foo', 'course_name': 'Fo\xe2\x98\xba', 'course_org_id': 'bar', 'course_number': 'Baz',
'course_run': '2T2015', 'course_start': '2015-08-24T00:00:00+00:00', 'course_end':
'2016-08-25T00:00:00+00:00'}
self.check_structure_entry(data, 0, expected)
| agpl-3.0 |
blublud/networkx | networkx/drawing/nx_pylab.py | 9 | 30251 | """
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.org/
pygraphviz: http://pygraphviz.github.io/
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
"""Draw the graph G with Matplotlib.
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
hold : bool, optional
Set the Matplotlib hold state. If True subsequent draw
commands will be added to the current axes.
**kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
With pyplot use
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
cf = plt.gcf()
else:
cf = ax.get_figure()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax = cf.add_axes((0, 0, 1, 1))
else:
ax = cf.gca()
if 'with_labels' not in kwds:
kwds['with_labels'] = 'labels' in kwds
b = plt.ishold()
# allow callers to override the hold state by passing hold=True|False
h = kwds.pop('hold', None)
if h is not None:
plt.hold(h)
try:
draw_networkx(G, pos=pos, ax=ax, **kwds)
ax.set_axis_off()
plt.draw_if_interactive()
except:
plt.hold(b)
raise
plt.hold(b)
return
def draw_networkx(G, pos=None, arrows=True, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node and edge transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
style : string, optional (default='solid')
Edge line style (solid|dashed|dotted|dashdot)
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import matplotlib.pyplot as plt
>>> limits=plt.axis('off') # turn off axis
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos = nx.drawing.spring_layout(G) # default to spring layout
node_collection = draw_networkx_nodes(G, pos, **kwds)
edge_collection = draw_networkx_edges(G, pos, arrows=arrows, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
label : [None| string]
Label for legend
Returns
-------
matplotlib.collections.PathCollection
`PathCollection` of the nodes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if nodelist is None:
nodelist = list(G)
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
try:
xy = numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
node_collection = ax.scatter(xy[:, 0], xy[:, 1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=1.0,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float, or array of floats
Line width of edges (default=1.0)
edge_color : color string, or array of floats
Edge color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
Edge line style (default='solid') (solid|dashed|dotted|dashdot)
alpha : float
The edge transparency (default=1.0)
edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Returns
-------
matplotlib.collection.LineCollection
`LineCollection` of the edges
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter, Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edgelist is None:
edgelist = list(G.edges())
if not edgelist or len(edgelist) == 0: # no edges!
return None
# set edge positions
edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color) == 1:
edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
else:
raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
arrow_collection = None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos = []
p = 1.0-0.25 # make head segment 25 percent of edge length
for src, dst in edge_pos:
x1, y1 = src
x2, y2 = dst
dx = x2-x1 # x offset
dy = y2-y1 # y offset
d = numpy.sqrt(float(dx**2 + dy**2)) # length of edge
if d == 0: # source and target at same position
continue
if dx == 0: # vertical edge
xa = x2
ya = dy*p+y1
if dy == 0: # horizontal edge
ya = y2
xa = dx*p+x1
else:
theta = numpy.arctan2(dy, dx)
xa = p*d*numpy.cos(theta)+x1
ya = p*d*numpy.sin(theta)+y1
a_pos.append(((xa, ya), (x2, y2)))
arrow_collection = LineCollection(a_pos,
colors=arrow_colors,
linewidths=[4*ww for ww in lw],
antialiaseds=(1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim(corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Returns
-------
dict
`dict` of labels keyed on the nodes
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if labels is None:
labels = dict((n, n) for n in G.nodes())
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = pos[n]
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=True,
)
text_items[n] = t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Returns
-------
dict
`dict` of labels keyed on the edges
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edge_labels is None:
labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
else:
labels = edge_labels
text_items = {}
for (n1, n2), label in labels.items():
(x1, y1) = pos[n1]
(x2, y2) = pos[n2]
(x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle -= 180
if angle < - 90:
angle += 180
# transform data coordinate angle to screen coordinate angle
xy = numpy.array((x, y))
trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1, 2)))[0]
else:
trans_angle = 0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform=ax.transData,
bbox=bbox,
zorder=1,
clip_on=True,
)
text_items[(n1, n2)] = t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, circular_layout(G), **kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, random_layout(G), **kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spectral_layout(G), **kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spring_layout(G), **kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
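Examples
--------
Illustrative sketch; the split of the nodes into two shells below is an
arbitrary choice, not a required partition.
>>> G = nx.path_graph(4)
>>> shells = [[0], [1, 2, 3]]
>>> nx.draw_shell(G, nlist=shells)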
"""
nlist = kwargs.get('nlist', None)
if nlist is not None:
del(kwargs['nlist'])
draw(G, shell_layout(G, nlist=nlist), **kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
"""Draw networkx graph with graphviz layout.
Parameters
----------
G : graph
A networkx graph
prog : string, optional
Name of Graphviz layout program
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
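Examples
--------
Illustrative sketch; requires pygraphviz, so the call is skipped in doctests.
>>> G = nx.complete_graph(5)
>>> nx.draw_graphviz(G, prog='neato')  # doctest: +SKIP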
"""
pos = nx.drawing.graphviz_layout(G, prog)
draw(G, pos, **kwargs)
def draw_nx(G, pos, **kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G, pos, **kwds)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS', warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
| bsd-3-clause |
rvraghav93/scikit-learn | sklearn/mixture/dpgmm.py | 7 | 35901 | """Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos ([email protected])
# Bertrand Thirion <[email protected]>
#
# Based on mixture.py by:
# Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.linalg import pinvh
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.fixes import logsumexp
from ..utils.extmath import squared_norm, stable_cumsum
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
"will be removed in 0.20.")
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
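# Illustrative sketch (not part of the original module): log_normalize maps
# unnormalized log-probabilities to probabilities that sum to one along `axis`,
# i.e. it behaves like a numerically guarded softmax.
def _example_log_normalize():  # pragma: no cover - illustrative only
    logp = np.log(np.array([[0.2, 0.8], [0.5, 0.5]]))
    p = log_normalize(logp, axis=1)
    assert np.allclose(p.sum(axis=1), 1.0)
    return p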
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
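# Illustrative sketch (not part of the original module): shows the array shapes
# expected by _bound_state_log_lik for the 'diag' covariance type. All values are
# arbitrary assumptions.
def _example_bound_state_log_lik():  # pragma: no cover - illustrative only
    X = np.array([[0.0, 0.0], [1.0, 1.0]])         # (n_samples, n_features)
    means = np.array([[0.0, 0.0], [2.0, 2.0]])     # (n_components, n_features)
    precs = np.ones((2, 2))                        # 'diag': one row per component
    initial_bound = -0.5 * 2 * np.log(2 * np.pi)   # constant Gaussian term
    bound = _bound_state_log_lik(X, initial_bound, precs, means, 'diag')
    return bound                                   # (n_samples, n_components)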
class _DPGMMBase(_GMMBase):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(_DPGMMBase, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params,
verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = stable_cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
@deprecated("The `DPGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be "
"removed in 0.20.")
class DPGMM(_DPGMMBase):
"""Dirichlet Process Gaussian Mixture Models
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with
parameter ``weight_concentration_prior_type='dirichlet_process'``
instead.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
super(DPGMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
alpha=alpha, random_state=random_state, tol=tol, verbose=verbose,
min_covar=min_covar, n_iter=n_iter, params=params,
init_params=init_params)
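# Illustrative sketch (not part of the original module): the deprecation message
# above points to BayesianGaussianMixture as the replacement for DPGMM. The toy
# data and the prior value below are arbitrary assumptions.
def _example_bayesian_gmm_replacement():  # pragma: no cover - illustrative only
    from sklearn.mixture import BayesianGaussianMixture
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5.0])
    bgmm = BayesianGaussianMixture(
        n_components=10,  # truncation level; surplus components get ~zero weight
        weight_concentration_prior_type='dirichlet_process',
        weight_concentration_prior=1.0,  # plays the role of DPGMM's alpha
        random_state=0)
    bgmm.fit(X)
    return bgmm.predict(X)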
@deprecated("The `VBGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. "
"VBGMM is deprecated in 0.18 and will be removed in 0.20.")
class VBGMM(_DPGMMBase):
"""Variational Inference for the Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.BayesianGaussianMixture` with parameter
``weight_concentration_prior_type='dirichlet_distribution'`` instead.
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components : int, default 1
Number of mixture components.
covariance_type : string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha : float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = alpha
def _fit(self, X, y=None):
"""Estimate model parameters with the variational algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you just would like to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.alpha_ = float(self.alpha) / self.n_components
return super(VBGMM, self)._fit(X, y)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha_ + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha_ * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha_)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
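# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative addition, not part of the original
# module). The deprecated VBGMM above maps onto BayesianGaussianMixture with a
# Dirichlet-distribution prior on the mixture weights; VBGMM's `alpha` plays
# the role of `weight_concentration_prior`. A minimal migration example:
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    from sklearn.mixture import BayesianGaussianMixture

    rng = np.random.RandomState(0)
    # two well-separated 2-D blobs as toy data
    X_demo = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 4.0])

    bgm = BayesianGaussianMixture(
        n_components=2,
        covariance_type='diag',
        weight_concentration_prior_type='dirichlet_distribution',
        weight_concentration_prior=1.0,  # analogous to VBGMM's alpha
        tol=1e-3)
    bgm.fit(X_demo)
    print(bgm.weights_)                   # learned mixing weights
    print(bgm.predict_proba(X_demo)[:3])  # responsibilities, cf. score_samples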
| bsd-3-clause |
peter-kiechle/tactile-sensors | python/slip-detection/slip-detection_static.py | 1 | 14075 | # -*- coding: utf-8 -*-
##########################################
# Load configuration file (before pyplot)
##########################################
import os, sys
config_path = os.path.abspath('../matplotlib/')
sys.path.append(config_path)
import configuration as config
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.ticker import MaxNLocator
# Custom libraries
print("CWD: " + os.getcwd() )
lib_path = os.path.abspath('../../lib')
sys.path.append(lib_path)
import framemanager_python
import module_image_moments as IM
import module_normalized_cross_correlation as NCC
# Force reloading of external library (convenient during active development)
reload(IM)
reload(NCC)
reload(framemanager_python)
def loadFrame(frameManager, frameID, matrixID):
tsframe = np.copy( frameManager.get_tsframe(frameID, matrixID) );
# Normalize frame
#tsframe_uint8 = np.uint8(tsframe / (4096.0/255.0)) # scale to [0..255] and convert to uint8
# tsframe /= 4096.0 # scale to [0..1]
return tsframe
############
# Settings
############
matrixID = 1
startID = 13 # slip_and_rotation_teapot_handle
stopID = 93 # slip_and_rotation_teapot_handle
#startID = 22 # slip_examples_pen_and_wooden_block_000093-000189.dsa
#stopID = 80 # slip_examples_pen_and_wooden_block_000093-000189.dsa
thresh_active_cells_translation = 1; # Minimum amount of active cells for slip vector
thresh_active_cells_rotation = 5; # Minimum amount of active cells for slip angle
thresh_eccentricity = 0.6; # Principal axis lengths (disc or square: 0.0, elongated rectangle: ->1.0)
thresh_compactness = 0.9; # How much the object resembles a disc (perfect circle 1.0)
########################
# Load pressure profile
########################
profileName = os.path.abspath("slip_and_rotation_teapot_handle.dsa")
#profileName = os.path.abspath("slip_examples_pen_and_wooden_block_000093-000189.dsa")
frameManager = framemanager_python.FrameManagerWrapper()
frameManager.load_profile(profileName);
numFrames = frameManager.get_tsframe_count();
# Relative timestamps in seconds
timestamps = frameManager.get_tsframe_timestamp_list()[startID:stopID]
timestamps = (timestamps-timestamps[0]) / 1000.0
# Get initial frame (Assumption: valid frame)
frame0 = loadFrame(frameManager, startID, matrixID)
active_cells0 = frameManager.get_num_active_cells_matrix(startID, matrixID)
# Compute orientation of initial frame
(centroid_x, centroid_y, angle, Cov, lambda1, lambda2,
std_dev_x, std_dev_y, skew_x, skew_y,
compactness1, compactness2, eccentricity1, eccentricity2) = IM.compute_orientation_and_shape_features(frame0)
reference_angle = angle # [0, 180)
previous_angle = angle # [0, 360)
slip_angle = 0 # (-∞, ∞)
n = 0 # Rotation carry
# Records
slipvectors = np.zeros([1,2])
slipvectors_ncc_1 = np.zeros([1,2])
slipvectors_ncc_2 = np.zeros([1,2])
slipvectors_pc = np.zeros([1,2])
slipangles = np.zeros([1,1])
slipvectors_delta = np.zeros([1,2])
slipvectors_ncc_1_delta = np.zeros([1,2])
slipvectors_ncc_2_delta = np.zeros([1,2])
slipvectors_pc_delta = np.zeros([1,2])
slipangles_delta = np.zeros([1,1])
centroids = np.array([centroid_x, centroid_y])
for frameID in xrange(startID+1, stopID):
# Get current frame
frame1 = loadFrame(frameManager, frameID, matrixID)
active_cells1 = frameManager.get_num_active_cells_matrix(frameID, matrixID)
# Compute slip vector
if (active_cells0 > thresh_active_cells_translation and active_cells1 > thresh_active_cells_translation):
slipvector = NCC.normalized_cross_correlation(frame0, frame1)
slipvectors_delta = np.vstack((slipvectors_delta, slipvector))
slipvectors = np.vstack((slipvectors, slipvectors[-1]+slipvector))
slipvector_ncc_1 = NCC.normalized_cross_correlation2(frame0, frame1)
slipvectors_ncc_1_delta = np.vstack((slipvectors_ncc_1_delta, slipvector_ncc_1))
slipvectors_ncc_1 = np.vstack((slipvectors_ncc_1, slipvectors_ncc_1[-1]+slipvector_ncc_1))
slipvector_ncc_2 = NCC.normalized_cross_correlation3(frame0, frame1)
slipvectors_ncc_2_delta = np.vstack((slipvectors_ncc_2_delta, slipvector_ncc_2))
slipvectors_ncc_2 = np.vstack((slipvectors_ncc_2, slipvectors_ncc_2[-1]+slipvector_ncc_2))
slipvector_pc = NCC.normalized_cross_correlation4(frame0, frame1)
slipvectors_pc_delta = np.vstack((slipvectors_pc_delta, slipvector_pc))
slipvectors_pc = np.vstack((slipvectors_pc, slipvectors_pc[-1]+slipvector_pc))
frame0 = frame1
active_cells0 = active_cells1
else:
slipvectors_delta = np.vstack((slipvectors_delta, np.zeros(2)))
slipvectors = np.vstack((slipvectors, slipvectors[-1]))
slipvectors_ncc_1_delta = np.vstack((slipvectors_ncc_1_delta, np.zeros(2)))
slipvectors_ncc_1 = np.vstack((slipvectors_ncc_1, slipvectors_ncc_1[-1]))
slipvectors_ncc_2_delta = np.vstack((slipvectors_ncc_2_delta, np.zeros(2)))
slipvectors_ncc_2 = np.vstack((slipvectors_ncc_2, slipvectors_ncc_2[-1]))
slipvectors_pc_delta = np.vstack((slipvectors_pc_delta, np.zeros(2)))
slipvectors_pc = np.vstack((slipvectors_pc, slipvectors_pc[-1]))
# Compute shape features and orientation
if active_cells1 > thresh_active_cells_translation:
(centroid_x, centroid_y, angle, Cov, lambda1, lambda2,
std_dev_x, std_dev_y, skew_x, skew_y,
compactness1, compactness2, eccentricity1, eccentricity2) = IM.compute_orientation_and_shape_features(frame1)
# Record center of mass movement for comparison with normalized cross correlation
centroids = np.vstack((centroids, [centroid_x, centroid_y]))
# Compute slip angle
if active_cells1 > thresh_active_cells_rotation:
# Track slip angle
if IM.valid_frame(compactness2, eccentricity2, thresh_compactness, thresh_eccentricity):
current_angle, slip_angle, slip_angle_reference, n = IM.track_angle(reference_angle, previous_angle, angle, n)
previous_angle = current_angle
slipangles_delta = np.vstack((slipangles_delta, slip_angle))
slipangles = np.vstack((slipangles, slipangles[-1] + slip_angle))
else:
slipangles_delta = np.vstack((slipangles_delta, np.zeros(1)))
slipangles = np.vstack((slipangles, slipangles[-1]))
slipvectors *= 3.4 # convert from cells to millimeter
slipvectors_ncc_1 *= 3.4
slipvectors_ncc_2 *= 3.4
slipvectors_pc *= 3.4
'''
# Center of mass based slip
centroids_diff = np.diff(centroids, axis=0)
centroids_cumsum = np.cumsum(centroids_diff, axis=0)
slipvector_ncc_2s = np.vstack(([0.0, 0.0], centroids_cumsum))
slipvector_diff = np.abs(slipvectors) - np.abs(slipvector_ncc_2s)
'''
############
# Plotting
############
#------------------------------------------------------------------------
# All in one
#------------------------------------------------------------------------
text_width = 6.30045 # LaTeX text width in inches
golden_ratio = (1 + np.sqrt(5) ) / 2.0
size_factor = 0.45
figure_width = size_factor*text_width
#figure_height = (figure_width / golden_ratio)
figure_height = 2.2 * figure_width
figure_size = [figure_width, figure_height]
config.load_config_medium()
fig = plt.figure(figsize=figure_size)
ax1 = plt.subplot(3,1,1)
x = timestamps[0:stopID-startID]
#y = slipvectors_delta[:,0]
ax1.plot(x, slipvectors[:,0], label="Alcazar", ls="-", lw=1.5, color=config.UIBK_blue, alpha=0.75)
ax1.plot(x, slipvectors_ncc_1[:,0], label="NCC 1", ls="-", lw=1.5, color=config.UIBK_orange, alpha=1.0)
ax1.plot(x, slipvectors_ncc_2[:,0], label="NCC 2", ls="-", lw=1.5, dashes=[3,1], color=config.UIBK_orange, alpha=1.0)
ax1.plot(x, slipvectors_pc[:,0], label="PC", ls="-", lw=1.5, color=[0.0, 0.0, 0.0], alpha=1.0)
ax1.set_ylabel(r"$\Delta x$ [mm]", rotation=90)
ax1.yaxis.set_major_locator(MaxNLocator(integer=True)) # Restriction to integer
ax1.legend(fontsize=8, loc='upper left', fancybox=True, shadow=False, framealpha=1.0)
#ax1.grid('on')
#ax2 = plt.subplot(3,1,2, sharex=ax1, sharey=ax1)
ax2 = plt.subplot(3,1,2)
x = timestamps[0:stopID-startID]
#y = slipvectors_delta[:,1]
ax2.plot(x, slipvectors[:,1], label="Alcazar", ls="-", lw=1.5, color=config.UIBK_blue, alpha=0.75)
ax2.plot(x, slipvectors_ncc_1[:,1], label="NCC 1", ls="-", lw=1.5, color=config.UIBK_orange, alpha=1.0)
ax2.plot(x, slipvectors_ncc_2[:,1], label="NCC 2", ls="-", dashes=[3,1], lw=1.5, color=config.UIBK_orange, alpha=1.0)
ax2.plot(x, slipvectors_pc[:,1], label="PC", ls="-", lw=1.5, color=[0.0, 0.0, 0.0], alpha=1.0)
ax2.set_ylabel(r"$\Delta y$ [mm]", rotation=90)
ax2.yaxis.set_major_locator(MaxNLocator(integer=True)) # Restriction to integer
ax2.legend(fontsize=8, loc='upper left', fancybox=True, shadow=False, framealpha=1.0)
#ax2.grid('on')
#ax2.set_ylim([0, 22])
ax3 = plt.subplot(3,1,3, sharex=ax1)
x = timestamps[0:stopID-startID]
#y = slipangles_delta
y = slipangles
ax3.plot(x, y, label=r"$\theta$", lw=1.5, color=config.UIBK_orange, alpha=1.0)
ax3.set_ylabel("Rotation", rotation=90)
ax3.yaxis.set_major_formatter(ticker.FormatStrFormatter(r"%.1f$^\circ$"))
#ax3.grid('on')
x_poi = x[74]
y_poi = y[74]
ax3.annotate(r"Invalid shape", size=8,
xy=(x_poi, y_poi), xycoords='data',
xytext=(0, 43), textcoords='offset points', ha="right", va="center",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", fc="black"),
)
ax3.set_xlabel('Time [s]')
#plt.subplots_adjust(top=0.98, bottom=0.08, left=0.08, right=0.98, wspace=0.0, hspace=0.2)
fig.tight_layout()
plotname = "all_in_one_ncc"
fig.savefig(plotname+".pdf", pad_inches=0, dpi=fig.dpi) # pdf
fig.savefig(plotname+".pgf", pad_inches=0, dpi=fig.dpi) # pgf
#------------------------------------------------------------------------
# Slip vector
#------------------------------------------------------------------------
config.load_config_medium()
# Slip vector
text_width = 6.30045 # LaTeX text width in inches
figure_width = 0.45 * text_width
figure_height = figure_width
figure_size = [figure_width, figure_height]
fig, ax = plt.subplots(figsize=figure_size)
ax.plot(slipvectors[:,0], slipvectors[:,1], label="Alcazar", linewidth=1.5, color=config.UIBK_blue, alpha=0.75)
#ax.plot(slipvectors_ncc[:,0], slipvectors_ncc[:,1], label="NCC", ls="-", dashes=[2,1], lw=1.0, color=config.UIBK_blue, alpha=1.0)
ax.plot(slipvectors_ncc_1[:,0], slipvectors_ncc_1[:,1], label="NCC 1", ls="-", lw=1.5, color=config.UIBK_orange, alpha=1.0)
ax.plot(slipvectors_ncc_2[:,0], slipvectors_ncc_2[:,1], label="NCC 2", ls="-", dashes=[3,1], lw=1.5, color=config.UIBK_orange, alpha=1.0)
ax.plot(slipvectors_pc[:,0], slipvectors_pc[:,1], label="PC", ls="-", lw=1.5, color=[0.0, 0.0, 0.0], alpha=1.0, zorder=0)
ax.axis('equal')
ax.set_xlabel(r"$\Delta x$ [mm]")
ax.set_ylabel(r"$\Delta y$ [mm]", rotation=90)
ax.xaxis.set_major_locator(MaxNLocator(integer=True)) # Restriction to integer
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.legend(fontsize=8, loc='lower right', fancybox=True, shadow=False, framealpha=1.0)
#ax.yaxis.labelpad = 10
#ax.grid('on')
#fig.tight_layout()
#ax.set_title("Slip trajectory")
#plt.show()
plt.subplots_adjust(top=0.85, left = 0.15, bottom=0.15, right = 0.85) # Legend on top
plotname = "slip_trajectory_ncc"
fig.savefig(plotname+".pdf", pad_inches=0, dpi=fig.dpi) # pdf
fig.savefig(plotname+".pgf", pad_inches=0, dpi=fig.dpi) # pgf
#------------------------------------------------------------------------
# Rotation
#------------------------------------------------------------------------
# Thanks to Joe Kington
# http://stackoverflow.com/questions/20222436/python-matplotlib-how-to-insert-more-space-between-the-axis-and-the-tick-labe
def realign_polar_xticks(ax):
for theta, label in zip(ax.get_xticks(), ax.get_xticklabels()):
theta = theta * ax.get_theta_direction() + ax.get_theta_offset()
theta = np.pi/2 - theta
y, x = np.cos(theta), np.sin(theta)
if x >= 0.1:
label.set_horizontalalignment('left')
if x <= -0.1:
label.set_horizontalalignment('right')
if y >= 0.5:
label.set_verticalalignment('bottom')
if y <= -0.5:
label.set_verticalalignment('top')
r = np.sqrt(slipvectors[:,0]**2 + slipvectors[:,1]**2)
r_ncc_1 = np.sqrt(slipvectors_ncc_1[:,0]**2 + slipvectors_ncc_1[:,1]**2)
r_ncc_2 = np.sqrt(slipvectors_ncc_2[:,0]**2 + slipvectors_ncc_2[:,1]**2)
r_pc = np.sqrt(slipvectors_pc[:,0]**2 + slipvectors_pc[:,1]**2)
theta = np.deg2rad(slipangles)
d = r.max() / 3.4 # max distance
fig, ax = plt.subplots(figsize=figure_size)
ax = plt.subplot(111, polar=True)
ax.plot(theta, r, label="Alcazar", lw=1.5, color=config.UIBK_blue, alpha=0.75)
ax.plot(theta, r_ncc_1, label="NCC 1", ls="-", lw=1.5, color=config.UIBK_orange, alpha=1.0)
ax.plot(theta, r_ncc_2, label="NCC 2", ls="-", dashes=[3,1], lw=1.5, color=config.UIBK_orange, alpha=1.0)
ax.plot(theta, r_pc, label="PC", ls="-", lw=1.5, color=[0.0, 0.0, 0.0], alpha=1.0)
ax.set_rmax(d)
# tick labels (Workaround for pgf degree symbol)
xtick_labels=[r"0$^\circ$", r"45$^\circ$",
r"90$^\circ$", r"135$^\circ$",
r"180$^\circ$", r"225$^\circ$",
r"270$^\circ$", r"315$^\circ$"]
ax.set_xticklabels(xtick_labels)
# tick locations
thetaticks = np.arange(0,360,45)
ax.set_thetagrids(thetaticks, frac=1.1) # additional distance
realign_polar_xticks(ax)
#ax.grid(True)
#ax.set_title("Rotation")
fig.tight_layout()
plt.rgrids(np.arange(1.0, d+1, 1), angle=90);
ax.set_yticklabels( [("%.1f mm" % i) for i in 3.4*np.arange(1,d)], fontsize=6)
ax.legend(fontsize=8, bbox_to_anchor=[0.25, 0.5], loc='center', fancybox=True, shadow=False, framealpha=1.0)
plotname = "rotation_trajectory_ncc"
fig.savefig(plotname+".pdf", pad_inches=0, dpi=fig.dpi) # pdf
fig.savefig(plotname+".pgf", pad_inches=0, dpi=fig.dpi) # pgf
plt.close()
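# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the original script). The
# slip vectors above come from the custom module_normalized_cross_correlation
# helpers, whose source is not shown here. A minimal phase-correlation shift
# estimate between two equally sized tactile frames could look like the
# following; the function name is hypothetical and is not used by the script.
# Depending on the convention, the sign of the returned shift may need to be
# flipped to match the NCC module's output.
# ---------------------------------------------------------------------------
def estimate_shift_phase_correlation(frame_a, frame_b):
    """Estimate the circular (dy, dx) shift between two frames."""
    F_a = np.fft.fft2(frame_a)
    F_b = np.fft.fft2(frame_b)
    cross_power = F_a * np.conj(F_b)
    cross_power /= np.abs(cross_power) + 1e-12  # normalize to unit magnitude
    correlation = np.real(np.fft.ifft2(cross_power))
    peak = np.unravel_index(np.argmax(correlation), correlation.shape)
    shift = np.array(peak, dtype=float)
    # displacements beyond half the frame size wrap around to negative values
    for axis in range(2):
        if shift[axis] > frame_a.shape[axis] / 2.0:
            shift[axis] -= frame_a.shape[axis]
    return shift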
| gpl-3.0 |
mxjl620/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the VotingClassifier (sklearn.ensemble)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
    assert_equal(list(clf1.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])
    assert_equal(list(clf2.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])
    assert_equal(list(clf3.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
    assert_equal(list(eclf.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
    assert_equal(list(eclf.fit(X, y).predict(X)), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
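# --- Editor's note (illustrative, not part of the original test module) ---
# With voting='soft' and weights=[2, 1, 1], the ensemble probability for each
# sample is the weighted mean of the per-estimator probabilities,
#     p_ensemble = (2 * p_lr + 1 * p_rf + 1 * p_gnb) / (2 + 1 + 1),
# which is exactly the quantity checked by t00/t11/t21/t31 in
# test_predict_proba_on_toy_problem above.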
| bsd-3-clause |
logpai/logparser | logparser/LenMa/LenMa.py | 1 | 4019 | """
Description: This file implements the Lenma algorithm for log parsing
Author: LogPAI team
License: MIT
"""
from templateminer import lenma_template
import pandas as pd
import re
import os
import hashlib
from collections import defaultdict
from datetime import datetime
class LogParser(object):
def __init__(self, indir, outdir, log_format, threshold=0.9, predefined_templates=None, rex=[]):
self.path = indir
self.savePath = outdir
self.logformat = log_format
self.rex = rex
self.wordseqs = []
self.df_log = pd.DataFrame()
self.wordpos_count = defaultdict(int)
self.templ_mgr = lenma_template.LenmaTemplateManager(threshold=threshold, predefined_templates=predefined_templates)
self.logname = None
def parse(self, logname):
print('Parsing file: ' + os.path.join(self.path, logname))
self.logname = logname
starttime = datetime.now()
headers, regex = self.generate_logformat_regex(self.logformat)
self.df_log = self.log_to_dataframe(os.path.join(self.path, self.logname), regex, headers, self.logformat)
for idx, line in self.df_log.iterrows():
line = line['Content']
if self.rex:
for currentRex in self.rex:
line = re.sub(currentRex, '<*>', line)
words = line.split()
self.templ_mgr.infer_template(words, idx)
self.dump_results()
print('Parsing done. [Time taken: {!s}]'.format(datetime.now() - starttime))
def dump_results(self):
if not os.path.isdir(self.savePath):
os.makedirs(self.savePath)
df_event = []
templates = [0] * self.df_log.shape[0]
template_ids = [0] * self.df_log.shape[0]
for t in self.templ_mgr.templates:
template = ' '.join(t.words)
            eventid = hashlib.md5(template.encode('utf-8')).hexdigest()[0:8]
logids = t.get_logids()
for logid in logids:
templates[logid] = template
template_ids[logid] = eventid
df_event.append([eventid, template, len(logids)])
self.df_log['EventId'] = template_ids
self.df_log['EventTemplate'] = templates
pd.DataFrame(df_event, columns=['EventId', 'EventTemplate', 'Occurrences']).to_csv(os.path.join(self.savePath, self.logname + '_templates.csv'), index=False)
self.df_log.to_csv(os.path.join(self.savePath, self.logname + '_structured.csv'), index=False)
def log_to_dataframe(self, log_file, regex, headers, logformat):
''' Function to transform log file to dataframe '''
log_messages = []
linecount = 0
with open(log_file, 'r') as fin:
for line in fin.readlines():
try:
match = regex.search(line.strip())
message = [match.group(header) for header in headers]
log_messages.append(message)
linecount += 1
except Exception as e:
pass
logdf = pd.DataFrame(log_messages, columns=headers)
logdf.insert(0, 'LineId', None)
logdf['LineId'] = [i + 1 for i in range(linecount)]
return logdf
def generate_logformat_regex(self, logformat):
'''
Function to generate regular expression to split log messages
'''
headers = []
splitters = re.split(r'(<[^<>]+>)', logformat)
regex = ''
for k in range(len(splitters)):
if k % 2 == 0:
                splitter = re.sub(' +', r'\\s+', splitters[k])
regex += splitter
else:
header = splitters[k].strip('<').strip('>')
regex += '(?P<%s>.*?)' % header
headers.append(header)
regex = re.compile('^' + regex + '$')
return headers, regex
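    # --- Editor's note (illustrative, not part of the original module) ---
    # Example of what generate_logformat_regex() produces for a hypothetical
    # format string:
    #   logformat = '<Date> <Time> <Level> <Content>'
    #   headers -> ['Date', 'Time', 'Level', 'Content']
    #   regex   -> ^(?P<Date>.*?)\s+(?P<Time>.*?)\s+(?P<Level>.*?)\s+(?P<Content>.*?)$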
| mit |
printedheart/h2o-3 | py2/h2o_gbm.py | 30 | 16328 |
import re, random, math
import h2o_args
import h2o_nodes
import h2o_cmd
from h2o_test import verboseprint, dump_json, check_sandbox_for_errors
def plotLists(xList, xLabel=None, eListTitle=None, eList=None, eLabel=None, fListTitle=None, fList=None, fLabel=None, server=False):
if h2o_args.python_username!='kevin':
return
# Force matplotlib to not use any Xwindows backend.
if server:
import matplotlib
matplotlib.use('Agg')
import pylab as plt
print "xList", xList
print "eList", eList
print "fList", fList
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 26}
### plt.rc('font', **font)
plt.rcdefaults()
if eList:
if eListTitle:
plt.title(eListTitle)
plt.figure()
plt.plot (xList, eList)
plt.xlabel(xLabel)
plt.ylabel(eLabel)
plt.draw()
plt.savefig('eplot.jpg',format='jpg')
# Image.open('testplot.jpg').save('eplot.jpg','JPEG')
if fList:
if fListTitle:
plt.title(fListTitle)
plt.figure()
plt.plot (xList, fList)
plt.xlabel(xLabel)
plt.ylabel(fLabel)
plt.draw()
plt.savefig('fplot.jpg',format='jpg')
# Image.open('fplot.jpg').save('fplot.jpg','JPEG')
if eList or fList:
plt.show()
# pretty print a confusion matrix (cm)
def pp_cm(jcm, header=None):
# header = jcm['header']
# hack col index header for now..where do we get it?
header = ['"%s"'%i for i in range(len(jcm[0]))]
# cm = ' '.join(header)
cm = '{0:<8}'.format('')
for h in header:
cm = '{0}|{1:<8}'.format(cm, h)
cm = '{0}|{1:<8}'.format(cm, 'error')
c = 0
for line in jcm:
lineSum = sum(line)
if c < 0 or c >= len(line):
raise Exception("Error in h2o_gbm.pp_cm. c: %s line: %s len(line): %s jcm: %s" % (c, line, len(line), dump_json(jcm)))
print "c:", c, "line:", line
errorSum = lineSum - line[c]
if (lineSum>0):
err = float(errorSum) / lineSum
else:
err = 0.0
fl = '{0:<8}'.format(header[c])
for num in line: fl = '{0}|{1:<8}'.format(fl, num)
fl = '{0}|{1:<8.2f}'.format(fl, err)
cm = "{0}\n{1}".format(cm, fl)
c += 1
return cm
def pp_cm_summary(cm):
# hack cut and past for now (should be in h2o_gbm.py?
scoresList = cm
totalScores = 0
totalRight = 0
# individual scores can be all 0 if nothing for that output class
# due to sampling
classErrorPctList = []
predictedClassDict = {} # may be missing some? so need a dict?
for classIndex,s in enumerate(scoresList):
classSum = sum(s)
if classSum == 0 :
# why would the number of scores for a class be 0?
# in any case, tolerate. (it shows up in test.py on poker100)
print "classIndex:", classIndex, "classSum", classSum, "<- why 0?"
else:
if classIndex >= len(s):
print "Why is classindex:", classIndex, 'for s:"', s
else:
# H2O should really give me this since it's in the browser, but it doesn't
classRightPct = ((s[classIndex] + 0.0)/classSum) * 100
totalRight += s[classIndex]
classErrorPct = 100 - classRightPct
classErrorPctList.append(classErrorPct)
### print "s:", s, "classIndex:", classIndex
print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct
# gather info for prediction summary
for pIndex,p in enumerate(s):
if pIndex not in predictedClassDict:
predictedClassDict[pIndex] = p
else:
predictedClassDict[pIndex] += p
totalScores += classSum
print "Predicted summary:"
# FIX! Not sure why we weren't working with a list..hack with dict for now
for predictedClass,p in predictedClassDict.items():
print str(predictedClass)+":", p
# this should equal the num rows in the dataset if full scoring? (minus any NAs)
print "totalScores:", totalScores
print "totalRight:", totalRight
if totalScores != 0: pctRight = 100.0 * totalRight/totalScores
else: pctRight = 0.0
print "pctRight:", "%5.2f" % pctRight
pctWrong = 100 - pctRight
print "pctWrong:", "%5.2f" % pctWrong
return pctWrong
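# --- Editor's note (illustrative, not part of the original helpers) ---
# pp_cm_summary() arithmetic on a hypothetical 2x2 confusion matrix:
#   cm = [[40, 10],
#         [ 5, 45]]
#   class 0: classSum=50, right=40 -> classErrorPct=20.00
#   class 1: classSum=50, right=45 -> classErrorPct=10.00
#   totalScores=100, totalRight=85 -> pctRight=85.00, pctWrong=15.00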
# I just copied the GLM version of this and changed GLM to GBM. Have to update to match GBM params and responses
def pickRandGbmParams(paramDict, params):
colX = 0
randomGroupSize = random.randint(1,len(paramDict))
for i in range(randomGroupSize):
randomKey = random.choice(paramDict.keys())
randomV = paramDict[randomKey]
randomValue = random.choice(randomV)
params[randomKey] = randomValue
# compare this glm to last one. since the files are concatenations,
# the results should be similar? 10% of first is allowed delta
def compareToFirstGbm(self, key, glm, firstglm):
# if isinstance(firstglm[key], list):
    # in case it's not a list already (err is a list)
verboseprint("compareToFirstGbm key:", key)
verboseprint("compareToFirstGbm glm[key]:", glm[key])
# key could be a list or not. if a list, don't want to create list of that list
# so use extend on an empty list. covers all cases?
if type(glm[key]) is list:
kList = glm[key]
firstkList = firstglm[key]
elif type(glm[key]) is dict:
raise Exception("compareToFirstGLm: Not expecting dict for " + key)
else:
kList = [glm[key]]
firstkList = [firstglm[key]]
for k, firstk in zip(kList, firstkList):
# delta must be a positive number ?
delta = .1 * abs(float(firstk))
msg = "Too large a delta (" + str(delta) + ") comparing current and first for: " + key
self.assertAlmostEqual(float(k), float(firstk), delta=delta, msg=msg)
self.assertGreaterEqual(abs(float(k)), 0.0, str(k) + " abs not >= 0.0 in current")
def goodXFromColumnInfo(y,
num_cols=None, missingValuesDict=None, constantValuesDict=None, enumSizeDict=None,
colTypeDict=None, colNameDict=None, keepPattern=None, key=None,
timeoutSecs=120, forRF=False, noPrint=False):
y = str(y)
# if we pass a key, means we want to get the info ourselves here
if key is not None:
(missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
h2o_cmd.columnInfoFromInspect(key, exceptionOnMissingValues=False,
max_column_display=99999999, timeoutSecs=timeoutSecs)
num_cols = len(colNameDict)
# now remove any whose names don't match the required keepPattern
if keepPattern is not None:
keepX = re.compile(keepPattern)
else:
keepX = None
x = range(num_cols)
# need to walk over a copy, cause we change x
xOrig = x[:]
ignore_x = [] # for use by RF
for k in xOrig:
name = colNameDict[k]
# remove it if it has the same name as the y output
if str(k)== y: # if they pass the col index as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, str(k), y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif name == y: # if they pass the name as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, name, y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif keepX is not None and not keepX.match(name):
if not noPrint:
print "Removing %d because name: %s doesn't match desired keepPattern %s" % (k, name, keepPattern)
x.remove(k)
ignore_x.append(k)
# missing values reports as constant also. so do missing first.
# remove all cols with missing values
# could change it against num_rows for a ratio
elif k in missingValuesDict:
value = missingValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has %d missing values" % (k, name, value)
x.remove(k)
ignore_x.append(k)
elif k in constantValuesDict:
value = constantValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has constant value: %s " % (k, name, str(value))
x.remove(k)
ignore_x.append(k)
# this is extra pruning..
# remove all cols with enums, if not already removed
elif k in enumSizeDict:
value = enumSizeDict[k]
if not noPrint:
print "Removing %d %s because it has enums of size: %d" % (k, name, value)
x.remove(k)
ignore_x.append(k)
if not noPrint:
print "x has", len(x), "cols"
print "ignore_x has", len(ignore_x), "cols"
x = ",".join(map(str,x))
ignore_x = ",".join(map(str,ignore_x))
if not noPrint:
print "\nx:", x
print "\nignore_x:", ignore_x
if forRF:
return ignore_x
else:
return x
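# --- Editor's note (illustrative, not part of the original helpers) ---
# Hypothetical call showing how keepPattern prunes the feature list before
# building the x/ignore_x column specifications (the key and pattern below
# are made up for illustration):
#   x = goodXFromColumnInfo(y=54, key='covtype.hex', keepPattern='^C',
#                           timeoutSecs=300)
#   ignore_x = goodXFromColumnInfo(y=54, key='covtype.hex', forRF=True)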
def showGBMGridResults(GBMResult, expectedErrorMax, classification=True):
# print "GBMResult:", dump_json(GBMResult)
jobs = GBMResult['jobs']
print "GBM jobs:", jobs
for jobnum, j in enumerate(jobs):
_distribution = j['_distribution']
model_key = j['destination_key']
job_key = j['job_key']
# inspect = h2o_cmd.runInspect(key=model_key)
# print "jobnum:", jobnum, dump_json(inspect)
gbmTrainView = h2o_cmd.runGBMView(model_key=model_key)
print "jobnum:", jobnum, dump_json(gbmTrainView)
if classification:
cms = gbmTrainView['gbm_model']['cms']
cm = cms[-1]['_arr'] # take the last one
print "GBM cms[-1]['_predErr']:", cms[-1]['_predErr']
print "GBM cms[-1]['_classErr']:", cms[-1]['_classErr']
pctWrongTrain = pp_cm_summary(cm);
if pctWrongTrain > expectedErrorMax:
raise Exception("Should have < %s error here. pctWrongTrain: %s" % (expectedErrorMax, pctWrongTrain))
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "\nTrain", jobnum, job_key, "\n==========\n", "pctWrongTrain:", pctWrongTrain, "errsLast:", errsLast
print "GBM 'errsLast'", errsLast
print pp_cm(cm)
else:
print "\nTrain", jobnum, job_key, "\n==========\n", "errsLast:", errsLast
print "GBMTrainView errs:", gbmTrainView['gbm_model']['errs']
def simpleCheckGBMView(node=None, gbmv=None, noPrint=False, **kwargs):
if not node:
node = h2o_nodes.nodes[0]
if 'warnings' in gbmv:
warnings = gbmv['warnings']
# catch the 'Failed to converge" for now
for w in warnings:
if not noPrint: print "\nwarning:", w
if ('Failed' in w) or ('failed' in w):
raise Exception(w)
if 'cm' in gbmv:
cm = gbmv['cm'] # only one
else:
if 'gbm_model' in gbmv:
gbm_model = gbmv['gbm_model']
else:
raise Exception("no gbm_model in gbmv? %s" % dump_json(gbmv))
cms = gbm_model['cms']
print "number of cms:", len(cms)
print "FIX! need to add reporting of h2o's _perr per class error"
# FIX! what if regression. is rf only classification?
print "cms[-1]['_arr']:", cms[-1]['_arr']
print "cms[-1]['_predErr']:", cms[-1]['_predErr']
print "cms[-1]['_classErr']:", cms[-1]['_classErr']
## print "cms[-1]:", dump_json(cms[-1])
## for i,c in enumerate(cms):
## print "cm %s: %s" % (i, c['_arr'])
cm = cms[-1]['_arr'] # take the last one
scoresList = cm
used_trees = gbm_model['N']
errs = gbm_model['errs']
print "errs[0]:", errs[0]
print "errs[-1]:", errs[-1]
print "errs:", errs
# if we got the ntree for comparison. Not always there in kwargs though!
param_ntrees = kwargs.get('ntrees',None)
if (param_ntrees is not None and used_trees != param_ntrees):
raise Exception("used_trees should == param_ntree. used_trees: %s" % used_trees)
if (used_trees+1)!=len(cms) or (used_trees+1)!=len(errs):
raise Exception("len(cms): %s and len(errs): %s should be one more than N %s trees" % (len(cms), len(errs), used_trees))
totalScores = 0
totalRight = 0
# individual scores can be all 0 if nothing for that output class
# due to sampling
classErrorPctList = []
predictedClassDict = {} # may be missing some? so need a dict?
for classIndex,s in enumerate(scoresList):
classSum = sum(s)
if classSum == 0 :
# why would the number of scores for a class be 0? does GBM CM have entries for non-existent classes
# in a range??..in any case, tolerate. (it shows up in test.py on poker100)
if not noPrint: print "class:", classIndex, "classSum", classSum, "<- why 0?"
else:
# H2O should really give me this since it's in the browser, but it doesn't
classRightPct = ((s[classIndex] + 0.0)/classSum) * 100
totalRight += s[classIndex]
classErrorPct = round(100 - classRightPct, 2)
classErrorPctList.append(classErrorPct)
### print "s:", s, "classIndex:", classIndex
if not noPrint: print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct
# gather info for prediction summary
for pIndex,p in enumerate(s):
if pIndex not in predictedClassDict:
predictedClassDict[pIndex] = p
else:
predictedClassDict[pIndex] += p
totalScores += classSum
#****************************
if not noPrint:
print "Predicted summary:"
# FIX! Not sure why we weren't working with a list..hack with dict for now
for predictedClass,p in predictedClassDict.items():
print str(predictedClass)+":", p
# this should equal the num rows in the dataset if full scoring? (minus any NAs)
print "totalScores:", totalScores
print "totalRight:", totalRight
if totalScores != 0:
pctRight = 100.0 * totalRight/totalScores
else:
pctRight = 0.0
pctWrong = 100 - pctRight
print "pctRight:", "%5.2f" % pctRight
print "pctWrong:", "%5.2f" % pctWrong
#****************************
# more testing for GBMView
# it's legal to get 0's for oobe error # if sample_rate = 1
sample_rate = kwargs.get('sample_rate', None)
validation = kwargs.get('validation', None)
if (sample_rate==1 and not validation):
pass
elif (totalScores<=0 or totalScores>5e9):
raise Exception("scores in GBMView seems wrong. scores:", scoresList)
varimp = gbm_model['varimp']
treeStats = gbm_model['treeStats']
if not treeStats:
raise Exception("treeStats not right?: %s" % dump_json(treeStats))
# print "json:", dump_json(gbmv)
data_key = gbm_model['_dataKey']
model_key = gbm_model['_key']
classification_error = pctWrong
if not noPrint:
if 'minLeaves' not in treeStats or not treeStats['minLeaves']:
raise Exception("treeStats seems to be missing minLeaves %s" % dump_json(treeStats))
print """
Leaves: {0} / {1} / {2}
Depth: {3} / {4} / {5}
Err: {6:0.2f} %
""".format(
treeStats['minLeaves'],
treeStats['meanLeaves'],
treeStats['maxLeaves'],
treeStats['minDepth'],
treeStats['meanDepth'],
treeStats['maxDepth'],
classification_error,
)
### modelInspect = node.inspect(model_key)
dataInspect = h2o_cmd.runInspect(key=data_key)
check_sandbox_for_errors()
return (round(classification_error,2), classErrorPctList, totalScores)
| apache-2.0 |
caxenie/nstbot | nstbot/omniarm.py | 1 | 24322 | from . import nstbot
import numpy as np
import threading
import time
class OmniArmBot(nstbot.NSTBot):
def initialize(self):
super(OmniArmBot, self).initialize()
self.retina_packet_size = {}
self.image = {}
self.count_spike_regions = {}
self.track_periods = {}
self.last_timestamp = {}
self.p_x = {}
self.p_y = {}
self.track_certainty = {}
self.count_regions = {}
self.count_regions_scale = {}
self.track_certainty_scale = {}
self.track_sigma_t = {}
self.track_sigma_p = {}
self.track_eta = {}
self.last_off = {}
self.track_certainty = {}
self.good_events = {}
self.conn_thread = {}
self.retina_thread = {}
self.trk_px = {}
self.trk_py = {}
self.trk_radius = {}
self.trk_certainty = {}
for name in self.adress_list:
if "retina" in name:
self.retina_packet_size[name] = None
self.image[name] = None
self.count_spike_regions[name] = None
self.track_periods[name] = None
self.p_x[name] = None
self.p_y[name] = None
self.track_certainty[name] = None
self.last_timestamp[name] = None
# initialize variables for embedded tracker
self.trk_px[name] = np.zeros_like(np.array(range(8)))
self.trk_py[name] = np.zeros_like(np.array(range(8)))
self.trk_radius[name] = np.zeros_like(np.array(range(8)))
self.trk_certainty[name] = np.zeros_like(np.array(range(8)))
self.sensor = {}
self.sensor_scale = {}
self.sensor_map = {}
self.add_sensor('bump', bit=0, range=1, length=1)
# we have 8 values but we take only the first 3 vals (base motors)
self.add_sensor('wheel', bit=1, range=100, length=3)
# the hex encoded values need conversion
# we have 4 values but we take only the first 3 vals
# gyro is measured in deg/s and Q16 encoded
self.add_sensor('gyro', bit=2, range=2**32-1, length=3)
# acc is measured in g and Q16 encoded
self.add_sensor('accel', bit=3, range=2**32-1, length=3)
# we have 4 values but we take only the first 3 vals
# euler is measured in deg and Q16 encoded
# roll [-90,90], pitch [-180,180], yaw [-180,180]
self.add_sensor('euler', bit=4, range=2**32-1, length=3)
# compass is measured in uT (micro tesla) and Q16 encoded
self.add_sensor('compass', bit=5, range=2**32-1, length=3)
# we have 8 values but we only take the last 5 (arm motors)
self.add_sensor('servo', bit=7, range=4096, length=5)
# we have 8 values but we only take the last 5 (arm motors)
self.add_sensor('load', bit=9, range=4096, length=5)
self.sensor_bitmap = {"bump": [19, slice(0, 1)],
"wheel": [17,slice(0, 3)],
"gyro": [4,slice(0, 3)],
"accel": [5, slice(0,3)],
"euler": [9, slice(0,3)],
"compass": [6, slice(0, 3)],
"servo": [16, slice(3, 8)],
"load": [14, slice(3, 8)]}
self.base([0.0, 0.0, 0.0])
#self.arm([np.pi, np.pi, np.pi, 1])
def base(self, spd, msg_period=None):
val_range = 100
x = int(spd[0] * val_range)
y = int(spd[1] * val_range)
z = int(spd[2] * val_range)
if x > val_range: x = val_range
if x < -val_range: x = -val_range
if y > val_range: y = val_range
if y < -val_range: y = -val_range
if z > val_range: z = val_range
if z < -val_range: z = -val_range
cmd = '!P0%d\n!P1%d\n!P2%d\n' % (x, y, z)
self.send('motors', 'base', cmd, msg_period=msg_period)
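    # Editor's note (illustrative): base([0.5, -0.25, 0.0]) clamps each speed
    # to [-100, 100] and sends '!P050\n!P1-25\n!P20\n' to the base motors.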
def base_pos(self, pos2d, msg_period=None):
val_range = 100
x = int(pos2d[0] * val_range)
y = int(pos2d[1] * val_range)
rot = int(pos2d[2] * val_range)
if x > val_range: x = val_range
if x < -val_range: x = -val_range
if y > val_range: y = val_range
if y < -val_range: y = -val_range
if rot > val_range: rot = val_range
if rot < -val_range: rot = -val_range
cmd = '!DD%d,%d,%d\n' % (x, y, rot)
self.send('motors', 'base_pos', cmd, msg_period=msg_period)
def arm(self, joints, msg_period=None):
# motion should be limited by the dynamics of the robot
# convert to numpy for special ops
jnt = np.array(joints)
# apply rad to position map
pos = (jnt[:3]*10000).astype(dtype=np.int)
# min pos and max pos in the range
min_val = 0
max_val = 62830
# apply range
pos[pos < min_val] = min_val
pos[pos > max_val] = max_val
# separate each link
pos = np.append(pos, jnt[3])
[shoulder, elbow, hand, gripper] = pos
# indices for motor IDs
cmd = '!r%d,%d,%d\n' % (shoulder, elbow, hand)
#cmd = '!G3%d\n!G4%d\n!G5%d\n' % (shoulder/10, elbow/10, hand/10)
grip = '!f%d\n' % int(gripper*100)
cmd += grip
self.send('motors', 'arm', cmd, msg_period=msg_period)
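    # Editor's note (illustrative): arm([np.pi, np.pi, np.pi, 1]) converts the
    # three joint angles to integer positions (pi*10000 -> 31415, clipped to
    # [0, 62830]) and the gripper value to a percentage, sending
    # '!r31415,31415,31415\n!f100\n'.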
def set_arm_speed(self, x, msg_period=None):
if x > 0:
self.send('motors', 'arm', '!P3%d\n!P4%d\n!P5%d\n!P750\n' % (x, x, x), msg_period=msg_period)
else:
self.send('motors', 'arm', '!P35\n!P45\n!P55\n', msg_period=msg_period)
def add_sensor(self, name, bit, range, length):
value = np.zeros(length)
self.sensor[bit] = value
self.sensor[name] = value
self.sensor_map[bit] = name
self.sensor_map[name] = bit
self.sensor_scale[bit] = 1.0 / range
def activate_sensors(self, period=0.1, **names):
bits = 0
for name, b_activate in names.items():
bit = self.sensor_map[name]
if b_activate:
bits += 1 << bit
cmd = '!I1,%d,%d\n' % (int(1.0/period), bits)
self.connection.send('motors', cmd)
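    # Editor's note (illustrative): activate_sensors(period=0.1, wheel=True,
    # gyro=True) sets bits 1 and 2 (bitmask 6) and sends '!I1,10,6\n', i.e.
    # stream the selected sensors at 10 Hz.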
def send(self, name, key, message, msg_period=None):
now = time.time()
if msg_period is None or now > self.last_time.get(key, 0) + msg_period:
#print 'msg', name, message
self.connection.send(name, message)
self.last_time[key] = now
def get_sensor(self, name):
return self.sensor[name]
def connect(self, connection):
self.adress_list = connection.get_socket_keys()
super(OmniArmBot, self).connect(connection)
for name in self.adress_list:
self.connection.send(name,'R\n') # reset platform
time.sleep(1)
if "retina" not in name:
self.connection.send(name,'!E0\n') # disable command echo (save some bandwidth)
time.sleep(1)
# FIXME for the embedded tracker to work and not have 2 streams simultaneously
# else:
# self.connection.send(name, 'E+\n')
# time.sleep(1)
self.set_arm_speed(10)
time.sleep(0.5)
self.conn_thread[name] = threading.Thread(target=self.sensor_loop, args=(name,))
#self.conn_thread[name] = multiprocessing.Process(target=self.sensor_loop, args=(name,))
self.conn_thread[name].daemon = True
self.conn_thread[name].start()
def disconnect(self):
for name in self.adress_list:
if 'retina' not in name:
self.connection.send(name, '!I0\n')
#self.connection.send(name, 'R\n')
else:
self.retina(name, False)
self.tracker(name, False, [], 10000)
# for process in multiprocessing.active_children():
# if process.is_alive():
# process.join()
# process.terminate()
#self.base([0, 0, 0])
#self.arm([np.pi, np.pi, np.pi, 1])
super(OmniArmBot, self).disconnect()
def retina(self, name, active, bytes_in_timestamp=4):
if active:
assert bytes_in_timestamp in [0, 2, 3, 4]
# FIXME for the embedded tracker to work and not have 2 streams simultaneously
#cmd = '!E%d\nE+\n' % bytes_in_timestamp
cmd = '!E%d\n' % bytes_in_timestamp
self.retina_packet_size[name] = 2 + bytes_in_timestamp
else:
cmd = 'E-\n'
self.retina_packet_size[name] = None
self.connection.send(name, cmd)
def tracker(self, name, active, tracking_freqs, streaming_period):
# calculate tracking period from frequency
tracking_periods = np.array([int(np.ceil((1.0 / freq) * 1000 * 1000)) for freq in tracking_freqs])
if active:
# initalize all channels to zero
for channel in range(8):
cmd = '!TD%d=0\n!TR=0\n' % channel
self.connection.send(name, cmd)
# make sure we disconnect the event stream
self.connection.send(name, 'E-\n')
for channel, tracking_period in enumerate(tracking_periods):
if active:
# now set all channels, which are specified for tracking
cmd = '!TD%d=%d\n!TR=%d\n' % (channel, tracking_period, streaming_period)
else:
cmd = '!TD%d=0\n!TR=0\n' % channel
self.connection.send(name, cmd)
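    # Editor's note (illustrative): tracker('retina_left', True, [100, 200], 10000)
    # (the retina name is hypothetical) programs channel 0 with a 10000 us
    # tracking period and channel 1 with 5000 us, each streamed every 10000 us,
    # e.g. '!TD0=10000\n!TR=10000\n' followed by '!TD1=5000\n!TR=10000\n'.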
def show_image(self, name, decay=0.5, display_mode='quick'):
if self.image[name] is None:
self.image[name] = np.zeros((128, 128), dtype=float)
self.retina_thread[name] = threading.Thread(target=self.image_loop,
args=(name, decay, display_mode))
# self.retina_thread[name] = multiprocessing.Process(target=self.image_loop,
# args=(name, decay, display_mode))
self.retina_thread[name].daemon = True
self.retina_thread[name].start()
def get_image(self, name):
return self.image[name]
def keep_image(self, name):
if self.image[name] is None:
self.image[name] = np.zeros((128, 128), dtype=float)
def image_loop(self, name, decay, display_mode):
import pylab
import matplotlib.pyplot as plt
# using axis for updating only parts of the image that change
fig, ax = plt.subplots()
# so quick mode can run on ubuntu
plt.show(block=False)
pylab.ion()
img = pylab.imshow(self.image[name], vmax=1, vmin=-1,
interpolation='none', cmap='binary')
pylab.xlim(0, 127)
pylab.ylim(127, 0)
regions = {}
if self.count_spike_regions[name] is not None:
for k, v in self.count_spike_regions[name].items():
minx, miny, maxx, maxy = v
rect = pylab.Rectangle((minx - 0.5, miny - 0.5),
maxx - minx,
maxy - miny,
facecolor='yellow', alpha=0.2)
pylab.gca().add_patch(rect)
regions[k] = rect
if self.track_periods[name] is not None:
            colors = ([(0,0,1), (0,1,0), (1,0,0), (1,1,0), (1,0,1)] * 10)[:len(self.p_y[name])]
scatter = pylab.scatter(self.p_x[name], self.p_y[name], s=50, c=colors)
else:
scatter = None
while True:
img.set_data(self.image[name])
for k, rect in regions.items():
alpha = self.get_spike_rate(k) * 0.5
alpha = min(alpha, 0.5)
rect.set_alpha(0.05 + alpha)
if scatter is not None:
scatter.set_offsets(np.array([self.p_x[name], self.p_y[name]]).T)
c = [(r,g,b,min(self.track_certainty[name][i],1)) for i,(r,g,b) in enumerate(colors)]
scatter.set_color(c)
if display_mode == 'quick':
# this is faster, but doesn't work on all systems
fig.canvas.draw()
fig.canvas.flush_events()
elif display_mode == 'ubuntu_quick':
# this is even faster, but doesn't work on all systems
ax.draw_artist(ax.patch)
ax.draw_artist(img)
ax.draw_artist(scatter)
fig.canvas.update()
fig.canvas.flush_events()
else:
# this works on all systems, but is kinda slow
pylab.pause(1e-8)
self.image[name] *= decay
def sensor_loop(self, name):
"""Handle all data coming from the robot."""
old_data = None
buffered_ascii = ''
while True:
if "retina" in name:
packet_size = self.retina_packet_size[name]
else:
packet_size = None
# grab the new data
data = self.connection.receive(name)
# combine it with any leftover data from last time through the loop
if old_data is not None:
data = old_data + data
old_data = None
if packet_size is None:
# no retina events, so everything should be ascii
buffered_ascii += data
else:
# find the ascii events
data_all = np.fromstring(data, np.uint8)
ascii_index = np.where(data_all[::packet_size] < 0x80)[0]
offset = 0
while len(ascii_index) > 0:
# if there's an ascii event, remove it from the data
index = ascii_index[0] * packet_size
stop_index = np.where(data_all[index:] >= 0x80)[0]
if len(stop_index) > 0:
stop_index = index + stop_index[0]
else:
stop_index = len(data)
# and add it to the buffered_ascii list
buffered_ascii += data[offset + index:offset + stop_index]
data_all = np.hstack((data_all[:index],
data_all[stop_index:]))
offset += stop_index - index
ascii_index = np.where(data_all[::packet_size] < 0x80)[0]
# handle any partial retina packets
extra = len(data_all) % packet_size
if extra != 0:
old_data = data[-extra:]
data_all = data_all[:-extra]
if len(data_all) > 0:
# now process those retina events
if "retina" in name:
self.process_retina(name, data_all)
if "retina" not in name:
# and process the ascii events too from the base and arm sensors
while '\n\n' in buffered_ascii:
cmd, buffered_ascii = buffered_ascii.split('\n\n', 1)
if '-I' in cmd:
dbg, proc_cmd = cmd.split('-I', 1)
self.process_ascii(name, '-I' + proc_cmd)
else:
# process ascii events from embedded tracker
while '\n' in buffered_ascii:
cmd, buffered_ascii = buffered_ascii.split('\n', 1)
# make sure we discard the echo
if '-T' in cmd and 'R' not in cmd and '(' not in cmd:
dbg, proc_cmd = cmd.split('-T', 1)
self.process_ascii(name, '-T' + proc_cmd)
def process_ascii(self, name, message):
try:
# handle sensory data
if message[:2] == '-I':
data = message[2:].split()
sp_data = data[1:]
hdr_idx = [ind for ind, s in enumerate(sp_data) if '-S' in s]
for ind, el in enumerate(hdr_idx):
if ind < len(hdr_idx) - 1:
vals = sp_data[hdr_idx[ind] + 1:hdr_idx[ind + 1]]
else:
vals = sp_data[hdr_idx[ind] + 1:]
src = int(sp_data[hdr_idx[ind]][2:])
                    for sensor_name, value in self.sensor_bitmap.iteritems():
                        if src == value[0]:
                            sliced = value[1]
                            index = self.sensor_map[sensor_name]
                            scale = self.sensor_scale[index]
                            # FIXME Check the correct ranges and conversions
                            if scale < 1./6000 or sensor_name != 'bump':
                                sensors = [float.fromhex(x)*scale for x in vals[sliced]]
                            else:
                                sensors = [float(x)*scale for x in vals[sliced]]
                            self.sensor[index] = sensors
                            self.sensor[self.sensor_map[index]] = sensors
# handle uDVS tracker data
elif message[:2] == '-T':
trk_data = message[2:]
# make sure, that the message is not the DVS confirmation msg:
# TODO: do we need to track more than one frequency per retina?
# if yes, how do we get the information about the current frequency from the incoming message?
                # in this case we need to make adjustments accordingly here, in the get_tracker function and in the firmware
                # Update: up to 8 tracked frequencies are possible for each retina (the corresponding changes still need testing)
if len(trk_data) > 5:
                    trk_id = int(trk_data[0], 16) # uDVS tracker id (single hex digit)
trk_xpos = trk_data[1:5] # xpos 4byte HEX
trk_ypos = trk_data[5:9] # ypos 4byte HEX
trk_rad = trk_data[9:11] # tracking radius 2byte HEX
trk_cert = trk_data[11:13] # tracking certainty 2byte HEX
self.trk_px[name][trk_id] = float.fromhex(trk_xpos)
self.trk_py[name][trk_id] = float.fromhex(trk_ypos)
self.trk_radius[name][trk_id] = float.fromhex(trk_rad)
self.trk_certainty[name][trk_id] = float.fromhex(trk_cert)
else:
print('unknown message: %s\n' % message)
except:
print('Error processing "%s"' % message)
import traceback
traceback.print_exc()
last_timestamp = None
def process_retina(self, name, data):
packet_size = self.retina_packet_size[name]
y = data[::packet_size] & 0x7f
x = data[1::packet_size] & 0x7f
if self.image[name] is not None:
value = np.where(data[1::packet_size]>=0x80, 1, -1)
self.image[name][y, x] += value
if self.count_spike_regions[name] is not None:
tau = 0.05 * 1000000
for k, region in self.count_spike_regions[name].items():
minx, miny, maxx, maxy = region
index = (minx <= x) & (x<maxx) & (miny <= y) & (y<maxy)
count = np.sum(index)
t = (int(data[-2]) << 8) + data[-1]
if packet_size >= 5:
t += int(data[-3]) << 16
if packet_size >= 6:
t += int(data[-4]) << 24
old_count, old_time = self.count_regions[name][k]
dt = float(t - old_time)
if dt < 0:
dt += 1 << ((packet_size - 2) * 8)
count *= self.count_regions_scale[name][k]
count /= dt / 1000.0
decay = np.exp(-dt/tau)
new_count = old_count * (decay) + count * (1-decay)
self.count_regions[name][k] = new_count, t
if self.track_periods[name] is not None:
t = data[2::packet_size].astype(np.uint32)
t = (t << 8) + data[3::packet_size]
if packet_size >= 5:
t = (t << 8) + data[4::packet_size]
if packet_size >=6:
t = (t << 8) + data[5::packet_size]
if self.last_timestamp[name] is not None:
dt = float(t[-1]) - self.last_timestamp[name]
if dt < 0:
dt += 1 << (8 * (packet_size-2))
else:
dt = 1
self.last_timestamp[name] = t[-1]
index_off = (data[1::packet_size] & 0x80) == 0
delta = np.where(index_off, t - self.last_off[name][x, y], 0)
self.last_off[name][x[index_off],
y[index_off]] = t[index_off]
tau = 0.05 * 1000000
decay = np.exp(-dt/tau)
self.track_certainty[name] *= decay
for i, period in enumerate(self.track_periods[name]):
eta = self.track_eta[name]
t_exp = period * 2
sigma_t = self.track_sigma_t[name] # in microseconds
sigma_p = self.track_sigma_p[name] # in pixels
t_diff = delta.astype(np.float) - t_exp
w_t = np.exp(-(t_diff**2)/(2*sigma_t**2))
px = self.p_x[name][i]
py = self.p_y[name][i]
dist2 = (x - px)**2 + (y - py)**2
w_p = np.exp(-dist2/(2*sigma_p**2))
ww = w_t * w_p
c = sum(ww) * self.track_certainty_scale[name] / dt
self.track_certainty[name][i] += (1-decay) * c
w = eta * ww
for j in np.where(w > eta * 0.1)[0]:
px += w[j] * (x[j] - px)
py += w[j] * (y[j] - py)
self.p_x[name][i] = px
self.p_y[name][i] = py
def track_spike_rate(self,name, **regions):
self.count_spike_regions[name] = regions
self.count_regions[name] = {}
self.count_regions_scale[name] = {}
for k,v in regions.items():
self.count_regions[name][k] = [0, 0]
area = (v[2] - v[0]) * (v[3] - v[1])
self.count_regions_scale[name][k] = 200.0 / area
def get_spike_rate(self, name, region):
return self.count_regions[name][region][0]
def track_frequencies(self, name, freqs, sigma_t=100, sigma_p=30, eta=0.3,
certainty_scale=10000):
freqs = np.array(freqs, dtype=float)
track_periods = 500000 / freqs
self.track_certainty_scale[name] = certainty_scale
self.track_sigma_t[name] = sigma_t
self.track_sigma_p[name] = sigma_p
self.track_eta[name] = eta
self.last_off[name] = np.zeros((128, 128), dtype=np.uint32)
self.p_x[name] = np.zeros_like(track_periods) + 64.0
self.p_y[name] = np.zeros_like(track_periods) + 64.0
self.track_certainty[name] = np.zeros_like(track_periods)
self.good_events[name] = np.zeros_like(track_periods, dtype=int)
self.track_periods[name] = track_periods
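    # Editor's note (illustrative): track_frequencies(name, [100, 200]) tracks
    # two blinking LEDs; track_periods = 500000 / freqs gives the expected
    # half-periods in microseconds, i.e. [5000., 2500.], which process_retina
    # above doubles to the full blink period when weighting events.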
def get_frequency_info(self, name, index):
x = self.p_x[name][index] / 64.0 - 1
y = - self.p_y[name][index] / 64.0 + 1
return x, y, self.track_certainty[name][index]
def get_tracker_info(self, name, index):
x = self.trk_px[name][index]/ 1024.0 - 1
y = - self.trk_py[name][index]/ 1024.0 + 1
c = self.trk_certainty[name][index] / 255.0
return x, y, self.trk_radius[name][index], c
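    # Editor's note (illustrative): get_tracker_info() rescales raw tracker
    # pixel coordinates to roughly [-1, 1] and certainty (0..255) to [0, 1];
    # e.g. trk_px=1024, trk_py=1024, certainty=255 -> (0.0, 0.0, radius, 1.0).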
| gpl-2.0 |
IshankGulati/scikit-learn | sklearn/grid_search.py | 16 | 40213 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterGrid` instead.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterSampler` instead.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.fit_grid_point` instead.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def classes_(self):
return self.best_estimator_.classes_
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of 4-tuples: score, n_test_samples, scoring time, parameters
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GridSearchCV` instead.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default: 1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape='ovr', degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.RandomizedSearchCV` instead.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default: 1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
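# Minimal usage sketch for RandomizedSearchCV (illustrative only; mirrors the
# GridSearchCV doctest above, with scipy.stats providing the distributions):
#
#     from scipy.stats import expon
#     from sklearn import svm, datasets
#     iris = datasets.load_iris()
#     param_dist = {'C': expon(scale=10), 'kernel': ['linear', 'rbf']}
#     search = RandomizedSearchCV(svm.SVC(), param_dist, n_iter=8, random_state=0)
#     search.fit(iris.data, iris.target)
#     print(search.best_params_, search.best_score_)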
| bsd-3-clause |
niamoto/niamoto-core | tests/data_marts/test_base_fact_table.py | 2 | 4217 | # coding: utf-8
import unittest
from sqlalchemy.engine.reflection import Inspector
import sqlalchemy as sa
import pandas as pd
from niamoto.testing import set_test_path
set_test_path()
from niamoto.conf import settings
from niamoto.testing.test_database_manager import TestDatabaseManager
from niamoto.testing.base_tests import BaseTestNiamotoSchemaCreated
from niamoto.testing.test_data_marts import TestDimension, \
TestFactTablePublisher
from niamoto.data_marts.fact_tables.base_fact_table import BaseFactTable
from niamoto.db.connector import Connector
from niamoto.db import metadata as meta
class TestBaseFactTable(BaseTestNiamotoSchemaCreated):
"""
Test case for BaseFactTable class.
"""
def setUp(self):
super(TestBaseFactTable, self).setUp()
self.tearDown()
def tearDown(self):
with Connector.get_connection() as connection:
inspector = Inspector.from_engine(connection)
tables = inspector.get_table_names(
schema=settings.NIAMOTO_FACT_TABLES_SCHEMA
)
for tb in tables:
connection.execute("DROP TABLE {};".format(
"{}.{}".format(settings.NIAMOTO_FACT_TABLES_SCHEMA, tb)
))
connection.execute(meta.fact_table_registry.delete())
with Connector.get_connection() as connection:
inspector = Inspector.from_engine(connection)
tables = inspector.get_table_names(
schema=settings.NIAMOTO_DIMENSIONS_SCHEMA
)
for tb in tables:
connection.execute("DROP TABLE {};".format(
"{}.{}".format(settings.NIAMOTO_DIMENSIONS_SCHEMA, tb)
))
connection.execute(meta.dimension_registry.delete())
def test_base_fact_table(self):
dim_1 = TestDimension("dim_1")
dim_2 = TestDimension("dim_2")
ft = BaseFactTable(
"test_fact",
dimensions=[dim_1, dim_2],
measure_columns=[
sa.Column('measure_1', sa.Float),
],
publisher_cls=TestFactTablePublisher
)
dim_1.create_dimension()
dim_2.create_dimension()
dim_1.populate_from_publisher()
dim_2.populate_from_publisher()
self.assertFalse(ft.is_created())
ft.create_fact_table()
self.assertTrue(ft.is_created())
with Connector.get_connection() as connection:
inspector = Inspector.from_engine(connection)
tables = inspector.get_table_names(
schema=settings.NIAMOTO_FACT_TABLES_SCHEMA
)
self.assertIn("test_fact", tables)
ft.populate_from_publisher()
vals = ft.get_values()
self.assertGreater(len(vals), 0)
ft.truncate()
vals_bis = ft.get_values()
self.assertEqual(len(vals_bis), 0)
def test_load(self):
dim_1 = TestDimension("dim_1")
dim_2 = TestDimension("dim_2")
ft = BaseFactTable(
"test_fact",
dimensions=[dim_1, dim_2],
measure_columns=[
sa.Column('measure_1', sa.Float),
],
publisher_cls=TestFactTablePublisher
)
dim_1.create_dimension()
dim_2.create_dimension()
ft.create_fact_table()
ft_bis = BaseFactTable.load('test_fact')
self.assertEqual(
[dim.name for dim in ft_bis.dimensions],
['dim_1', 'dim_2'],
)
self.assertEqual(
[measure.name for measure in ft_bis.measurement_columns],
['measure_1', ]
)
self.assertEqual(ft_bis.name, ft.name)
if __name__ == '__main__':
TestDatabaseManager.setup_test_database()
TestDatabaseManager.create_schema(settings.NIAMOTO_SCHEMA)
TestDatabaseManager.create_schema(settings.NIAMOTO_RASTER_SCHEMA)
TestDatabaseManager.create_schema(settings.NIAMOTO_VECTOR_SCHEMA)
TestDatabaseManager.create_schema(settings.NIAMOTO_DIMENSIONS_SCHEMA)
TestDatabaseManager.create_schema(settings.NIAMOTO_FACT_TABLES_SCHEMA)
unittest.main(exit=False)
TestDatabaseManager.teardown_test_database()
| gpl-3.0 |
gwpy/seismon | RfPrediction/old/GPR_Rfamp_prediction.py | 2 | 3742 | # Python GPR code for Rf Amplitude Prediction from Earthquake Parameters
# Nikhil Mukund Menon ([email protected])
# 16th July 2017
from __future__ import division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF,WhiteKernel, RationalQuadratic, ExpSineSquared, Matern, ConstantKernel as C
filename = '/home/mcoughlin/Seismon/Predictions/L1O1O2_CMT/earthquakes.txt'
'''
1: earthquake gps time
2: earthquake mag
3: p gps time
4: s gps time
5: r (2 km/s)
6: r (3.5 km/s)
7: r (5 km/s)
8: predicted ground motion (m/s)
9: lower bounding time
10: upper bounding time
11: latitude
12: longitude
13: distance
14: depth (m)
15: azimuth (deg)
16: nodalPlane1_strike
17: nodalPlane1_rake
18: nodalPlane1_dip
19: momentTensor_Mrt
20: momentTensor_Mtp
21: momentTensor_Mrp
22: momentTensor_Mtt
23: momentTensor_Mrr
24: momentTensor_Mpp
25: peak ground velocity gps time
26: peak ground velocity (m/s)
27: peak ground acceleration gps time
28: peak ground acceleration (m/s^2)
29: peak ground displacement gps time
30: peak ground displacement (m)
31: Lockloss time
32: Detector Status
'''
data = pd.read_csv(filename,delimiter=' ',header=None)
Rf_Amp_thresh = 1e-7;
index = data[25] > Rf_Amp_thresh
data = data[:][index]
X = np.asarray(data[[1,10,7,11,12,13,14,15,16,17,18,19,20,21,22,23]])
Y = np.asarray(data[[25]])  # already shaped (n_samples, 1); an extra np.newaxis would make it 3-D
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
##############################################################################
# Instanciate a Gaussian Process model
# Choose Kernel [Tricky]
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
kernel = Matern(length_scale=0.2, nu=0.5) + WhiteKernel(noise_level=0.1) + C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
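# Note on the composite kernel above (the second assignment overrides the first):
# Matern(nu=0.5) models rough, exponential-like correlations between earthquake
# parameters, WhiteKernel absorbs measurement noise in the observed Rf amplitudes,
# and the scaled RBF term captures smooth long-range trends. The constant factor's
# bounds let the optimizer rescale the RBF component during the maximum-likelihood fit.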
##############################################################################
gp = GaussianProcessRegressor(alpha=1e-3, copy_X_train=True,
kernel=kernel,
n_restarts_optimizer=10, normalize_y=False,
optimizer='fmin_l_bfgs_b', random_state=None)
'''
OKish Parameter Values
gp = GaussianProcessRegressor(alpha=1e-7, copy_X_train=True,
kernel=1**2 + Matern(length_scale=0.2, nu=0.5) + WhiteKernel(noise_level=0.1),
n_restarts_optimizer=10, normalize_y=False,
optimizer='fmin_l_bfgs_b', random_state=None)
'''
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(x_train,y_train)
#x = np.linspace(min(X),max(X),len(X))[:,np.newaxis]
y_pred, sigma = gp.predict(x_test, return_std=True)
## Percentage within the specified factor
Fac = 5
IDX = y_pred/y_test >= 1
K = y_pred[IDX]
Q = y_test[IDX]
L = y_pred[~IDX]
M = y_test[~IDX]
Upper_indices = [i for i, x in enumerate(K <= Fac*Q) if x == True]
Lower_indices = [i for i, x in enumerate(L >= M/Fac) if x == True]
Percent_within_Fac = (len(Upper_indices) + len(Lower_indices))/len(y_pred)*100
print("Percentage captured within a factor of {} = {:.2f}".format(Fac,Percent_within_Fac))
# sort results in Ascending order
y_test_sort = np.sort(y_test,axis=0)
y_pred_sort = y_pred[np.argsort(y_test,axis=0)]
# Errorbar values
yerr_lower = y_test_sort - y_test_sort/Fac
yerr_upper = Fac*y_test_sort - y_test_sort
idx = np.arange(0,len(y_test_sort))
#plt.scatter(idx,y_test_sort,color='red',alpha=0.3)
plt.errorbar(idx,y_test_sort,yerr=[yerr_lower,yerr_upper], alpha=0.3 )
idx2 = np.arange(0,len(y_pred_sort))
plt.scatter(idx2,y_pred_sort,color='green',alpha=0.7)
plt.yscale('log')
plt.grid()
plt.ylim([1e-8, 1e-3])
plt.title("Percentage captured within a factor of {} = {:.2f}".format(Fac,Percent_within_Fac))
plt.savefig('GPR.png')
| gpl-3.0 |
Isaac-W/cpr-vision-measurement | dataloader.py | 1 | 6474 | import csv
import numpy as np
import matplotlib.pyplot as plt
import peakutils
from datetime import datetime
from datalogger import TIME_FORMAT
from markerutils import *
SMOOTH_WINDOW_SIZE = 3
def average(values):
return sum(values) / float(len(values))
def time_parse(value):
try:
return datetime.strptime(value, TIME_FORMAT)
except:
pass
return datetime.strptime(value, '%m-%d-%y_%H:%M:%S')
def int_parse(value):
return int(value) if value else None
def float_parse(value):
return float(value) if value else None
def negate_data(data):
return [-x for x in data]
def get_peaks(data):
peaks = peakutils.indexes(np.array(data), 0.3, 8)
return peaks.astype(int).tolist()
def plot_data(data, peaks, troughs):
x = [x for x in range(0, len(data))]
y = data
#plt.plot(x, y)
if peaks is not None:
plt.plot(peaks, [data[x] for x in peaks], 'rx')
if troughs is not None:
plt.plot(troughs, [data[x] for x in troughs], 'bx')
#plt.show()
typemap = {
'Index': int_parse,
'Time': time_parse,
'Origin': float_parse,
'Position': float_parse,
'Rate': float_parse,
'Depth': float_parse,
'Recoil': float_parse,
'Status': int_parse
}
class Compression(object):
def __init__(self, time, depth, recoil, rate):
self.time = time
self.depth = depth
self.recoil = recoil
self.rate = rate
def is_depth_correct(self):
return DEPTH_RANGE[0] <= self.depth <= DEPTH_RANGE[1]
def is_recoil_correct(self):
return self.recoil <= RECOIL_THRESH
def is_rate_correct(self):
return RATE_RANGE[0] <= self.rate <= RATE_RANGE[1]
def is_correct(self):
return self.is_depth_correct() and self.is_recoil_correct() and self.is_rate_correct()
def is_depth_recoil_correct(self):
return self.is_depth_correct() and self.is_recoil_correct()
def is_depth_rate_correct(self):
return self.is_depth_correct() and self.is_rate_correct()
def is_recoil_rate_correct(self):
return self.is_recoil_correct() and self.is_rate_correct()
class DataLoader(object):
def __init__(self, filename, start=None, end=None):
self.data = []
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
datarow = {}
for key, value in row.items():
datarow[key] = typemap[key](value)
self.data.append(datarow)
# Slice data
self.data = self.data[start:end]
def get_duration(self):
return (abs(self.data[-1]['Time'] - self.data[0]['Time'])).total_seconds()
def get_values(self, key):
return [self.data[x][key] for x in range(len(self.data))]
def get_values_at(self, points, key):
return [self.data[int(x)][key] for x in points]
def get_raw_data(self):
return self.get_values('Position')
def get_smoothed_data(self):
data = self.get_raw_data()
# Smooth the data
smooth_filter = np.array([1 / float(SMOOTH_WINDOW_SIZE) for x in range(SMOOTH_WINDOW_SIZE)])
smoothed = np.convolve(np.array(data), smooth_filter, mode='valid')
smoothed = np.concatenate((np.zeros(int(SMOOTH_WINDOW_SIZE / 2)), smoothed)) # Add offset caused by convolution
return smoothed
def get_raw_data_at(self, points):
data = self.get_raw_data()
return [data[x] for x in points]
def get_data_at(self, points):
data = self.get_smoothed_data()
return [data[x] for x in points]
def get_raw_peaks(self):
return get_peaks(self.get_raw_data())
def get_peaks(self):
return get_peaks(self.get_smoothed_data())
def get_raw_troughs(self):
return get_peaks(negate_data(self.get_raw_data()))
def get_troughs(self):
data = self.get_smoothed_data()
out_peaks = []
peaks = get_peaks(negate_data(data))
for peak in peaks:
if data[peak] <= -0.45:
out_peaks.append(peak)
return out_peaks
def get_periods(self, points):
periods = []
for i in range(1, len(points)):
period = (abs(self.data[points[i]]['Time'] - self.data[points[i - 1]]['Time'])).total_seconds()
if period == 0:
continue
periods.append(period)
return periods
def get_rates(self):
periods = self.get_periods(self.get_troughs())
rates = [SEC_PER_MIN / x for x in periods]
return rates
def get_average_rate(self):
return average(self.get_rates())
def get_depths(self):
troughs = self.get_troughs()
depths = self.get_data_at(troughs)
return depths
def get_average_depth(self):
return average(self.get_depths())
def get_recoils(self):
peaks = self.get_peaks()
recoils = self.get_data_at(peaks)
return recoils
def get_average_recoil(self):
return average(self.get_recoils())
def get_compressions(self):
# Get the compressions (using depth)
data = self.get_smoothed_data()
troughs = self.get_troughs()
compressions = []
for i in range(0, len(troughs) - 1):
trough = troughs[i]
next_trough = troughs[i + 1]
# Get depth
depth = -data[trough]
# Get recoil (between compressions)
subset = data[trough:next_trough]
recoil = -max(subset)
# Get rate
time = (abs(self.data[trough]['Time'] - self.data[next_trough]['Time'])).total_seconds()
if time == 0:
continue
rate = SEC_PER_MIN / time
compressions.append(Compression(self.data[trough]['Time'], depth, recoil, rate))
return compressions
def plot_raw(self, show_peaks=True, show_troughs=True):
data = self.get_raw_data()
peaks = self.get_raw_peaks() if show_peaks else None
troughs = self.get_raw_troughs() if show_troughs else None
plot_data(data, peaks, troughs)
def plot(self, show_peaks=True, show_troughs=True):
data = self.get_smoothed_data()
peaks = self.get_peaks() if show_peaks else None
troughs = self.get_troughs() if show_troughs else None
plot_data(data, peaks, troughs)
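# Example usage sketch ('session.tsv' is a hypothetical log file produced by datalogger):
#
#     loader = DataLoader('session.tsv')
#     print(loader.get_average_depth(), loader.get_average_rate())
#     good = [c for c in loader.get_compressions() if c.is_correct()]
#     loader.plot()
#     plt.show()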
| mit |
lukas/ml-class | examples/keras-audio/audio_utilities.py | 2 | 13234 | import scipy.io.wavfile
from os.path import expanduser
import os
import array
from pylab import *
import scipy.signal
import scipy
import wave
import numpy as np
import time
import sys
import math
import matplotlib
import subprocess
# Author: Brian K. Vogel
# [email protected]
fft_size = 2048
iterations = 300
hopsamp = fft_size // 8
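# With a Hann analysis window of length fft_size, a hop of fft_size // 8 gives
# 87.5% overlap between adjacent frames.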
def ensure_audio():
if not os.path.exists("audio"):
print("Downloading audio dataset...")
subprocess.check_output(
"curl -SL https://storage.googleapis.com/wandb/audio.tar.gz | tar xz", shell=True)
def griffin_lim(stft, scale):
# Undo the rescaling.
stft_modified_scaled = stft / scale
stft_modified_scaled = stft_modified_scaled**0.5
# Use the Griffin&Lim algorithm to reconstruct an audio signal from the
# magnitude spectrogram.
x_reconstruct = reconstruct_signal_griffin_lim(stft_modified_scaled,
fft_size, hopsamp,
iterations)
# The output signal must be in the range [-1, 1], otherwise we need to clip or normalize.
max_sample = np.max(abs(x_reconstruct))
if max_sample > 1.0:
x_reconstruct = x_reconstruct / max_sample
return x_reconstruct
def hz_to_mel(f_hz):
"""Convert Hz to mel scale.
This uses the formula from O'Shaughnessy's book.
Args:
f_hz (float): The value in Hz.
Returns:
The value in mels.
"""
return 2595*np.log10(1.0 + f_hz/700.0)
def mel_to_hz(m_mel):
"""Convert mel scale to Hz.
This uses the formula from O'Shaughnessy's book.
Args:
m_mel (float): The value in mels
Returns:
The value in Hz
"""
return 700*(10**(m_mel/2595) - 1.0)
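# Sanity check (approximate): hz_to_mel(1000.0) is close to 1000 mels by construction
# of the formula, and mel_to_hz(hz_to_mel(f)) recovers f up to floating-point rounding.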
def fft_bin_to_hz(n_bin, sample_rate_hz, fft_size):
"""Convert FFT bin index to frequency in Hz.
Args:
n_bin (int or float): The FFT bin index.
sample_rate_hz (int or float): The sample rate in Hz.
fft_size (int or float): The FFT size.
Returns:
The value in Hz.
"""
n_bin = float(n_bin)
sample_rate_hz = float(sample_rate_hz)
fft_size = float(fft_size)
return n_bin*sample_rate_hz/(2.0*fft_size)
def hz_to_fft_bin(f_hz, sample_rate_hz, fft_size):
"""Convert frequency in Hz to FFT bin index.
Args:
f_hz (int or float): The frequency in Hz.
sample_rate_hz (int or float): The sample rate in Hz.
fft_size (int or float): The FFT size.
Returns:
The FFT bin index as an int.
"""
f_hz = float(f_hz)
sample_rate_hz = float(sample_rate_hz)
fft_size = float(fft_size)
fft_bin = int(np.round((f_hz*2.0*fft_size/sample_rate_hz)))
if fft_bin >= fft_size:
fft_bin = fft_size-1
return fft_bin
def make_mel_filterbank(min_freq_hz, max_freq_hz, mel_bin_count,
linear_bin_count, sample_rate_hz):
"""Create a mel filterbank matrix.
Create and return a mel filterbank matrix `filterbank` of shape (`mel_bin_count`,
`linear_bin_couont`). The `filterbank` matrix can be used to transform a
(linear scale) spectrum or spectrogram into a mel scale spectrum or
spectrogram as follows:
`mel_scale_spectrum` = `filterbank`*'linear_scale_spectrum'
where linear_scale_spectrum' is a shape (`linear_bin_count`, `m`) and
`mel_scale_spectrum` is shape ('mel_bin_count', `m`) where `m` is the number
of spectral time slices.
Likewise, the reverse-direction transform can be performed as:
'linear_scale_spectrum' = filterbank.T`*`mel_scale_spectrum`
Note that the process of converting to mel scale and then back to linear
scale is lossy.
This function computes the mel-spaced filters such that each filter is triangular
(in linear frequency) with response 1 at the center frequency and decreases linearly
to 0 upon reaching an adjacent filter's center frequency. Note that any two adjacent
filters will overlap having a response of 0.5 at the mean frequency of their
respective center frequencies.
Args:
min_freq_hz (float): The frequency in Hz corresponding to the lowest
mel scale bin.
max_freq_hz (flloat): The frequency in Hz corresponding to the highest
mel scale bin.
mel_bin_count (int): The number of mel scale bins.
linear_bin_count (int): The number of linear scale (fft) bins.
sample_rate_hz (float): The sample rate in Hz.
Returns:
The mel filterbank matrix as an 2-dim Numpy array.
"""
min_mels = hz_to_mel(min_freq_hz)
max_mels = hz_to_mel(max_freq_hz)
# Create mel_bin_count linearly spaced values between these extreme mel values.
mel_lin_spaced = np.linspace(min_mels, max_mels, num=mel_bin_count)
# Map each of these mel values back into linear frequency (Hz).
center_frequencies_hz = np.array([mel_to_hz(n) for n in mel_lin_spaced])
mels_per_bin = float(max_mels - min_mels)/float(mel_bin_count - 1)
mels_start = min_mels - mels_per_bin
hz_start = mel_to_hz(mels_start)
fft_bin_start = hz_to_fft_bin(hz_start, sample_rate_hz, linear_bin_count)
#print('fft_bin_start: ', fft_bin_start)
mels_end = max_mels + mels_per_bin
hz_stop = mel_to_hz(mels_end)
fft_bin_stop = hz_to_fft_bin(hz_stop, sample_rate_hz, linear_bin_count)
#print('fft_bin_stop: ', fft_bin_stop)
# Map each center frequency to the closest fft bin index.
linear_bin_indices = np.array([hz_to_fft_bin(
f_hz, sample_rate_hz, linear_bin_count) for f_hz in center_frequencies_hz])
# Create filterbank matrix.
filterbank = np.zeros((mel_bin_count, linear_bin_count))
for mel_bin in range(mel_bin_count):
center_freq_linear_bin = int(linear_bin_indices[mel_bin].item())
# Create a triangular filter having the current center freq.
# The filter will start with 0 response at left_bin (if it exists)
# and ramp up to 1.0 at center_freq_linear_bin, and then ramp
# back down to 0 response at right_bin (if it exists).
# Create the left side of the triangular filter that ramps up
# from 0 to a response of 1 at the center frequency.
if center_freq_linear_bin > 1:
# It is possible to create the left triangular filter.
if mel_bin == 0:
# Since this is the first center frequency, the left side
# must start ramping up from linear bin 0 or 1 mel bin before the center freq.
left_bin = max(0, fft_bin_start)
else:
# Start ramping up from the previous center frequency bin.
left_bin = int(linear_bin_indices[mel_bin - 1].item())
for f_bin in range(left_bin, center_freq_linear_bin+1):
if (center_freq_linear_bin - left_bin) > 0:
response = float(f_bin - left_bin) / \
float(center_freq_linear_bin - left_bin)
filterbank[mel_bin, f_bin] = response
# Create the right side of the triangular filter that ramps down
# from 1 to 0.
if center_freq_linear_bin < linear_bin_count-2:
# It is possible to create the right triangular filter.
if mel_bin == mel_bin_count - 1:
# Since this is the last mel bin, we must ramp down to response of 0
# at the last linear freq bin.
right_bin = min(linear_bin_count - 1, fft_bin_stop)
else:
right_bin = int(linear_bin_indices[mel_bin + 1].item())
for f_bin in range(center_freq_linear_bin, right_bin+1):
if (right_bin - center_freq_linear_bin) > 0:
response = float(right_bin - f_bin) / \
float(right_bin - center_freq_linear_bin)
filterbank[mel_bin, f_bin] = response
filterbank[mel_bin, center_freq_linear_bin] = 1.0
return filterbank
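# Usage sketch (the parameter values below are illustrative assumptions, not fixed by
# this module). `stft_mag` is a magnitude spectrogram whose rows are time slices, so it
# is transposed before applying the (mel_bin_count x linear_bin_count) filterbank matrix:
#
#     linear_bin_count = stft_mag.shape[1]
#     filterbank = make_mel_filterbank(min_freq_hz=150.0, max_freq_hz=8000.0,
#                                      mel_bin_count=150,
#                                      linear_bin_count=linear_bin_count,
#                                      sample_rate_hz=44100)
#     mel_spectrogram = filterbank.dot(stft_mag.T)
#     approx_linear = filterbank.T.dot(mel_spectrogram)   # lossy inverse mapping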
def stft_for_reconstruction(x, fft_size, hopsamp):
"""Compute and return the STFT of the supplied time domain signal x.
Args:
x (1-dim Numpy array): A time domain signal.
fft_size (int): FFT size. Should be a power of 2, otherwise DFT will be used.
hopsamp (int): The hop size, in samples.
Returns:
The STFT. The rows are the time slices and columns are the frequency bins.
"""
window = np.hanning(fft_size)
fft_size = int(fft_size)
hopsamp = int(hopsamp)
return np.array([np.fft.rfft(window*x[i:i+fft_size])
for i in range(0, len(x)-fft_size, hopsamp)])
def istft_for_reconstruction(X, fft_size, hopsamp):
"""Invert a STFT into a time domain signal.
Args:
X (2-dim Numpy array): Input spectrogram. The rows are the time slices and columns are the frequency bins.
fft_size (int):
hopsamp (int): The hop size, in samples.
Returns:
The inverse STFT.
"""
fft_size = int(fft_size)
hopsamp = int(hopsamp)
window = np.hanning(fft_size)
time_slices = X.shape[0]
len_samples = int(time_slices*hopsamp + fft_size)
x = np.zeros(len_samples)
for n, i in enumerate(range(0, len(x)-fft_size, hopsamp)):
x[i:i+fft_size] += window*np.real(np.fft.irfft(X[n]))
return x
def get_signal(in_file, expected_fs=44100):
"""Load a wav file.
If the file contains more than one channel, return a mono file by taking
the mean of all channels.
If the sample rate differs from the expected sample rate (default is 44100 Hz),
raise an exception.
Args:
in_file: The input wav file, which should have a sample rate of `expected_fs`.
expected_fs (int): The expected sample rate of the input wav file.
Returns:
The audio signal as a 1-dim Numpy array. The values will be in the range [-1.0, 1.0]. fixme (not yet)
"""
fs, y = scipy.io.wavfile.read(in_file)
num_type = y[0].dtype
if num_type == 'int16':
y = y*(1.0/32768)
elif num_type == 'int32':
y = y*(1.0/2147483648)
elif num_type == 'float32':
# Nothing to do
pass
elif num_type == 'uint8':
raise Exception('8-bit PCM is not supported.')
else:
raise Exception('Unknown format.')
if fs != expected_fs:
raise Exception('Invalid sample rate.')
if y.ndim == 1:
return y
else:
return y.mean(axis=1)
def reconstruct_signal_griffin_lim(magnitude_spectrogram, fft_size, hopsamp, iterations):
"""Reconstruct an audio signal from a magnitude spectrogram.
Given a magnitude spectrogram as input, reconstruct
the audio signal and return it using the Griffin-Lim algorithm from the paper:
"Signal estimation from modified short-time fourier transform" by Griffin and Lim,
in IEEE transactions on Acoustics, Speech, and Signal Processing. Vol ASSP-32, No. 2, April 1984.
Args:
magnitude_spectrogram (2-dim Numpy array): The magnitude spectrogram. The rows correspond to the time slices
and the columns correspond to frequency bins.
fft_size (int): The FFT size, which should be a power of 2.
hopsamp (int): The hop size in samples.
iterations (int): Number of iterations for the Griffin-Lim algorithm. Typically a few hundred
is sufficient.
Returns:
The reconstructed time domain signal as a 1-dim Numpy array.
"""
time_slices = magnitude_spectrogram.shape[0]
len_samples = int(time_slices*hopsamp + fft_size)
# Initialize the reconstructed signal to noise.
x_reconstruct = np.random.randn(len_samples)
n = iterations # number of iterations of Griffin-Lim algorithm.
while n > 0:
n -= 1
reconstruction_spectrogram = stft_for_reconstruction(
x_reconstruct, fft_size, hopsamp)
reconstruction_angle = np.angle(reconstruction_spectrogram)
# Discard magnitude part of the reconstruction and use the supplied magnitude spectrogram instead.
proposal_spectrogram = magnitude_spectrogram * \
np.exp(1.0j*reconstruction_angle)
prev_x = x_reconstruct
x_reconstruct = istft_for_reconstruction(
proposal_spectrogram, fft_size, hopsamp)
diff = sqrt(sum((x_reconstruct - prev_x)**2)/x_reconstruct.size)
#print('Reconstruction iteration: {}/{} RMSE: {} '.format(iterations - n, iterations, diff))
return x_reconstruct
def save_audio_to_file(x, sample_rate, outfile='out.wav'):
"""Save a mono signal to a file.
Args:
x (1-dim Numpy array): The audio signal to save. The signal values should be in the range [-1.0, 1.0].
sample_rate (int): The sample rate of the signal, in Hz.
outfile: Name of the file to save.
"""
x_max = np.max(abs(x))
assert x_max <= 1.0, 'Input audio value is out of range. Should be in the range [-1.0, 1.0].'
x = x*32767.0
data = array.array('h')
for i in range(len(x)):
cur_samp = int(round(x[i]))
data.append(cur_samp)
f = wave.open(outfile, 'w')
f.setparams((1, 2, sample_rate, 0, "NONE", "Uncompressed"))
f.writeframes(data.tostring())
f.close()
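# End-to-end reconstruction sketch (assumes 'in.wav' exists and is sampled at 44.1 kHz;
# mirrors how griffin_lim() above expects a rescaled power spectrogram):
#
#     x = get_signal('in.wav', expected_fs=44100)
#     stft_mag = np.abs(stft_for_reconstruction(x, fft_size, hopsamp))**2.0
#     scale = 1.0 / np.amax(stft_mag)
#     x_rec = griffin_lim(stft_mag * scale, scale)
#     save_audio_to_file(x_rec, 44100, outfile='reconstructed.wav')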
| gpl-2.0 |
evidation-health/bokeh | examples/plotting/file/elements.py | 43 | 1485 | import pandas as pd
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata import periodic_table
elements = periodic_table.elements
elements = elements[elements["atomic number"] <= 82]
elements = elements[~pd.isnull(elements["melting point"])]
mass = [float(x.strip("[]")) for x in elements["atomic mass"]]
elements["atomic mass"] = mass
palette = list(reversed([
"#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"
]))
melting_points = elements["melting point"]
low = min(melting_points)
high= max(melting_points)
melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points] #gives items in colors a value from 0-10
meltingpointcolors = [palette[i] for i in melting_point_inds]
output_file("elements.html", title="elements.py example")
TOOLS = "pan,wheel_zoom,box_zoom,reset,resize,save"
p = figure(tools=TOOLS, toolbar_location="left", logo="grey", plot_width=1200)
p.title = "Density vs Atomic Weight of Elements (colored by melting point)"
p.background_fill= "#cccccc"
p.circle(elements["atomic mass"], elements["density"], size=12,
color=meltingpointcolors, line_color="black", fill_alpha=0.8)
p.text(elements["atomic mass"], elements["density"]+0.3,
text=elements["symbol"],text_color="#333333",
text_align="center", text_font_size="10pt")
p.xaxis.axis_label="atomic weight (amu)"
p.yaxis.axis_label="density (g/cm^3)"
p.grid.grid_line_color="white"
show(p)
| bsd-3-clause |
ericdill/pyqtgraph | examples/Symbols.py | 5 | 2313 | # -*- coding: utf-8 -*-
"""
This example shows all the scatter plot symbols available in pyqtgraph.
These symbols are used to mark point locations for scatter plots and some line
plots, similar to "markers" in matplotlib and vispy.
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title="Scatter Plot Symbols")
win.resize(1000,600)
pg.setConfigOptions(antialias=True)
plot = win.addPlot(title="Plotting with symbols")
plot.addLegend()
plot.plot([0, 1, 2, 3, 4], pen=(0,0,200), symbolBrush=(0,0,200), symbolPen='w', symbol='o', symbolSize=14, name="symbol='o'")
plot.plot([1, 2, 3, 4, 5], pen=(0,128,0), symbolBrush=(0,128,0), symbolPen='w', symbol='t', symbolSize=14, name="symbol='t'")
plot.plot([2, 3, 4, 5, 6], pen=(19,234,201), symbolBrush=(19,234,201), symbolPen='w', symbol='t1', symbolSize=14, name="symbol='t1'")
plot.plot([3, 4, 5, 6, 7], pen=(195,46,212), symbolBrush=(195,46,212), symbolPen='w', symbol='t2', symbolSize=14, name="symbol='t2'")
plot.plot([4, 5, 6, 7, 8], pen=(250,194,5), symbolBrush=(250,194,5), symbolPen='w', symbol='t3', symbolSize=14, name="symbol='t3'")
plot.plot([5, 6, 7, 8, 9], pen=(54,55,55), symbolBrush=(55,55,55), symbolPen='w', symbol='s', symbolSize=14, name="symbol='s'")
plot.plot([6, 7, 8, 9, 10], pen=(0,114,189), symbolBrush=(0,114,189), symbolPen='w', symbol='p', symbolSize=14, name="symbol='p'")
plot.plot([7, 8, 9, 10, 11], pen=(217,83,25), symbolBrush=(217,83,25), symbolPen='w', symbol='h', symbolSize=14, name="symbol='h'")
plot.plot([8, 9, 10, 11, 12], pen=(237,177,32), symbolBrush=(237,177,32), symbolPen='w', symbol='star', symbolSize=14, name="symbol='star'")
plot.plot([9, 10, 11, 12, 13], pen=(126,47,142), symbolBrush=(126,47,142), symbolPen='w', symbol='+', symbolSize=14, name="symbol='+'")
plot.plot([10, 11, 12, 13, 14], pen=(119,172,48), symbolBrush=(119,172,48), symbolPen='w', symbol='d', symbolSize=14, name="symbol='d'")
plot.setXRange(-2, 4)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit |
nuclear-wizard/moose | python/peacock/PostprocessorViewer/plugins/PostprocessorPlugin.py | 21 | 2454 | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import peacock
import mooseutils
from PyQt5 import QtWidgets
class PostprocessorPlugin(peacock.base.Plugin):
"""
The base class for creating a plugin for the PostprocessorViewer.
"""
def __init__(self):
super(PostprocessorPlugin, self).__init__()
# Initialize member variables
self._figure = None
self._axes = None
self._axes2 = None
# The default layout name
self.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Maximum)
self.setFixedWidth(520)
self.setMainLayoutName('LeftLayout')
def onFigureCreated(self, figure, axes, axes2):
"""
Stores the created figure and axes for use by the plugin. This slot becomes connected to the
figureCreated signal that is emitted by the FigurePlugin.
Args:
figure[plt.Figure]: The matplotlib figure object that postprocessor data is to be displayed.
            axes[plt.Axes]: The Axes with y-data displayed on the left side of the figure.
            axes2[plt.Axes]: The Axes with y-data displayed on the right side of the figure.
"""
self._figure = figure
self._axes = axes
self._axes2 = axes2
def repr(self):
"""
Return imports and script data
"""
return [], []
def figure(self):
"""
Returns the current matplotlib Figure object.
"""
return self._figure
def axes(self, index=None):
"""
Return the axes object(s).
"""
if index in [0, 'x', 'y']:
return self._axes
elif index in [1, 'y2']:
return self._axes2
return self._axes, self._axes2
def axis(self, name):
"""
Return the pyplot.Axis object.
Args:
name[str]: 'x', 'y', or 'y2'.
"""
if name == 'x':
return self.axes(0).get_xaxis()
elif name == 'y':
return self.axes(0).get_yaxis()
elif name == 'y2':
return self.axes(1).get_yaxis()
mooseutils.mooseError("Unknown axis name, must use: 'x', 'y', or 'y2'.")
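# A minimal usage sketch (a hypothetical subclass, not part of the framework):
# once the FigurePlugin emits figureCreated, onFigureCreated stores the figure
# and axes, after which data can be drawn through the accessors above.
class _ExamplePostprocessorPlugin(PostprocessorPlugin):
    def plotData(self, x, y, y2=None):
        self.axes('y').plot(x, y)        # left y-axis
        if y2 is not None:
            self.axes('y2').plot(x, y2)  # right y-axis
        self.figure().canvas.draw_idle()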
| lgpl-2.1 |
sumspr/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
Shaswat27/scipy | scipy/signal/waveforms.py | 64 | 14818 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
    # pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
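    Examples
    --------
    A minimal sketch (the parameter values are illustrative): a linear chirp
    sweeping from 6 Hz down to 1 Hz over 10 seconds.
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 10, 5001)
    >>> w = signal.chirp(t, f0=6, f1=1, t1=10, method='linear')
    >>> plt.plot(t, w)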
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
        Phase offset, in degrees. Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
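    Examples
    --------
    A minimal sketch (the coefficients are illustrative): a sweep whose
    instantaneous frequency is ``f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2``.
    >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    >>> t = np.linspace(0, 10, 5001)
    >>> w = sweep_poly(t, p)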
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
SeldonIO/seldon-server | python/seldon/tests/test_vw.py | 2 | 6371 | import unittest
import pandas as pd
from seldon import vw
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
import logging
class Test_VWClassifier(unittest.TestCase):
def test_sklearn_pipeline(self):
t = vw.VWClassifier(target="target")
f1 = {"target":0,"b":1.0,"c":0}
f2 = {"target":1,"b":0,"c":2.0}
fs = []
for i in range (1,50):
fs.append(f1)
fs.append(f2)
print "features=>",fs
df = pd.DataFrame.from_dict(fs)
estimators = [("vw",t)]
p = Pipeline(estimators)
print "fitting"
p.fit(df)
print "get preds 1 "
preds = p.predict_proba(df)
print preds
print "-------------------"
t.close()
        # vw sometimes needs some more time between invocations. Need to look into this.
import time
time.sleep(5)
joblib.dump(p,"/tmp/pipeline/p")
p2 = joblib.load("/tmp/pipeline/p")
print "get preds 2"
df3 = p2.predict_proba(df)
print df3
vw2 = p2._final_estimator
vw2.close()
def test_zero_based_target(self):
t = vw.VWClassifier(target="target",target_readable="name")
try:
df = pd.DataFrame.from_dict([{"target":0,"b":"c d","c":3,"name":"zeroTarget"},{"target":1,"b":"word2","name":"oneTarget"}])
t.fit(df)
scores = t.predict_proba(df)
print "scores->",scores
self.assertEquals(scores.shape[0],2)
self.assertEquals(scores.shape[1],2)
idMap = t.get_class_id_map()
print idMap
formatted_recs_list=[]
for index, proba in enumerate(scores[0]):
print index,proba
if index in idMap:
indexName = idMap[index]
else:
indexName = str(index)
formatted_recs_list.append({
"prediction": str(proba),
"predictedClass": indexName,
"confidence" : str(proba)
})
print formatted_recs_list
finally:
t.close()
def test_create_features(self):
t = vw.VWClassifier(target="target")
try:
df = pd.DataFrame.from_dict([{"target":"1","b":"c d","c":3},{"target":"2","b":"word2"}])
t.fit(df)
scores = t.predict_proba(df)
print scores
self.assertEquals(scores.shape[0],2)
self.assertEquals(scores.shape[1],2)
finally:
t.close()
def test_predictions(self):
t = vw.VWClassifier(target="target")
try:
df = pd.DataFrame.from_dict([{"target":"1","b":"c d","c":3},{"target":"2","b":"word2"}])
t.fit(df)
preds = t.predict(df)
print preds
self.assertEquals(preds[0],0)
self.assertEquals(preds[1],1)
finally:
t.close()
def test_dict_feature(self):
t = vw.VWClassifier(target="target")
try:
df = pd.DataFrame.from_dict([{"target":"1","df":{"1":0.234,"2":0.1}},{"target":"2","df":{"1":0.5}}])
t.fit(df)
scores = t.predict_proba(df)
print scores
self.assertEquals(scores.shape[0],2)
self.assertEquals(scores.shape[1],2)
finally:
t.close()
def test_list_feature(self):
t = vw.VWClassifier(target="target",num_iterations=10)
try:
df = pd.DataFrame.from_dict([{"target":"1","df":["a","b","c","d"]},{"target":"2","df":["x","y","z"]}])
t.fit(df)
df2 = pd.DataFrame.from_dict([{"df":["a","b","c","d"]},{"df":["x","y","z"]}])
scores = t.predict_proba(df2)
if not scores is None:
print scores
self.assertEquals(scores.shape[0],2)
self.assertTrue(scores[0][0]>scores[0][1])
self.assertEquals(scores.shape[1],2)
self.assertTrue(scores[1][0]<scores[1][1])
finally:
t.close()
def test_vw_same_score_bug(self):
t = vw.VWClassifier(target="target",num_iterations=10)
try:
df = pd.DataFrame.from_dict([{"target":"1","df":["a","b","c","d"]},{"target":"2","df":["x","y","z"]}])
t.fit(df)
df2 = pd.DataFrame.from_dict([{"df":["a","b","c","d"]},{"df":["x","y","z"]}])
scores = t.predict_proba(df2)
score_00 = scores[0][0]
score_10 = scores[1][0]
for i in range(1,4):
scores = t.predict_proba(df2)
self.assertEquals(scores[0][0],score_00)
self.assertEquals(scores[1][0],score_10)
finally:
t.close()
def test_large_number_features(self):
t = vw.VWClassifier(target="target")
try:
f = {}
f2 = {}
for i in range(1,5000):
f[str(i)] = 1
f2[str(i)] = 0.1
df = pd.DataFrame.from_dict([{"target":"1","df":f},{"target":"2","df":f2}])
t.fit(df)
scores = t.predict_proba(df)
print scores
self.assertEquals(scores.shape[0],2)
self.assertEquals(scores.shape[1],2)
finally:
t.close()
def test_vw_args(self):
t = vw.VWClassifier(target="target",b=18)
try:
f = {}
f2 = {}
for i in range(1,5000):
f[str(i)] = 1
f2[str(i)] = 0.1
df = pd.DataFrame.from_dict([{"target":"1","df":f},{"target":"2","df":f2}])
t.fit(df)
scores = t.predict_proba(df)
print scores
self.assertEquals(scores.shape[0],2)
self.assertEquals(scores.shape[1],2)
finally:
t.close()
def test_numpy_input(self):
t = vw.VWClassifier()
try:
X = np.random.randn(6,4)
y = np.array([1,2,1,1,2,2])
t.fit(X,y)
scores = t.predict_proba(X)
print scores
finally:
t.close()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
unittest.main()
| apache-2.0 |
sinhrks/expandas | pandas_ml/skaccessors/test/test_covariance.py | 2 | 3139 | #!/usr/bin/env python
import pytest
import sklearn.datasets as datasets
import sklearn.covariance as covariance
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestCovariance(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.covariance.EmpiricalCovariance, covariance.EmpiricalCovariance)
self.assertIs(df.covariance.EllipticEnvelope, covariance.EllipticEnvelope)
self.assertIs(df.covariance.GraphLasso, covariance.GraphLasso)
self.assertIs(df.covariance.GraphLassoCV, covariance.GraphLassoCV)
self.assertIs(df.covariance.LedoitWolf, covariance.LedoitWolf)
self.assertIs(df.covariance.MinCovDet, covariance.MinCovDet)
self.assertIs(df.covariance.OAS, covariance.OAS)
self.assertIs(df.covariance.ShrunkCovariance, covariance.ShrunkCovariance)
self.assertIs(df.covariance.shrunk_covariance, covariance.shrunk_covariance)
self.assertIs(df.covariance.graph_lasso, covariance.graph_lasso)
def test_empirical_covariance(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.covariance.empirical_covariance()
expected = covariance.empirical_covariance(iris.data)
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_index_equal(result.index, df.data.columns)
tm.assert_index_equal(result.columns, df.data.columns)
self.assert_numpy_array_almost_equal(result.values, expected)
def test_ledoit_wolf(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.covariance.ledoit_wolf()
expected = covariance.ledoit_wolf(iris.data)
self.assertEqual(len(result), 2)
self.assertIsInstance(result[0], pdml.ModelFrame)
tm.assert_index_equal(result[0].index, df.data.columns)
tm.assert_index_equal(result[0].columns, df.data.columns)
self.assert_numpy_array_almost_equal(result[0].values, expected[0])
self.assert_numpy_array_almost_equal(result[1], expected[1])
def test_oas(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.covariance.oas()
expected = covariance.oas(iris.data)
self.assertEqual(len(result), 2)
self.assertIsInstance(result[0], pdml.ModelFrame)
tm.assert_index_equal(result[0].index, df.data.columns)
tm.assert_index_equal(result[0].columns, df.data.columns)
self.assert_numpy_array_almost_equal(result[0].values, expected[0])
self.assert_numpy_array_almost_equal(result[1], expected[1])
@pytest.mark.parametrize("algo", ['EmpiricalCovariance', 'LedoitWolf'])
def test_Covariance(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.covariance, algo)()
mod2 = getattr(covariance, algo)()
df.fit(mod1)
mod2.fit(iris.data)
self.assert_numpy_array_almost_equal(mod1.covariance_, mod2.covariance_)
| bsd-3-clause |
sirrice/scorpion | tests/unit/frontier.py | 1 | 4634 | import pdb
import random
import numpy as np
from matplotlib import pyplot as plt
random.seed(.2)
from scorpion.sigmod.frontier import *
from scorpion.util import InfRenderer
class Cluster(object):
_id = 0
def __init__(self, tops, bots):
self.id = Cluster._id
Cluster._id += 1
self.tops = tops
self.bots = bots
self.c_range = [0.0, 1]
self.error = 0
pairs = zip(self.tops, self.bots)
f = lambda c: np.mean([t/pow(b,c) for t,b in pairs])
self.inf_func = np.vectorize(f)
def clone(self, *args, **kwargs):
c = Cluster(self.tops, self.bots)
c.c_range = list(self.c_range)
return c
@property
def bound_hash(self):
return hash(str([self.tops, self.bots]))
def __hash__(self):
return hash(str([self.tops, self.bots, self.c_range]))
def __str__(self):
return "%d\t%s\t%s\t%.4f, %.4f\t%.4f-%.4f\t%.4f - %.4f" % (
self.id, self.tops, self.bots, self.c_range[0], self.c_range[1],
self.c_range[0], self.c_range[1], self.inf_func(0), self.inf_func(1))
clusters = []
top, bot = 1, 1
for i in xrange(300):
tops = [float(random.randint(0, 20)) for i in xrange(5)]
bots = [float(random.randint(1, 20)) for i in xrange(5)]
c = Cluster(tops, bots)
clusters.append(c)
#xs = (np.arange(100) / 100.)
#all_infs = np.array([c.inf_func(xs) for c in clusters])
#print all_infs.shape
#medians = np.percentile(all_infs, 50, axis=0)
#print medians.shape
#print medians
#block = (medians.max() - medians.min()) / 50.
#print block
#opts = [0]
#ys = [medians[0]]
#prev = medians[0]
#for x, v in enumerate(medians):
# if abs(v-prev) >= block:
# opts.append(xs[x])
# ys.append(v)
# prev = v
#
#print len(opts)
#print opts
#print ys
#
#opts = np.array(opts)
#weights = opts[1:] - opts[:-1]
#weights = weights.astype(float) / weights.sum()
#tup = np.polyfit(opts[1:], weights, 2, full=True)
#print tup[0]
#print tup[1]
#def create_polynomial(coefficients):
# """
# Given a set of coefficients, return a function that takes a dataset size and computes the cost
#
# Coefficients are for increasing orders of x. E.g.,
#
# [2,9,5] -> 2*x^0 + 9*x^2 + 5*x^3
# """
#
# def f(x):
# return sum([a * (x**power) for power, a in enumerate(coefficients)])
# f.coeffs = coefficients
# return f
#
#f = create_polynomial(tup[0])
#
#
#
#renderer = InfRenderer('test.pdf')
#renderer.plot_inf_curves(clusters, color='grey', alpha=0.3)
#renderer.plot_points(xs, medians, color='red', alpha=1)
#renderer.plot_points(xs, np.average(all_infs, axis=0), color='blue', alpha=1)
#for x in opts:
# renderer.plot_points([x, x], [0, 20], color='black')
#
#renderer.plot_points(xs, f(xs)*20, color='green', alpha=1)
#renderer.close()
#
#
#exit()
#clusters = []
#for i in xrange(5):
# tops = [1,2,3,4,5]
# bots = [1,3,5,2,1]
# clusters.append(Cluster(tops, bots))
renderer = InfRenderer('test.pdf')
def print_stats(frontier):
for key, val in frontier.stats.items():
print key, '\t', val
for key, val in frontier.heap.stats.items():
print key, '\t', val
if True:
get_frontier = Frontier([0,1])
start = time.time()
frontier, removed = get_frontier(clusters)
print time.time() - start
print_stats(get_frontier)
renderer.plot_inf_curves(clusters, color='grey', alpha=0.3)
renderer.plot_active_inf_curves(frontier)
for c in frontier:
print c
if False:
print 'removed'
for c in removed:
print c
if True:
for c in clusters:
c.c_range = [0,1]
f = CheapFrontier([0,1], K=1, nblocks=25)
start = time.time()
frontier, removed = f(clusters)
print time.time() - start
print_stats(f)
renderer.new_page()
renderer.plot_inf_curves(clusters, color='grey', alpha=0.3)
renderer.plot_active_inf_curves(frontier)
for c in frontier:
print c
if False:
print 'removed'
for c in removed:
print c
if True:
for c in clusters:
c.c_range = [0,1]
f = CheapFrontier([0,1], K=1, nblocks=25)
start = time.time()
f.update(clusters[:50])
f.update(clusters[50:])
print time.time() - start
print_stats(f)
frontier = f.frontier
renderer.new_page()
#renderer.plot_inf_curves(clusters, color='grey', alpha=0.3)
renderer.plot_active_inf_curves(frontier)
for c in frontier:
print c
if False:
for c in clusters:
c.c_range = [0,1]
f2 = ContinuousFrontier([0,1])
start = time.time()
for c in clusters:
map(len, f2.update([c]))
print time.time() - start
print_stats(f2)
frontier = f2.frontier
renderer.new_page()
renderer.plot_inf_curves(clusters, color='grey', alpha=0.3)
renderer.plot_active_inf_curves(frontier)
renderer.close()
| mit |
alexeyum/scikit-learn | sklearn/utils/tests/test_utils.py | 16 | 9120 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from scipy.linalg import eigh
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.graph import graph_laplacian
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1],
replace=False, n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
    # Issue #6581: n_samples can be more when replace is True (default).
assert_equal(len(resample([1, 2], n_samples=5)), 5)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
v0 = random_state.uniform(-1,1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes can let any indexable datastructure pass-through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
atantet/ergoPack | example/stat/getCorrPower.py | 1 | 6311 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import pylibconfig2
from ergoPack import ergoStat, ergoPlot
configFile = '../cfg/Chekroun2019.cfg'
cfg = pylibconfig2.Config()
cfg.read_file(configFile)
L = cfg.simulation.LCut + cfg.simulation.spinup
printStepNum = int(cfg.simulation.printStep / cfg.simulation.dt)
caseName = cfg.model.caseName
# delayName = ""
# if hasattr(cfg.model, 'delaysDays'):
# for d in np.arange(len(cfg.model.delaysDays)):
# delayName = "%s_d%d" % (delayName, cfg.model.delaysDays[d])
# if (hasattr(cfg.model, 'rho') & hasattr(cfg.model, 'sigma') & hasattr(cfg.model, 'beta')):
# caseName = "%s_rho%d_sigma%d_beta%d" \
# % (caseName, (int) (cfg.model.rho * 1000),
# (int) (cfg.model.sigma * 1000), (int) (cfg.model.beta * 1000))
# srcPostfix = "_%s%s_L%d_spinup%d_dt%d_samp%d" \
# % (caseName, delayName, L, cfg.simulation.spinup,
# -np.round(np.log10(cfg.simulation.dt)), printStepNum)
srcPostfix = (
"_{}_mu{:04d}_alpha{:04d}_gamma{:04d}_delta{:04d}_beta{:04d}_eps{:04d}_sep{:04d}"
"_L{:d}_spinup{:d}_dt{:d}_samp{:d}".format(
caseName, int(cfg.model.mu * 10000 + 0.1),
int(cfg.model.alpha * 10000 + 0.1), int(cfg.model.gamma * 10000 + 0.1),
int(cfg.model.delta * 10000 + 0.1), int(cfg.model.beta * 10000 + 0.1),
int(cfg.model.eps * 10000 + 0.1), int(cfg.model.sep * 10000 + 0.1),
int(L + 0.1), int(cfg.simulation.spinup + 0.1),
int(-np.round(np.log10(cfg.simulation.dt)) + 0.1), printStepNum))
sampFreq = 1. / cfg.simulation.printStep
lagMaxNum = int(np.round(cfg.stat.lagMax / cfg.simulation.printStep))
lags = np.arange(-cfg.stat.lagMax, cfg.stat.lagMax + 0.999 *
cfg.simulation.printStep, cfg.simulation.printStep)
corrName = 'C{:d}{:d}'.format(cfg.stat.idxf, cfg.stat.idxg)
powerName = 'S{:d}{:d}'.format(cfg.stat.idxf, cfg.stat.idxg)
lagMaxSample = int(cfg.stat.lagMax * sampFreq + 0.1)
lags = np.arange(-cfg.stat.lagMax, cfg.stat.lagMax + 0.999 / sampFreq,
1. / sampFreq)
nLags = lags.shape[0]
nTraj = cfg.sprinkle.nTraj
dstPostfix = "{}_nTraj{:d}".format(srcPostfix, nTraj)
corrSample = np.zeros((nLags,))
for traj in np.arange(nTraj):
print('for traj {:d}'.format(traj))
# Read time series
simFile = '{}/simulation/sim{}_traj{:d}.{}'.format(
cfg.general.resDir, srcPostfix, traj, cfg.general.fileFormat)
print('Reading time series from ' + simFile)
if cfg.general.fileFormat == 'bin':
X = np.fromfile(simFile, dtype=float,
count=int(np.round(cfg.model.dim * cfg.simulation.LCut
/ cfg.simulation.printStep)))
else:
X = np.loadtxt(simFile, dtype=float)
X = X.reshape(-1, cfg.model.dim)
# Read datasets
observable1 = X[:, cfg.stat.idxf]
observable2 = X[:, cfg.stat.idxg]
nt = observable1.shape[0]
ntWindow = int(cfg.stat.chunkWidth * sampFreq)
# Get corrSample averaged over trajectories (should add weights based on length)
    # (do not normalize here, because we sum up the trajectories)
print('Computing correlation function')
corrSample += ergoStat.ccf(observable1, observable2,
lagMax=cfg.stat.lagMax,
sampFreq=sampFreq, norm=False)
# Get common frequencies
if traj == 0:
nChunks = int(nt / (cfg.stat.chunkWidth * sampFreq))
freq = ergoStat.getFreqPow2(cfg.stat.chunkWidth,
sampFreq=sampFreq)
nfft = freq.shape[0]
powerSample = np.zeros((nfft,))
powerSampleSTD = np.zeros((nfft,))
# Get powerSample averaged over trajectories
# (should add weights based on length)
print('Computing periodogram')
(freq, powerSampleTraj, powerSampleSTDTraj) \
= ergoStat.getPerio(observable1, observable2,
freq=freq, sampFreq=sampFreq,
chunkWidth=cfg.stat.chunkWidth, norm=False)
powerSample += powerSampleTraj
powerSampleSTD += powerSampleSTDTraj**2 * nChunks
corrSample /= nTraj
powerSample /= nTraj
powerSampleSTD = np.sqrt(powerSampleSTD / (nTraj * nChunks))
if cfg.stat.norm:
cov = corrSample[(lags.shape[0] - 1) // 2]
corrSample /= cov
powerSample /= cov
powerSampleSTD /= cov
# Save results
np.savetxt(os.path.join(
cfg.general.resDir, 'correlation', 'corrSample{}_lagMax{:d}yr.txt'.format(
dstPostfix, int(cfg.stat.lagMax * 1e4 + 0.1))), corrSample)
np.savetxt(os.path.join(
cfg.general.resDir, 'correlation', 'lags{}_lagMax{:d}yr.txt'.format(
dstPostfix, int(cfg.stat.lagMax * 1e4 + 0.1))), lags)
np.savetxt(os.path.join(
cfg.general.resDir, 'power', 'powerSample{}_chunk{:d}yr.txt'.format(
dstPostfix, int(cfg.stat.chunkWidth + 0.1))), powerSample)
np.savetxt(os.path.join(
cfg.general.resDir, 'power', 'powerSampleSTD{}_chunk{:d}yr.txt'.format(
dstPostfix, int(cfg.stat.chunkWidth + 0.1))), powerSampleSTD)
np.savetxt(os.path.join(
cfg.general.resDir, 'power', 'freq{}_chunk{:d}yr.txt'.format(
dstPostfix, int(cfg.stat.chunkWidth + 0.1))), freq)
# Plot corrSample
print('Plotting correlation function...')
(fig, ax) = ergoPlot.plotCCF(corrSample, lags, absUnit='y',
plotPositive=True)
plt.savefig(os.path.join(
cfg.general.plotDir, 'correlation', 'corrSample{}_lagMax{:d}yr.{}'.format(
dstPostfix, int(cfg.stat.lagMax * 1e4 + 0.1), ergoPlot.figFormat)),
dpi=ergoPlot.dpi, bbox_inches=ergoPlot.bbox_inches)
# Plot powerSample
print('Plotting periodogram...')
angFreq = freq * 2 * np.pi
(fig, ax) = ergoPlot.plotPerio(powerSample, perioSTD=powerSampleSTD,
freq=angFreq, plotPositive=True,
absUnit='', yscale='log',
xlim=(0, cfg.stat.angFreqMax),
ylim=(cfg.stat.powerMin, cfg.stat.powerMax))
fig.savefig(os.path.join(
cfg.general.plotDir, 'power', 'powerSample{}_chunk{:d}yr.{}'.format(
dstPostfix, int(cfg.stat.chunkWidth + 0.1), ergoPlot.figFormat)),
dpi=ergoPlot.dpi, bbox_inches=ergoPlot.bbox_inches)
plt.show(block=False)
| gpl-3.0 |
jm-begon/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
RPGOne/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | 104 | 2878 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/io/tests/json/test_json_norm.py | 7 | 8594 | import nose
from pandas import DataFrame
import numpy as np
import json
import pandas.util.testing as tm
from pandas import compat
from pandas.io.json import json_normalize, nested_to_record
def _assert_equal_data(left, right):
if not left.columns.equals(right.columns):
left = left.reindex(columns=right.columns)
tm.assert_frame_equal(left, right)
class TestJSONNormalize(tm.TestCase):
def setUp(self):
self.state_data = [
{'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}],
'info': {'governor': 'Rick Scott'},
'shortname': 'FL',
'state': 'Florida'},
{'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}],
'info': {'governor': 'John Kasich'},
'shortname': 'OH',
'state': 'Ohio'}]
def test_simple_records(self):
recs = [{'a': 1, 'b': 2, 'c': 3},
{'a': 4, 'b': 5, 'c': 6},
{'a': 7, 'b': 8, 'c': 9},
{'a': 10, 'b': 11, 'c': 12}]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self):
result = json_normalize(self.state_data[0], 'counties')
expected = DataFrame(self.state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties')
expected = []
for rec in self.state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties', meta='state')
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_more_deeply_nested(self):
data = [{'country': 'USA',
'states': [{'name': 'California',
'cities': [{'name': 'San Francisco',
'pop': 12345},
{'name': 'Los Angeles',
'pop': 12346}]
},
{'name': 'Ohio',
'cities': [{'name': 'Columbus',
'pop': 1234},
{'name': 'Cleveland',
'pop': 1236}]}
]
},
{'country': 'Germany',
'states': [{'name': 'Bayern',
'cities': [{'name': 'Munich', 'pop': 12347}]
},
{'name': 'Nordrhein-Westfalen',
'cities': [{'name': 'Duesseldorf', 'pop': 1238},
{'name': 'Koeln', 'pop': 1239}]}
]
}
]
result = json_normalize(data, ['states', 'cities'],
meta=['country', ['states', 'name']])
# meta_prefix={'states': 'state_'})
ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
'states.name': ['California', 'California', 'Ohio', 'Ohio',
'Bayern', 'Nordrhein-Westfalen',
'Nordrhein-Westfalen'],
'name': ['San Francisco', 'Los Angeles', 'Columbus',
'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [{'state': 'Florida',
'shortname': 'FL',
'info': {
'governor': 'Rick Scott'
},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {
'governor': 'John Kasich'
},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
result = json_normalize(data, 'counties',
['state', 'shortname',
['info', 'governor']])
ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
'Cuyahoga'],
'state': ['Florida'] * 3 + ['Ohio'] * 2,
'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
'population': [12345, 40000, 60000, 1234, 1337]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [{'foo': 'hello',
'bar': 'there',
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]
self.assertRaises(ValueError, json_normalize, data,
'data', meta=['foo', 'bar'])
result = json_normalize(data, 'data', meta=['foo', 'bar'],
meta_prefix='meta')
for val in ['metafoo', 'metabar', 'foo', 'bar']:
self.assertTrue(val in result)
def test_record_prefix(self):
result = json_normalize(self.state_data[0], 'counties')
expected = DataFrame(self.state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties',
meta='state',
record_prefix='county_')
expected = []
for rec in self.state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: 'county_' + x)
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_non_ascii_key(self):
if compat.PY3:
testjson = (
b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' +
b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
).decode('utf8')
else:
testjson = ('[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]')
testdata = {
u'sub.A': [1, 3],
u'sub.B': [2, 4],
b"\xc3\x9cnic\xc3\xb8de".decode('utf8'): [0, 1]
}
expected = DataFrame(testdata)
result = json_normalize(json.loads(testjson))
tm.assert_frame_equal(result, expected)
class TestNestedToRecord(tm.TestCase):
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2),
dict(flat1=3, flat2=4),
]
result = nested_to_record(recs)
expected = recs
self.assertEqual(result, expected)
def test_one_level_deep_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1}
self.assertEqual(result, expected)
def test_nested_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2),
nested=dict(e=dict(c=1, d=2),
d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
self.assertEqual(result, expected)
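# Quick usage sketch (not collected by the test runner; the data mirror the
# fixtures above): json_normalize flattens the records found under a nested
# path and can pull fields from the enclosing record in via ``meta``.
def _example_json_normalize():
    data = [{'state': 'Florida',
             'info': {'governor': 'Rick Scott'},
             'counties': [{'name': 'Dade', 'population': 12345},
                          {'name': 'Broward', 'population': 40000}]}]
    # One row per county; 'state' and the nested 'info.governor' are repeated
    # onto every row, and the record columns get the 'county_' prefix.
    return json_normalize(data, 'counties',
                          meta=['state', ['info', 'governor']],
                          record_prefix='county_')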
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb',
'--pdb-failure', '-s'], exit=False)
| mit |
alexmojaki/odo | odo/backends/tests/test_hdfstore.py | 4 | 4599 | from __future__ import absolute_import, division, print_function
import os
from odo.backends.hdfstore import discover
from contextlib import contextmanager
from odo.utils import tmpfile
from odo.chunks import chunks
from odo import into, append, convert, resource, discover, odo
import datashape
import pandas as pd
from datetime import datetime
import numpy as np
try:
f = pd.HDFStore('foo')
except (RuntimeError, ImportError) as e:
import pytest
pytest.skip('skipping test_hdfstore.py %s' % e)
else:
f.close()
os.remove('foo')
df = pd.DataFrame([['a', 1, 10., datetime(2000, 1, 1)],
['ab', 2, 20., datetime(2000, 2, 2)],
['abc', 3, 30., datetime(2000, 3, 3)],
['abcd', 4, 40., datetime(2000, 4, 4)]],
columns=['name', 'a', 'b', 'time'])
@contextmanager
def file(df):
with tmpfile('.hdf5') as fn:
f = pd.HDFStore(fn)
f.put('/data', df, format='table', append=True)
try:
yield fn, f, f.get_storer('/data')
finally:
f.close()
def test_discover():
with file(df) as (fn, f, dset):
assert str(discover(dset)) == str(discover(df))
assert str(discover(f)) == str(discover({'data': df}))
def test_discover_nested():
with tmpfile('hdf5') as fn:
df.to_hdf(fn, '/a/b/data')
df.to_hdf(fn, '/a/b/data2')
df.to_hdf(fn, '/a/data')
hdf = pd.HDFStore(fn)
try:
assert discover(hdf) == discover({'a': {'b': {'data': df, 'data2': df},
'data': df}})
finally:
hdf.close()
def eq(a, b):
if isinstance(a, pd.DataFrame):
a = into(np.ndarray, a)
if isinstance(b, pd.DataFrame):
b = into(np.ndarray, b)
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_chunks():
with file(df) as (fn, f, dset):
c = convert(chunks(pd.DataFrame), dset)
assert eq(convert(np.ndarray, c), df)
def test_resource_no_info():
with tmpfile('.hdf5') as fn:
r = resource('hdfstore://' + fn)
assert isinstance(r, pd.HDFStore)
r.close()
def test_resource_of_dataset():
with tmpfile('.hdf5') as fn:
ds = datashape.dshape('{x: int32, y: 3 * int32}')
r = resource('hdfstore://'+fn+'::/x', dshape=ds)
assert r
r.parent.close()
def test_append():
with file(df) as (fn, f, dset):
append(dset, df)
append(dset, df)
assert discover(dset).shape == (len(df) * 3,)
def test_into_resource():
with tmpfile('.hdf5') as fn:
d = into('hdfstore://' + fn + '::/x', df)
assert discover(d) == discover(df)
assert eq(into(pd.DataFrame, d), df)
d.parent.close()
def test_convert_pandas():
with file(df) as (fn, f, dset):
assert eq(convert(pd.DataFrame, dset), df)
def test_convert_chunks():
with file(df) as (fn, f, dset):
c = convert(chunks(pd.DataFrame), dset, chunksize=len(df) / 2)
assert len(list(c)) == 2
assert eq(convert(pd.DataFrame, c), df)
def test_append_chunks():
with file(df) as (fn, f, dset):
append(dset, chunks(pd.DataFrame)([df, df]))
assert discover(dset).shape[0] == len(df) * 3
def test_append_other():
with tmpfile('.hdf5') as fn:
x = into(np.ndarray, df)
dset = into('hdfstore://'+fn+'::/data', x)
assert discover(dset) == discover(df)
dset.parent.close()
def test_fixed_shape():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo')
r = resource('hdfstore://'+fn+'::/foo')
assert isinstance(r.shape, list)
assert discover(r).shape == (len(df),)
r.parent.close()
def test_fixed_convert():
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo')
r = resource('hdfstore://'+fn+'::/foo')
assert eq(convert(pd.DataFrame, r), df)
r.parent.close()
def test_append_vs_write():
import pandas.util.testing as tm
with tmpfile('.hdf5') as fn:
df.to_hdf(fn, 'foo', append=True)
store = odo(df, 'hdfstore://%s::foo' % fn)
try:
newdf = odo(store, pd.DataFrame)
finally:
store.parent.close()
tm.assert_frame_equal(newdf, pd.concat([df, df]))
with tmpfile('.hdf5') as fn:
store = odo(df, 'hdfstore://%s::foo' % fn, mode='w')
try:
newdf = odo(store, pd.DataFrame)
finally:
store.parent.close()
tm.assert_frame_equal(newdf, df)
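# Minimal round-trip sketch (illustrative, mirroring the tests above): an
# ``hdfstore://<file>::<datapath>`` URI addresses one table inside a pandas
# HDFStore, and odo moves data into and out of it.
def _example_round_trip():
    with tmpfile('.hdf5') as fn:
        store = odo(df, 'hdfstore://%s::/data' % fn)  # write the module-level df
        try:
            out = odo(store, pd.DataFrame)            # read it back as a DataFrame
        finally:
            store.parent.close()
    return out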
| bsd-3-clause |
saketkc/galaxy_tools | inchlib_clust/inchlib_clust.py | 8 | 24156 | #coding: utf-8
from __future__ import print_function
import csv, json, copy, re, argparse, os, urllib2
import numpy, scipy, fastcluster, sklearn
import scipy.cluster.hierarchy as hcluster
from sklearn import preprocessing
from scipy import spatial
LINKAGES = ["single", "complete", "average", "centroid", "ward", "median", "weighted"]
RAW_LINKAGES = ["ward", "centroid"]
DISTANCES = {"numeric": ["braycurtis", "canberra", "chebyshev", "cityblock", "correlation", "cosine", "euclidean", "mahalanobis", "minkowski", "seuclidean", "sqeuclidean"],
"binary": ["dice","hamming","jaccard","kulsinski","matching","rogerstanimoto","russellrao","sokalmichener","sokalsneath","yule"]}
class Dendrogram():
"""Class which handles the generation of cluster heatmap format of clustered data.
As an input it takes a Cluster instance with clustered data."""
def __init__(self, clustering):
self.cluster_object = clustering
self.data_type = clustering.data_type
self.axis = clustering.clustering_axis
self.clustering = clustering.clustering
self.tree = hcluster.to_tree(self.clustering)
self.data = clustering.data
self.data_names = clustering.data_names
self.header = clustering.header
self.dendrogram = False
def __get_cluster_heatmap__(self, write_data):
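        # Convert the scipy linkage result into InCHlib's node dictionary: every
        # node keeps its parent/children ids and merge distance, and every leaf
        # additionally carries the object name and (optionally) its feature row.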
root, nodes = hcluster.to_tree(self.clustering, rd=True)
node_id2node = {}
dendrogram = {"nodes":{}}
for node in nodes:
node_id = node.id
if node.count == 1:
node_id2node[node_id] = {"count":1, "distance":0}
else:
node_left_child = node.get_left().id
node_right_child = node.get_right().id
node_id2node[node_id] = {"count":node.count, "distance":round(node.dist, 3), "left_child": node_left_child, "right_child": node_right_child}
for n in node_id2node:
node = node_id2node[n]
if node["count"] != 1:
node_id2node[node["left_child"]]["parent"] = n
node_id2node[node["right_child"]]["parent"] = n
for n in node_id2node:
node = node_id2node[n]
if node["count"] == 1:
data = self.data[n]
node["objects"] = [self.data_names[n]]
if node_id2node[node["parent"]]["left_child"] == n:
node_id2node[node["parent"]]["left_child"] = n
else:
node_id2node[node["parent"]]["right_child"] = n
if not write_data:
data = []
node["features"] = data
dendrogram["nodes"][n] = node
for n in node_id2node:
if node_id2node[n]["count"] != 1:
dendrogram["nodes"][n] = node_id2node[n]
return dendrogram
def __get_column_dendrogram__(self):
root, nodes = hcluster.to_tree(self.cluster_object.column_clustering, rd=True)
node_id2node = {}
dendrogram = {"nodes":{}}
for node in nodes:
node_id = node.id
if node.count == 1:
node_id2node[node_id] = {"count":1, "distance":0}
else:
node_left_child = node.get_left().id
node_right_child = node.get_right().id
node_id2node[node_id] = {"count":node.count, "distance":round(node.dist, 3), "left_child": node_left_child, "right_child": node_right_child}
for n in node_id2node:
node = node_id2node[n]
if node["count"] != 1:
node_id2node[node["left_child"]]["parent"] = n
node_id2node[node["right_child"]]["parent"] = n
for n in node_id2node:
if not n in dendrogram["nodes"]:
dendrogram["nodes"][n] = node_id2node[n]
return dendrogram
def create_cluster_heatmap(self, compress=False, compressed_value="median", write_data=True):
"""Creates cluster heatmap representation in inchlib format. By setting compress parameter to True you can
cut the dendrogram in a distance to decrease the row size of the heatmap to specified count.
When compressing the type of the resulted value of merged rows is given by the compressed_value parameter (median, mean).
When the metadata are nominal (text values) the most frequent is the result after compression.
By setting write_data to False the data features won't be present in the resulting format."""
self.dendrogram = {"data": self.__get_cluster_heatmap__(write_data)}
self.compress = compress
self.compressed_value = compressed_value
self.compress_cluster_treshold = 0
if self.compress and self.compress >= 0:
self.compress_cluster_treshold = self.__get_distance_treshold__(compress)
print("Distance treshold for compression:", self.compress_cluster_treshold)
if self.compress_cluster_treshold >= 0:
self.__compress_data__()
else:
self.compress = False
if self.header and write_data:
self.dendrogram["data"]["feature_names"] = [h for h in self.header]
elif self.header and not write_data:
self.dendrogram["data"]["feature_names"] = []
if self.axis == "both" and len(self.cluster_object.column_clustering):
column_dendrogram = hcluster.to_tree(self.cluster_object.column_clustering)
self.dendrogram["column_dendrogram"] = self.__get_column_dendrogram__()
return
def __compress_data__(self):
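        # Collapse every leaf whose parent sits below the compression distance
        # threshold into a single pseudo-leaf; the merged feature rows are then
        # reduced column-wise with the chosen aggregate (median or mean).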
nodes = {}
to_remove = set()
compressed_value2fnc = {
"median": lambda values: [round(numpy.median(value), 3) for value in values],
"mean": lambda values: [round(numpy.average(value), 3) for value in values],
}
for n in self.dendrogram["data"]["nodes"]:
node = self.dendrogram["data"]["nodes"][n]
if node["count"] == 1:
objects = node["objects"]
data = node["features"]
node_id = n
while self.dendrogram["data"]["nodes"][node["parent"]]["distance"] <= self.compress_cluster_treshold:
to_remove.add(node_id)
node_id = node["parent"]
node = self.dendrogram["data"]["nodes"][node_id]
if node["count"] != 1:
if not "objects" in self.dendrogram["data"]["nodes"][node_id]:
self.dendrogram["data"]["nodes"][node_id]["objects"] = []
self.dendrogram["data"]["nodes"][node_id]["features"] = []
self.dendrogram["data"]["nodes"][node_id]["objects"].extend(objects)
if data:
self.dendrogram["data"]["nodes"][node_id]["features"].append(data)
for node in to_remove:
self.dendrogram["data"]["nodes"].pop(node)
for k in self.dendrogram["data"]["nodes"]:
node = self.dendrogram["data"]["nodes"][k]
if "objects" in node and node["count"] != 1:
self.dendrogram["data"]["nodes"][k]["distance"] = 0
self.dendrogram["data"]["nodes"][k]["count"] = 1
self.dendrogram["data"]["nodes"][k].pop("left_child")
self.dendrogram["data"]["nodes"][k].pop("right_child")
rows = zip(*self.dendrogram["data"]["nodes"][k]["features"])
self.dendrogram["data"]["nodes"][k]["features"] = compressed_value2fnc[self.compressed_value](rows)
self.__adjust_node_counts__()
return
def __adjust_node_counts__(self):
leaves = []
for n in self.dendrogram["data"]["nodes"]:
if self.dendrogram["data"]["nodes"][n]["count"] > 1:
self.dendrogram["data"]["nodes"][n]["count"] = 0
else:
leaves.append(n)
for n in leaves:
node = self.dendrogram["data"]["nodes"][n]
parent_id = node["parent"]
while parent_id:
node = self.dendrogram["data"]["nodes"][parent_id]
self.dendrogram["data"]["nodes"][parent_id]["count"] += 1
parent_id = False
if "parent" in node:
parent_id = node["parent"]
return
def __get_distance_treshold__(self, cluster_count):
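        # Search the distance axis with a halving step until cutting the tree
        # (scipy fcluster) yields the requested number of flat clusters; the
        # returned distance is later used as the compression threshold.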
print("Calculating distance treshold for cluster compression...")
if cluster_count >= self.tree.count:
return -1
i = 0
count = cluster_count + 1
test_step = self.tree.dist/2
while test_step >= 0.1:
count = len(set([c for c in hcluster.fcluster(self.clustering, i, "distance")]))
if count < cluster_count:
if i == 0:
return 0
i = i - test_step
test_step = test_step/2
elif count == cluster_count:
return i
else:
i += test_step
return i+test_step*2
def export_cluster_heatmap_as_json(self, filename=None):
"""Returns cluster heatmap in a JSON format or exports it to the file specified by the filename parameter."""
dendrogram_json = json.dumps(self.dendrogram, indent=4)
        if filename:
            with open(filename, "w") as output:
                output.write(dendrogram_json)
return dendrogram_json
def export_cluster_heatmap_as_html(self, htmldir="."):
"""Export simple HTML page with embedded cluster heatmap and dependencies to given directory."""
if not os.path.exists(htmldir):
os.makedirs(htmldir)
dendrogram_json = json.dumps(self.dendrogram, indent=4)
template = """<html>
<head>
<script src="jquery-2.0.3.min.js"></script>
<script src="kinetic-v5.0.0.min.js"></script>
<script src="inchlib-1.0.1.min.js"></script>
<script>
$(document).ready(function() {{
var data = {};
var inchlib = new InCHlib({{
target: "inchlib",
max_height: 1200,
width: 1000,
}});
inchlib.read_data(data);
inchlib.draw();
}});
</script>
</head>
<body>
<div id="inchlib"></div>
</body>
</html>""".format(dendrogram_json)
lib2url = {"inchlib-1.0.1.min.js": "http://openscreen.cz/software/inchlib/static/js/inchlib-1.0.1.min.js",
"jquery-2.0.3.min.js": "http://openscreen.cz/software/inchlib/static/js/jquery-2.0.3.min.js",
"kinetic-v5.0.0.min.js": "http://openscreen.cz/software/inchlib/static/js/kinetic-v5.0.0.min.js"}
for lib, url in lib2url.items():
try:
source = urllib2.urlopen(url)
source_html = source.read()
with open(os.path.join(htmldir, lib), "w") as output:
output.write(source_html)
except urllib2.URLError, e:
raise Exception("\nCan't download file {}.\nPlease check your internet connection and try again.\nIf the error persists there can be something wrong with the InCHlib server.\n".format(url))
        with open(os.path.join(htmldir, "inchlib.html"), "w") as output:
output.write(template)
return
def add_metadata_from_file(self, metadata_file, delimiter, header=True, metadata_compressed_value="median"):
"""Adds metadata from csv file.
Metadata_compressed_value specifies the resulted value when the data are compressed (median/mean/frequency)"""
self.metadata_compressed_value = metadata_compressed_value
self.metadata, self.metadata_header = self.__read_metadata_file__(metadata_file, delimiter, header)
self.__connect_metadata_to_data__()
return
def add_metadata(self, metadata, header=True, metadata_compressed_value="median"):
"""Adds metadata in a form of list of lists (tuples).
Metadata_compressed_value specifies the resulted value when the data are compressed (median/mean/frequency)"""
self.metadata_compressed_value = metadata_compressed_value
self.metadata, self.metadata_header = self.__read_metadata__(metadata, header)
self.__connect_metadata_to_data__()
return
def __connect_metadata_to_data__(self):
if len(set(self.metadata.keys()) & set(self.data_names)) == 0:
raise Exception("Metadata objects must correspond with original data objects.")
if not self.dendrogram:
raise Exception("You must create dendrogram before adding metadata.")
self.dendrogram["metadata"] = {"nodes":{}}
if self.metadata_header:
self.dendrogram["metadata"]["feature_names"] = self.metadata_header
leaves = {n:node for n, node in self.dendrogram["data"]["nodes"].items() if node["count"] == 1}
if not self.compress:
for leaf_id, leaf in leaves.items():
try:
self.dendrogram["metadata"]["nodes"][leaf_id] = self.metadata[leaf["objects"][0]]
except Exception, e:
continue
else:
            compressed_value2fnc = {
                "median": lambda values: round(numpy.median(values), 3),
                "mean": lambda values: round(numpy.average(values), 3)
            }
for leaf in leaves:
objects = []
for item in leaves[leaf]["objects"]:
try:
objects.append(self.metadata[item])
except Exception, e:
continue
cols = zip(*objects)
row = []
cols = [list(c) for c in cols]
for col in cols:
if self.metadata_compressed_value in compressed_value2fnc:
try:
col = [float(c) for c in col]
value = compressed_value2fnc[self.metadata_compressed_value](col)
except ValueError:
freq2val = {col.count(v):v for v in set(col)}
value = freq2val[max(freq2val.keys())]
elif self.metadata_compressed_value == "frequency":
freq2val = {col.count(v):v for v in set(col)}
value = freq2val[max(freq2val.keys())]
else:
raise Exception("Unkown type of metadata_compressed_value: {}. Possible values are: median, mean, frequency.".format(self.metadata_compressed_value))
row.append(value)
self.dendrogram["metadata"]["nodes"][leaf] = row
return
def __read_metadata__(self, metadata, header):
metadata_header = []
rows = metadata
metadata = {}
data_start = 0
if header:
metadata_header = rows[0][1:]
data_start = 1
for row in rows[data_start:]:
metadata[str(row[0])] = [r for r in row[1:]]
return metadata, metadata_header
def __read_metadata_file__(self, metadata_file, delimiter, header):
csv_reader = csv.reader(open(metadata_file, "r"), delimiter=delimiter)
metadata_header = []
rows = [row for row in csv_reader]
metadata = {}
data_start = 0
if header:
metadata_header = rows[0][1:]
data_start = 1
for row in rows[data_start:]:
metadata_id = str(row[0])
metadata[metadata_id] = [r for r in row[1:]]
return metadata, metadata_header
class Cluster():
"""Class for data clustering"""
def __init__(self):
self.write_original = False
def read_csv(self, filename, delimiter=",", header=False):
"""Reads data from the CSV file"""
self.filename = filename
csv_reader = csv.reader(open(self.filename, "r"), delimiter=delimiter)
rows = [row for row in csv_reader]
self.read_data(rows, header)
def read_data(self, rows, header=False):
"""Reads data in a form of list of lists (tuples)"""
self.header = header
data_start = 0
if self.header:
self.header = rows[0][1:]
data_start = 1
self.data_names = [str(row[0]) for row in rows[data_start:]]
self.data = [[round(float(value), 3) for value in row[1:]] for row in rows[data_start:]]
self.original_data = copy.deepcopy(self.data)
return
def normalize_data(self, feature_range=(0,1), write_original=False):
"""Normalizes data to a scale from 0 to 1. When write_original is set to True,
the normalized data will be clustered, but original data will be written to the heatmap."""
self.write_original = write_original
min_max_scaler = preprocessing.MinMaxScaler(feature_range)
self.data = min_max_scaler.fit_transform(self.data)
self.data = [[round(v, 3) for v in row] for row in self.data]
return
def cluster_data(self, data_type="numeric", row_distance="euclidean", row_linkage="single", axis="row", column_distance="euclidean", column_linkage="ward"):
"""Performs clustering according to the given parameters.
@data_type - numeric/binary
        @row_distance/column_distance - see the DISTANCES variable
        @row_linkage/column_linkage - see the LINKAGES variable
@axis - row/both
"""
print("Clustering rows:", row_distance, row_linkage)
self.data_type = data_type
self.clustering_axis = axis
row_linkage = str(row_linkage)
if row_linkage in RAW_LINKAGES:
self.clustering = fastcluster.linkage(self.data, method=row_linkage, metric=row_distance)
else:
self.distance_vector = fastcluster.pdist(self.data, row_distance)
if data_type in DISTANCES and not row_distance in DISTANCES[data_type]:
raise Exception("".join(["When clustering" , data_type, "data you must choose from these distance measures: ", ", ".join(DISTANCES[data_type])]))
elif not data_type in DISTANCES.keys():
raise Exception("".join(["You can choose only from data types: ", ", ".join(DISTANCES.keys())]))
self.clustering = fastcluster.linkage(self.distance_vector, method=str(row_linkage))
self.column_clustering = []
if axis == "both" and len(self.data[0]) > 2:
print("Clustering columns:", column_distance, column_linkage)
self.__cluster_columns__(column_distance, column_linkage)
if self.write_original:
self.data = self.original_data
return
def __cluster_columns__(self, column_distance, column_linkage):
columns = zip(*self.data)
self.column_clustering = fastcluster.linkage(columns, method=column_linkage, metric=column_distance)
self.data_order = hcluster.leaves_list(self.column_clustering)
self.data = self.__reorder_data__(self.data, self.data_order)
self.original_data = self.__reorder_data__(self.original_data, self.data_order)
if self.header:
self.header = self.__reorder_data__([self.header], self.data_order)[0]
return
def __reorder_data__(self, data, order):
for i in xrange(len(data)):
reordered_data = []
for j in order:
reordered_data.append(data[i][j])
reordered_data.reverse()
data[i] = reordered_data
return data
def _process_(arguments):
c = Cluster()
c.read_csv(arguments.data_file, arguments.data_delimiter, arguments.data_header)
if arguments.normalize:
c.normalize_data(feature_range=(0,1), write_original=arguments.write_original)
c.cluster_data(data_type=arguments.datatype, row_distance=arguments.row_distance, row_linkage=arguments.row_linkage, axis=arguments.axis, column_distance=arguments.column_distance, column_linkage=arguments.column_linkage)
d = Dendrogram(c)
d.create_cluster_heatmap(compress=arguments.compress, compressed_value=arguments.compressed_value, write_data=not arguments.dont_write_data)
if arguments.metadata:
d.add_metadata_from_file(metadata_file=arguments.metadata, delimiter=arguments.metadata_delimiter, header=arguments.metadata_header, metadata_compressed_value=arguments.metadata_compressed_value)
if arguments.output_file or arguments.html_dir:
if arguments.output_file:
d.export_cluster_heatmap_as_json(arguments.output_file)
else:
d.export_cluster_heatmap_as_html(arguments.html_dir)
else:
print(json.dumps(d.dendrogram, indent=4))
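# Library-style usage sketch (an illustration; it mirrors what _process_ wires
# together from the command line, and the file names below are placeholders):
def _example_library_usage():
    c = Cluster()
    c.read_csv("input.csv", delimiter=",", header=True)
    c.normalize_data(feature_range=(0, 1), write_original=True)
    c.cluster_data(data_type="numeric", row_distance="euclidean",
                   row_linkage="ward", axis="both")
    d = Dendrogram(c)
    d.create_cluster_heatmap(compress=100, compressed_value="median")
    return d.export_cluster_heatmap_as_json("clustered.json")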
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("data_file", type=str, help="csv(text) data file with delimited values")
parser.add_argument("-o", "--output_file", type=str, help="the name of output file")
parser.add_argument("-html", "--html_dir", type=str, help="the directory to store HTML page with dependencies")
parser.add_argument("-rd", "--row_distance", type=str, default="euclidean", help="set the distance to use for clustering rows")
parser.add_argument("-rl", "--row_linkage", type=str, default="ward", help="set the linkage to use for clustering rows")
parser.add_argument("-cd", "--column_distance", type=str, default="euclidean", help="set the distance to use for clustering columns (only when clustering by both axis -a parameter)")
parser.add_argument("-cl", "--column_linkage", type=str, default="ward", help="set the linkage to use for clustering columns (only when clustering by both axis -a parameter)")
parser.add_argument("-a", "--axis", type=str, default="row", help="define clustering axis (row/both)")
parser.add_argument("-dt", "--datatype", type=str, default="numeric", help="specify the type of the data (numeric/binary)")
parser.add_argument("-dd", "--data_delimiter", type=str, default=",", help="delimiter of values in data file")
parser.add_argument("-m", "--metadata", type=str, default=None, help="csv(text) metadata file with delimited values")
parser.add_argument("-md", "--metadata_delimiter", type=str, default=",", help="delimiter of values in metadata file")
parser.add_argument("-dh", "--data_header", default=False, help="whether the first row of data file is a header", action="store_true")
parser.add_argument("-mh", "--metadata_header", default=False, help="whether the first row of metadata file is a header", action="store_true")
parser.add_argument("-c", "--compress", type=int, default=0, help="compress the data to contain maximum of specified count of rows")
parser.add_argument("-cv", "--compressed_value", type=str, default="median", help="the resulted value from merged rows when the data are compressed (median/mean/frequency)")
parser.add_argument("-mcv", "--metadata_compressed_value", type=str, default="median", help="the resulted value from merged rows of metadata when the data are compressed (median/mean/count)")
parser.add_argument("-dwd", "--dont_write_data", default=False, help="don't write clustered data to the inchlib data format", action="store_true")
parser.add_argument("-n", "--normalize", default=False, help="normalize data to [0, 1] range", action="store_true")
parser.add_argument("-wo", "--write_original", default=False, help="cluster normalized data but write the original ones to the heatmap", action="store_true")
args = parser.parse_args()
_process_(args)
| mit |