repo_name (stringlengths 7-79) | path (stringlengths 4-179) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 959-798k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
marqh/iris | lib/iris/tests/unit/plot/test_contourf.py | 11 | 3169 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.contourf` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests import mock
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.contourf(self.cube, coords=('bar', 'str_coord'))
self.assertPointsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.contourf(self.cube, coords=('str_coord', 'bar'))
self.assertPointsTickLabels('xaxis')
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contourf(self.cube, axes=ax, coords=('bar', 'str_coord'))
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contourf(self.cube, axes=ax, coords=('str_coord', 'bar'))
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.contourf,
self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord('foo').points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord('bar').points
self.bar_index = np.arange(self.bar.size)
self.data = self.cube.data
self.dataT = self.data.T
mocker = mock.Mock(alpha=0, antialiased=False)
self.mpl_patch = self.patch('matplotlib.pyplot.contourf',
return_value=mocker)
self.draw_func = iplt.contourf
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Interface_Mesh_Types/Interface_5/HardContact_ElPPlShear/Interface_Test_Shear_Plot.py | 23 | 3513 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Shear.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
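# Resultant in-plane magnitudes, e.g. |gamma| = sqrt(gamma_x^2 + gamma_y^2);
# the plot below then uses the x-components only and normalizes the shear
# stress by the normal stress.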
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(shear_strain*5, shear_stress/normal_stress, '-r', label='Analytical Solution', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Interface_Surface_Interface.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(shear_strain*5, shear_stress/normal_stress, '-k', label='Numerical Solution', linewidth=4)
plt.xlabel(r"Shear Displacement $\Delta_t [mm]$")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])
outfigname = "Interface_Test_Shear_Stress.pdf";
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show() | cc0-1.0 |
CGATOxford/CGATPipelines | obsolete/reports/pipeline_capseq/trackers/macs.py | 1 | 5174 | import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import scipy.stats
import numpy.ma
import Stats
import Histogram
from CGATReport.Tracker import *
from cpgReport import *
##########################################################################
class MacsSummary(cpgTracker):
pattern = "(macs_summary)"
def getTracks(self, subset=None):
return self.getValues("SELECT track FROM macs_summary ORDER BY track")
def __call__(self, track, slice=None):
resultsdir = os.path.join(EXPORTDIR, "MACS")
fields = ("ncandidates_positive", "ncandidates_negative",
"called_positive", "called_negative", "min_tags",
"paired_peaks", "shift")
f = ",".join(fields)
data = self.getFirstRow(
'''SELECT %(f)s FROM macs_summary WHERE track="%(track)s"''' % locals())
result = odict(list(zip(fields, data)))
if os.path.exists(resultsdir):
print(resultsdir)
result[
"link"] = "`pdf <%(resultsdir)s/%(track)s.macs_model.pdf>`_" % locals()
return result
##########################################################################
class MacsSoloSummary(cpgTracker):
pattern = "(macs_solo_summary)"
def getTracks(self, subset=None):
return self.getValues("SELECT track FROM macs_solo_summary ORDER BY track")
def __call__(self, track, slice=None):
resultsdir = os.path.join(EXPORTDIR, "MACS")
fields = ("ncandidates_positive",
"called_positive", "min_tags",
"paired_peaks", "shift")
f = ",".join(fields)
data = self.getFirstRow(
'''SELECT %(f)s FROM macs_solo_summary WHERE track="%(track)s"''' % locals())
result = odict(list(zip(fields, data)))
if os.path.exists(resultsdir):
print(resultsdir)
result[
"link"] = "`pdf <%(resultsdir)s/%(track)s.macs_model.pdf>`_" % locals()
return result
##########################################################################
class MacsIntervalsSummary(cpgTracker):
"""Summary stats of intervals called by the peak finder. """
mPattern = "_macs_intervals$"
def __call__(self, track, slice=None):
data = self.getFirstRow(
"SELECT COUNT(*), round(AVG(length),0), round(AVG(nprobes),0) FROM %(track)s_macs_intervals" % locals())
return odict(list(zip(("intervals_count", "mean_interval_length", "mean_reads_per_interval"), data)))
##########################################################################
class FoldChangeThreshold(cpgTracker):
"""Count of intervals exceeding fold change threshold for each dataset. """
mPattern = "_foldchange$"
#
def __call__(self, track, slice=None):
data = self.get(
"SELECT threshold, intervals FROM %(track)s_foldchange" % locals())
return odict(list(zip(("Threshold", "Intervals"), list(zip(*data)))))
##########################################################################
class BackgroundSummary(cpgTracker):
"""Summary stats of reads mapping inside/outside binding intervals. """
mPattern = "_background$"
def __call__(self, track, slice=None):
data = self.getFirstRow( '''select in_peaks, out_peaks, (in_peaks+out_peaks) as total,
round(((in_peaks+0.00)/(out_peaks+in_peaks+0.00))*100,1) as ratio,
round(((out_peaks+0.00)/(out_peaks+in_peaks+0.00))*100,1) as ratio2
from %(track)s_background;''' % locals() )
return odict(list(zip(("Reads Overlapping intervals", "Reads Outwith Intervals", "Total Reads", "Percent in Intervals", "Percent Background"), data)))
##########################################################################
class IntervalsSummaryFiltered(cpgTracker):
"""Summary stats of intervals after filtering by fold change and merging nearby intervals. """
mPattern = "_macs_merged_intervals$"
def __call__(self, track, slice=None):
data = self.getRow(
"SELECT COUNT(*) as Intervals, round(AVG(length),0) as Mean_length, round(AVG(nprobes),0) as Mean_reads FROM %(track)s_macs_merged_intervals" % locals())
return data
##########################################################################
class MacsDiagnostics(cpgTracker):
"""Closest distance of transcript models to gene models in the reference set."""
pattern = "(.*)_macsdiag"
def __call__(self, track, slice=None):
data = self.get(
"SELECT fc,npeaks,p20,p30,p40,p50,p60,p70,p80,p90 FROM %(track)s_macsdiag" % locals())
result = odict()
for fc, npeaks, p20, p30, p40, p50, p60, p70, p80, p90 in data:
result[fc] = odict()
result[fc]["npeaks"] = npeaks
result[fc]["proportion of reads"] = list(range(20, 100, 10))
result[fc]["proportion of peaks"] = list(map(
float, (p20, p30, p40, p50, p60, p70, p80, p90)))
return result
| mit |
idwaker/sklearn_pydata2015 | notebooks/fig_code/helpers.py | 74 | 2301 | """
Small helpers for code that is not shown in the notebooks
"""
from sklearn import neighbors, datasets, linear_model
import pylab as pl
import numpy as np
from matplotlib.colors import ListedColormap
# Create color maps for 3-class classification problem, as with iris
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def plot_iris_knn():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
knn.fit(X, y)
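# Evaluate the classifier on a dense grid spanning the feature range to
# draw the decision regions.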
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.xlabel('sepal length (cm)')
pl.ylabel('sepal width (cm)')
pl.axis('tight')
def plot_polynomial_regression():
rng = np.random.RandomState(0)
x = 2*rng.rand(100) - 1
f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9
y = f(x) + .4 * rng.normal(size=100)
x_test = np.linspace(-1, 1, 100)
pl.figure()
pl.scatter(x, y, s=4)
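# Vandermonde-style design matrix: columns are x**0 ... x**4, so an ordinary
# linear fit on these features is a 4th-order polynomial fit in x.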
X = np.array([x**i for i in range(5)]).T
X_test = np.array([x_test**i for i in range(5)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='4th order')
X = np.array([x**i for i in range(10)]).T
X_test = np.array([x_test**i for i in range(10)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='9th order')
pl.legend(loc='best')
pl.axis('tight')
pl.title('Fitting a 4th and a 9th order polynomial')
pl.figure()
pl.scatter(x, y, s=4)
pl.plot(x_test, f(x_test), label="truth")
pl.axis('tight')
pl.title('Ground truth (9th order polynomial)')
| bsd-3-clause |
jzt5132/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveform 1 and waveform 2 at
all, so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
rupak0577/ginga | ginga/misc/plugins/MultiDim.py | 1 | 21828 | #
# MultiDim.py -- Multidimensional plugin for Ginga reference viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import time
import re
from distutils import spawn
from contextlib import contextmanager
from ginga import AstroImage
from ginga.gw import Widgets
from ginga.misc import Future, Bunch
from ginga import GingaPlugin
from ginga.util.videosink import VideoSink
from ginga.util import iohelper
import numpy as np
import matplotlib.pyplot as plt
import copy
have_mencoder = False
if spawn.find_executable("mencoder"):
have_mencoder = True
have_pyfits = False
try:
from astropy.io import fits as pyfits
have_pyfits = True
except ImportError:
try:
import pyfits
have_pyfits = True
except ImportError:
pass
class MultiDim(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(MultiDim, self).__init__(fv, fitsimage)
self.hdu_info = []
self.hdu_db = {}
self.curhdu = 0
self.naxispath = []
self.name_pfx = 'NONAME'
self.image = None
self.orientation = 'vertical'
# For animation feature
self.play_axis = 2
self.play_idx = 1
self.play_max = 1
self.play_int_sec = 0.1
self.play_min_sec = 0.1
self.timer = fv.get_timer()
self.timer.set_callback('expired', self.play_next)
# Load plugin preferences
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_MultiDim')
self.settings.setDefaults(auto_start_naxis=False)
self.settings.load(onError='silent')
# register for new image notification in this channel
fitsimage.set_callback('image-set', self.new_image_cb)
self.gui_up = False
def build_gui(self, container):
assert have_pyfits, "Please install astropy/pyfits to use this plugin"
top = Widgets.VBox()
top.set_border_width(4)
vbox, sw, orientation = Widgets.get_oriented_box(container,
scrolled=True)
self.orientation = orientation
vbox.set_border_width(4)
vbox.set_spacing(2)
self.msgFont = self.fv.getFont("sansFont", 12)
tw = Widgets.TextArea(wrap=True, editable=False)
tw.set_font(self.msgFont)
self.tw = tw
fr = Widgets.Expander("Instructions")
fr.set_widget(tw)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("HDU")
vb1 = Widgets.VBox()
captions = [("Num HDUs:", 'label', "Num HDUs", 'llabel'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.numhdu = b.num_hdus
self.w.update(b)
vb1.add_widget(w)
captions = [("Choose HDU", 'combobox'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
vb1.add_widget(w)
self.w.hdu = b.choose_hdu
self.w.hdu.set_tooltip("Choose which HDU to view")
self.w.hdu.add_callback('activated', self.set_hdu_cb)
fr.set_widget(vb1)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("NAXIS (data cubes)")
self.naxisfr = fr
vbox.add_widget(fr, stretch=0)
captions = [("First", 'button', "Prev", 'button', "Stop", 'button'),
("Last", 'button', "Next", 'button', "Play", 'button'),
("Interval:", 'label', "Interval", 'spinfloat'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.next.add_callback('activated', lambda w: self.next_slice())
b.prev.add_callback('activated', lambda w: self.prev_slice())
b.first.add_callback('activated', lambda w: self.first_slice())
b.last.add_callback('activated', lambda w: self.last_slice())
b.play.add_callback('activated', lambda w: self.play_start())
b.stop.add_callback('activated', lambda w: self.play_stop())
lower, upper = 0.1, 8.0
b.interval.set_limits(lower, upper, incr_value=0.1)
b.interval.set_value(lower)
b.interval.set_decimals(2)
b.interval.add_callback('value-changed', self.play_int_cb)
b.next.set_enabled(False)
b.prev.set_enabled(False)
b.first.set_enabled(False)
b.last.set_enabled(False)
b.play.set_enabled(False)
b.stop.set_enabled(False)
b.interval.set_enabled(False)
vbox.add_widget(w, stretch=0)
captions = [("Slice:", 'label', "Slice", 'llabel',),
#"Value:", 'label', "Value", 'llabel'),
("Save Slice", 'button'),
]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.save_slice.add_callback('activated', lambda w: self.save_slice_cb())
b.save_slice.set_enabled(False)
b.save_slice.set_tooltip("Save current slice as RGB image")
vbox.add_widget(w, stretch=0)
fr = Widgets.Frame("Movie")
if have_mencoder:
captions = [("Start:", 'label', "Start Slice", 'entry',
"End:", 'label', "End Slice", 'entry', 'Save Movie', 'button')]
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.start_slice.set_tooltip("Starting slice")
b.end_slice.set_tooltip("Ending slice")
b.start_slice.set_length(6)
b.end_slice.set_length(6)
b.save_movie.add_callback('activated', lambda w: self.save_movie_cb())
b.save_movie.set_enabled(False)
fr.set_widget(w)
else:
infolbl = Widgets.Label()
infolbl.set_text("Please install 'mencoder' to save as movie")
fr.set_widget(infolbl)
vbox.add_widget(fr, stretch=0)
#spacer = Widgets.Label('')
#vbox.add_widget(spacer, stretch=1)
top.add_widget(sw, stretch=1)
btns = Widgets.HBox()
btns.set_spacing(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def set_hdu_cb(self, w, val):
#idx = int(val)
idx = w.get_index()
idx = max(0, idx)
try:
self.set_hdu(idx)
except Exception as e:
self.logger.error("Error loading HDU #%d: %s" % (
idx+1, str(e)))
def set_naxis_cb(self, w, idx, n):
#idx = int(w.get_value()) - 1
self.set_naxis(idx, n)
def build_naxis(self, dims):
# build a vbox of NAXIS controls
captions = [("NAXIS1:", 'label', 'NAXIS1', 'llabel'),
("NAXIS2:", 'label', 'NAXIS2', 'llabel')]
self.naxispath = []
for n in range(2, len(dims)):
self.naxispath.append(0)
key = 'naxis%d' % (n+1)
title = key.upper()
maxn = int(dims[n])
self.logger.debug("NAXIS%d=%d" % (n+1, maxn))
if maxn <= 1:
captions.append((title+':', 'label', title, 'llabel'))
else:
captions.append((title+':', 'label', title, 'llabel',
#"Choose %s" % (title), 'spinbutton'))
"Choose %s" % (title), 'hscale'))
if len(dims) > 3: # only add radiobuttons if we have more than 3 dimensions
radiobuttons = []
for i in range(2, len(dims)):
title = 'AXIS%d' % (i+1)
radiobuttons.extend((title,'radiobutton'))
captions.append(radiobuttons)
# Remove old naxis widgets
for key in self.w:
if key.startswith('choose_'):
self.w[key] = None
w, b = Widgets.build_info(captions, orientation=self.orientation)
self.w.update(b)
for n in range(0, len(dims)):
key = 'naxis%d' % (n+1)
lbl = b[key]
maxn = int(dims[n])
lbl.set_text("%d" % maxn)
slkey = 'choose_' + key
if slkey in b:
slider = b[slkey]
lower = 1
upper = maxn
slider.set_limits(lower, upper, incr_value=1)
slider.set_value(lower)
slider.set_tracking(True)
#slider.set_digits(0)
#slider.set_wrap(True)
slider.add_callback('value-changed', self.set_naxis_cb, n)
# Add vbox of naxis controls to gui
self.naxisfr.set_widget(w)
# for storing play_idx for each dim of image. used for going back to
# the idx where you left off.
self.play_indices = [0 for i in range(len(dims) - 2)] if len(dims) > 3 else None
if len(dims) > 3:
# dims only exists in here, hence this function exists here
def play_axis_change_func_creator(n):
# widget callable needs (widget, value) args
def play_axis_change():
self.play_indices[self.play_axis - 2] = (self.play_idx - 1) % dims[self.play_axis]
self.play_axis = n
self.logger.debug("play_axis changed to %d" % n)
if self.play_axis < len(dims):
self.play_max = dims[self.play_axis]
self.play_idx = self.play_indices[n - 2]
def check_if_we_need_change(w,v):
if self.play_axis is not n:
play_axis_change()
return check_if_we_need_change
for n in range(2, len(dims)):
key = 'axis%d' % (n + 1)
self.w[key].add_callback('activated', play_axis_change_func_creator(n))
if n == 2:
self.w[key].set_state(True)
self.play_axis = 2
if self.play_axis < len(dims):
self.play_max = dims[self.play_axis]
self.play_idx = 1
# Enable or disable NAXIS animation controls
is_dc = len(dims) > 2
self.w.next.set_enabled(is_dc)
self.w.prev.set_enabled(is_dc)
self.w.first.set_enabled(is_dc)
self.w.last.set_enabled(is_dc)
self.w.play.set_enabled(is_dc)
self.w.stop.set_enabled(is_dc)
self.w.interval.set_enabled(is_dc)
self.w.save_slice.set_enabled(is_dc)
if have_mencoder:
self.w.save_movie.set_enabled(is_dc)
def close(self):
self.fv.stop_local_plugin(self.chname, str(self))
return True
def new_image_cb(self, fitsimage, image):
"""We are called when a new image is set in the channel.
If our GUI is not up, the auto_start_naxis preference is True,
and NAXIS >= 3, then start us up.
"""
if fitsimage != self.fitsimage:
# Focus is not our channel-->not an event for us
return False
image = fitsimage.get_image()
if image is None:
return False
auto_start = self.settings.get('auto_start_naxis', False)
if not hasattr(image, 'naxispath'):
return False
# Start ourselves if file is multidimensional
if len(image.naxispath) <= 0:
return False
# check preference for auto_start_naxis
if not auto_start:
return False
# Start ourselves if GUI is not up yet
if not self.gui_up:
self.fv.start_local_plugin(self.chname, str(self), None)
return True
def instructions(self):
self.tw.set_text("""Use mouse wheel to choose HDU or axis of data cube (NAXIS controls).""")
def start(self):
self.instructions()
self.resume()
def pause(self):
self.play_stop()
pass
def resume(self):
self.redo()
def stop(self):
self.gui_up = False
self.play_stop()
try:
self.fits_f.close()
except Exception:
pass
self.image = None
self.fv.showStatus("")
def get_name(self, sfx):
return '%s[%s]' % (self.name_pfx, sfx)
def set_hdu(self, idx):
self.logger.debug("Loading fits hdu #%d" % (idx))
# determine canonical index of this HDU
info = self.hdu_info[idx]
aidx = (info.name, info.extver)
sfx = '%s,%d' % aidx
# See if this HDU is still in the channel's datasrc
imname = self.get_name(sfx)
chname = self.chname
chinfo = self.chinfo
if imname in chinfo.datasrc:
self.curhdu = idx
self.image = chinfo.datasrc[imname]
self.fv.switch_name(chname, imname)
# Still need to build datacube profile
hdu = self.fits_f[idx]
dims = list(hdu.data.shape)
dims.reverse()
self.build_naxis(dims)
return
# Nope, we'll have to load it
self.logger.debug("HDU %d not in memory; refreshing from file" % (idx))
# inherit from primary header?
inherit_prihdr = self.fv.settings.get('inherit_primary_header', False)
image = AstroImage.AstroImage(logger=self.logger,
inherit_primary_header=inherit_prihdr)
self.image = image
try:
self.curhdu = idx
dims = [0, 0]
hdu = self.fits_f[idx]
if hdu.data is None:
# <- empty data part to this HDU
self.logger.warning("Empty data part in HDU #%d" % (idx))
elif info['htype'].lower() not in ('imagehdu', 'primaryhdu'):
self.logger.warning("HDU #%d is not an image" % (idx))
else:
dims = list(hdu.data.shape)
dims.reverse()
image.load_hdu(hdu, fobj=self.fits_f)
# create a future for reconstituting this HDU
future = Future.Future()
future.freeze(self.fv.load_image, self.path, idx=aidx)
image.set(path=self.path, idx=aidx, name=imname, image_future=future)
## self.fitsimage.set_image(image,
## raise_initialize_errors=False)
self.fv.add_image(imname, image, chname=chname)
self.build_naxis(dims)
self.logger.debug("HDU #%d loaded." % (idx))
except Exception as e:
errmsg = "Error loading FITS HDU #%d: %s" % (
idx, str(e))
self.logger.error(errmsg)
self.fv.show_error(errmsg, raisetab=False)
def set_naxis(self, idx, n):
self.play_idx = idx
self.w['choose_naxis%d' % (n+1)].set_value(idx)
idx = idx - 1
self.logger.debug("naxis %d index is %d" % (n+1, idx+1))
image = self.fitsimage.get_image()
try:
if image is None:
raise ValueError("Please load an image cube")
m = n - 2
self.naxispath[m] = idx
self.logger.debug("m=%d naxispath=%s" % (m, str(self.naxispath)))
image.set_naxispath(self.naxispath)
self.logger.debug("NAXIS%d slice %d loaded." % (n+1, idx+1))
if self.play_indices:
text = self.play_indices
text[m]= idx
else:
text = idx
self.w.slice.set_text(str(text))
except Exception as e:
errmsg = "Error loading NAXIS%d slice %d: %s" % (
n+1, idx+1, str(e))
self.logger.error(errmsg)
self.fv.error(errmsg)
def play_start(self):
self._isplaying = True
self.play_next(self.timer)
def play_next(self, timer):
if self._isplaying:
time_start = time.time()
deadline = time_start + self.play_int_sec
self.next_slice()
#self.fv.update_pending(0.001)
delta = max(deadline - time.time(), 0.001)
self.timer.set(delta)
def play_stop(self):
self._isplaying = False
def first_slice(self):
play_idx = 1
self.fv.gui_do(self.set_naxis_cb, None, play_idx, self.play_axis)
def last_slice(self):
play_idx = self.play_max
self.fv.gui_do(self.set_naxis_cb, None, play_idx, self.play_axis)
def prev_slice(self):
play_idx = self.play_idx - 1
if play_idx < 1:
play_idx = self.play_max
self.fv.gui_do(self.set_naxis_cb, None, play_idx, self.play_axis)
def next_slice(self):
play_idx = self.play_idx + 1
if play_idx > self.play_max:
play_idx = 1
self.fv.gui_do(self.set_naxis_cb, None, play_idx, self.play_axis)
def play_int_cb(self, w, val):
# force at least play_min_sec, otherwise playback is untenable
self.play_int_sec = max(self.play_min_sec, val)
def prep_hdu_menu(self, w, info):
# clear old TOC
w.clear()
self.hdu_info = []
self.hdu_db = {}
idx = 0
extver_db = {}
for tup in info:
name = tup[1]
# figure out the EXTVER for this HDU
extver = extver_db.setdefault(name, 0)
extver += 1
extver_db[name] = extver
# prepare a record of pertinent info about the HDU for
# lookups by numerical index or (NAME, EXTVER)
d = Bunch.Bunch(index=idx, name=name, extver=extver,
htype=tup[2], dtype=tup[5])
self.hdu_info.append(d)
# different ways of accessing this HDU:
# by numerical index
self.hdu_db[idx] = d
# by (hduname, extver)
self.hdu_db[(name, extver)] = d
toc_ent = "%(index)4d %(name)-12.12s (%(extver)3d) %(htype)-12.12s %(dtype)-8.8s" % d
w.append_text(toc_ent)
idx += 1
idx = w.get_index()
if idx < 0:
idx = 0
if idx >= len(self.hdu_info):
idx = len(self.hdu_info) - 1
#w.set_index(idx)
def redo(self):
"""Called when an image is set in the channel."""
image = self.fitsimage.get_image()
if (image is None) or (image == self.image):
return True
path = image.get('path', None)
if path is None:
self.fv.show_error("Cannot open image: no value for metadata key 'path'")
return
self.path = path
name = image.get('name', self.fv.name_image_from_path(path))
idx = image.get('idx', None)
# remove index designation from root of name, if any
match = re.match(r'^(.+)\[(.+)\]$', name)
if match:
name = match.group(1)
self.name_pfx = name
self.fits_f = pyfits.open(path, 'readonly')
lower = 0
upper = len(self.fits_f) - 1
info = self.fits_f.info(output=False)
self.prep_hdu_menu(self.w.hdu, info)
self.num_hdu = upper
self.logger.debug("there are %d hdus" % (upper+1))
self.w.numhdu.set_text("%d" % (upper+1))
if idx is not None:
# set the HDU in the drop down if known
info = self.hdu_db.get(idx, None)
if info is not None:
index = info.index
self.w.hdu.set_index(index)
self.set_hdu(index)
self.w.hdu.set_enabled(len(self.fits_f) > 0)
def save_slice_cb(self):
target = Widgets.SaveDialog(title='Save slice', selectedfilter='*.png').get_path()
with open(target, 'wb') as target_file:
hival = self.fitsimage.get_cut_levels()[1]
image = self.fitsimage.get_image()
curr_slice_data = image.get_data()
plt.imsave(target_file, curr_slice_data, vmax=hival, cmap=plt.get_cmap('gray'), origin='lower')
self.fv.showStatus("Successfully saved slice")
def save_movie_cb(self):
start = int(self.w.start_slice.get_text())
end = int(self.w.end_slice.get_text())
if not start or not end:
return
elif start < 0 or end > self.play_max:
self.fv.showStatus("Wrong slice index")
return
elif start > end:
self.fv.showStatus("Wrong slice order")
return
if start == 1:
start = 0
target = Widgets.SaveDialog(title='Save Movie', selectedfilter='*.avi').get_path()
if target:
self.save_movie(start, end, target)
def save_movie(self, start, end, target_file):
image = self.fitsimage.get_image()
loval, hival = self.fitsimage.get_cut_levels()
data = np.array(image.get_mddata()).clip(loval, hival)
# http://stackoverflow.com/questions/7042190/plotting-directly-to-movie-with-numpy-and-mencoder
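# Rescale the clipped data from [loval, hival] onto 8-bit [0, 255]:
# value -> (value - loval) * 255 / (hival - loval), cast to uint8 for the
# video frames.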
data_rescaled = ((data - loval) * 255 / (hival - loval)).astype(np.uint8, copy=False)
W, H = image.get_data_size()
with self.video_writer(VideoSink((H, W), target_file)) as video:
for i in range(start, end):
video.write(np.flipud(data_rescaled[i]))
self.fv.showStatus("Successfully saved movie")
@contextmanager
def video_writer(self, v):
v.open()
try:
yield v
finally:
v.close()
return
def __str__(self):
return 'multidim'
#END
| bsd-3-clause |
liyi193328/seq2seq | seq2seq/contrib/learn/dataframe/dataframe.py | 27 | 4836 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
from tensorflow.python.util.deprecation import deprecated
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
@deprecated("2017-06-15", "contrib/learn/dataframe/** is deprecated.")
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
and value is an `inflow.Series`, a `pandas.Series` or a numpy array.
Raises:
TypeError: keys are not strings.
TypeError: values are not `inflow.Series`, `pandas.Series` or
`numpy.ndarray`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
| apache-2.0 |
glouppe/scikit-learn | examples/mixture/plot_gmm_classifier.py | 22 | 4015 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
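# Eigendecomposition of the 2x2 covariance: the eigenvectors give the
# ellipse orientation, the eigenvalues its spread along the principal axes.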
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers // 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
print(color)
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
phobson/statsmodels | statsmodels/tsa/statespace/tests/test_kalman.py | 2 | 23468 | """
Tests for _statespace module
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
Hamilton, James D. 1994.
Time Series Analysis.
Princeton, N.J.: Princeton University Press.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
try:
from scipy.linalg.blas import find_best_blas_type
except ImportError:
# Shim for SciPy 0.11, derived from tag=0.11 scipy.linalg.blas
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
def find_best_blas_type(arrays):
dtype, index = max(
[(ar.dtype, i) for i, ar in enumerate(arrays)])
prefix = _type_conv.get(dtype.char, 'd')
return (prefix, dtype, None)
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace import _statespace as ss
from .results import results_kalman_filter
from numpy.testing import assert_almost_equal, assert_allclose
from nose.exc import SkipTest
prefix_statespace_map = {
's': ss.sStatespace, 'd': ss.dStatespace,
'c': ss.cStatespace, 'z': ss.zStatespace
}
prefix_kalman_filter_map = {
's': ss.sKalmanFilter, 'd': ss.dKalmanFilter,
'c': ss.cKalmanFilter, 'z': ss.zKalmanFilter
}
current_path = os.path.dirname(os.path.abspath(__file__))
class Clark1987(object):
"""
Clark's (1987) univariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
@classmethod
def setup_class(cls, dtype=float, conserve_memory=0, loglikelihood_burn=0):
cls.true = results_kalman_filter.uc_uni
cls.true_states = pd.DataFrame(cls.true['states'])
# GDP, Quarterly, 1947.1 - 1995.3
data = pd.DataFrame(
cls.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
# Parameters
cls.conserve_memory = conserve_memory
cls.loglikelihood_burn = loglikelihood_burn
# Observed data
cls.obs = np.array(data['lgdp'], ndmin=2, dtype=dtype, order="F")
# Measurement equation
cls.k_endog = k_endog = 1 # dimension of observed data
# design matrix
cls.design = np.zeros((k_endog, 4, 1), dtype=dtype, order="F")
cls.design[:, :, 0] = [1, 1, 0, 0]
# observation intercept
cls.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
# observation covariance matrix
cls.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
# Transition equation
cls.k_states = k_states = 4 # dimension of state space
# transition matrix
cls.transition = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
cls.transition[([0, 0, 1, 1, 2, 3],
[0, 3, 1, 2, 1, 3],
[0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
# state intercept
cls.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
# selection matrix
cls.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
dtype=dtype)
# state covariance matrix
cls.state_cov = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
# Initialization: Diffuse priors
cls.initial_state = np.zeros((k_states,), dtype=dtype, order="F")
cls.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
dtype=dtype)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
cls.true['parameters'], dtype=dtype
)
cls.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
cls.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, sigma_w**2
]
# Initialization: modification
# Due to the difference in the way Kim and Nelson (1999) and Durbin
# and Koopman (2012) define the order of the Kalman filter routines,
# we need to modify the initial state covariance matrix to match
# Kim and Nelson's results, since the *Statespace models follow Durbin
# and Koopman.
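# Concretely, the prior covariance is replaced below by
#   P0 <- T P0 T'
# where T is the transition matrix, so the timing matches Kim and Nelson.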
cls.initial_state_cov = np.asfortranarray(
np.dot(
np.dot(cls.transition[:, :, 0], cls.initial_state_cov),
cls.transition[:, :, 0].T
)
)
@classmethod
def init_filter(cls):
# Use the appropriate Statespace model
prefix = find_best_blas_type((cls.obs,))
klass = prefix_statespace_map[prefix[0]]
# Instantiate the statespace model
model = klass(
cls.obs, cls.design, cls.obs_intercept, cls.obs_cov,
cls.transition, cls.state_intercept, cls.selection,
cls.state_cov
)
model.initialize_known(cls.initial_state, cls.initial_state_cov)
# Initialize the appropriate Kalman filter
klass = prefix_kalman_filter_map[prefix[0]]
kfilter = klass(model, conserve_memory=cls.conserve_memory,
loglikelihood_burn=cls.loglikelihood_burn)
return model, kfilter
@classmethod
def run_filter(cls):
# Filter the data
cls.filter()
# Get results
return {
'loglike': lambda burn: np.sum(cls.filter.loglikelihood[burn:]),
'state': np.array(cls.filter.filtered_state),
}
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](self.true['start']), self.true['loglike'], 5
)
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
class TestClark1987Single(Clark1987):
"""
Basic single precision test for the loglikelihood and filtered states.
"""
@classmethod
def setup_class(cls):
raise SkipTest('Not implemented')
super(TestClark1987Single, cls).setup_class(
dtype=np.float32, conserve_memory=0
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
def test_loglike(self):
assert_allclose(
self.result['loglike'](self.true['start']), self.true['loglike'],
rtol=1e-3
)
def test_filtered_state(self):
assert_allclose(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0],
atol=1e-2
)
assert_allclose(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1],
atol=1e-2
)
assert_allclose(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2],
atol=1e-2
)
class TestClark1987Double(Clark1987):
"""
Basic double precision test for the loglikelihood and filtered states.
"""
@classmethod
def setup_class(cls):
super(TestClark1987Double, cls).setup_class(
dtype=float, conserve_memory=0
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class TestClark1987SingleComplex(Clark1987):
"""
Basic single precision complex test for the loglikelihood and filtered
states.
"""
@classmethod
def setup_class(cls):
raise SkipTest('Not implemented')
super(TestClark1987SingleComplex, cls).setup_class(
dtype=np.complex64, conserve_memory=0
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
def test_loglike(self):
assert_allclose(
self.result['loglike'](self.true['start']), self.true['loglike'],
rtol=1e-3
)
def test_filtered_state(self):
assert_allclose(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0],
atol=1e-2
)
assert_allclose(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1],
atol=1e-2
)
assert_allclose(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2],
atol=1e-2
)
class TestClark1987DoubleComplex(Clark1987):
"""
Basic double precision complex test for the loglikelihood and filtered
states.
"""
@classmethod
def setup_class(cls):
super(TestClark1987DoubleComplex, cls).setup_class(
dtype=complex, conserve_memory=0
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class TestClark1987Conserve(Clark1987):
"""
Memory conservation test for the loglikelihood and filtered states.
"""
@classmethod
def setup_class(cls):
super(TestClark1987Conserve, cls).setup_class(
dtype=float, conserve_memory=0x01 | 0x02
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class Clark1987Forecast(Clark1987):
"""
Forecasting test for the loglikelihood and filtered states.
"""
@classmethod
def setup_class(cls, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1987Forecast, cls).setup_class(
dtype, conserve_memory
)
cls.nforecast = nforecast
# Add missing observations to the end (to forecast)
cls._obs = cls.obs
cls.obs = np.array(np.r_[cls.obs[0, :], [np.nan]*nforecast],
ndmin=2, dtype=dtype, order="F")
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][3][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
class TestClark1987ForecastDouble(Clark1987Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
@classmethod
def setup_class(cls):
super(TestClark1987ForecastDouble, cls).setup_class()
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
@classmethod
def setup_class(cls):
super(TestClark1987ForecastDoubleComplex, cls).setup_class(
dtype=complex
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class TestClark1987ForecastConserve(Clark1987Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
@classmethod
def setup_class(cls):
super(TestClark1987ForecastConserve, cls).setup_class(
dtype=float, conserve_memory=0x01 | 0x02
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class TestClark1987ConserveAll(Clark1987):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
@classmethod
def setup_class(cls):
super(TestClark1987ConserveAll, cls).setup_class(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
cls.loglikelihood_burn = cls.true['start']
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](0), self.true['loglike'], 5
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.result['state'][0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.result['state'][1][-1],
self.true_states.iloc[end-1, 1], 4
)
class Clark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
@classmethod
def setup_class(cls, dtype=float, conserve_memory=0, loglikelihood_burn=0):
cls.true = results_kalman_filter.uc_bi
cls.true_states = pd.DataFrame(cls.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
cls.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
# Observed data
cls.obs = np.array(data, ndmin=2, dtype=dtype, order="C").T
# Parameters
cls.k_endog = k_endog = 2 # dimension of observed data
cls.k_states = k_states = 6 # dimension of state space
cls.conserve_memory = conserve_memory
cls.loglikelihood_burn = loglikelihood_burn
# Measurement equation
# design matrix
cls.design = np.zeros((k_endog, k_states, 1), dtype=dtype, order="F")
cls.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
# observation intercept
cls.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
# observation covariance matrix
cls.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
# Transition equation
# transition matrix
cls.transition = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
cls.transition[([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1, 1, 1]
# state intercept
cls.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
# selection matrix
cls.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
dtype=dtype)
# state covariance matrix
cls.state_cov = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
# Initialization: Diffuse priors
cls.initial_state = np.zeros((k_states,), dtype=dtype)
cls.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
dtype=dtype)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
cls.true['parameters'], dtype=dtype
)
cls.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
cls.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
cls.obs_cov[1, 1, 0] = sigma_ec**2
cls.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization: modification
# Due to the difference in the way Kim and Nelson (1999) and Durbin
# and Koopman (2012) define the order of the Kalman filter routines,
# we need to modify the initial state covariance matrix to match
# Kim and Nelson's results, since the *Statespace models follow Durbin
# and Koopman.
cls.initial_state_cov = np.asfortranarray(
np.dot(
np.dot(cls.transition[:, :, 0], cls.initial_state_cov),
cls.transition[:, :, 0].T
)
)
@classmethod
def init_filter(cls):
# Use the appropriate Statespace model
prefix = find_best_blas_type((cls.obs,))
klass = prefix_statespace_map[prefix[0]]
# Instantiate the statespace model
model = klass(
cls.obs, cls.design, cls.obs_intercept, cls.obs_cov,
cls.transition, cls.state_intercept, cls.selection,
cls.state_cov
)
model.initialize_known(cls.initial_state, cls.initial_state_cov)
# Initialize the appropriate Kalman filter
klass = prefix_kalman_filter_map[prefix[0]]
kfilter = klass(model, conserve_memory=cls.conserve_memory,
loglikelihood_burn=cls.loglikelihood_burn)
return model, kfilter
@classmethod
def run_filter(cls):
# Filter the data
cls.filter()
# Get results
return {
'loglike': lambda burn: np.sum(cls.filter.loglikelihood[burn:]),
'state': np.array(cls.filter.filtered_state),
}
def test_loglike(self):
assert_almost_equal(
# self.result['loglike'](self.true['start']),
self.result['loglike'](0),
self.true['loglike'], 2
)
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][4][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.result['state'][5][self.true['start']:],
self.true_states.iloc[:, 3], 4
)
class TestClark1989(Clark1989):
"""
Basic double precision test for the loglikelihood and filtered
states with two-dimensional observation vector.
"""
@classmethod
def setup_class(cls):
super(TestClark1989, cls).setup_class(dtype=float, conserve_memory=0)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class TestClark1989Conserve(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
@classmethod
def setup_class(cls):
super(TestClark1989Conserve, cls).setup_class(
dtype=float, conserve_memory=0x01 | 0x02
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class Clark1989Forecast(Clark1989):
"""
    Forecasting test base class for the loglikelihood and filtered states with
    two-dimensional observation vector.
"""
@classmethod
def setup_class(cls, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1989Forecast, cls).setup_class(dtype, conserve_memory)
cls.nforecast = nforecast
# Add missing observations to the end (to forecast)
cls._obs = cls.obs
cls.obs = np.array(
np.c_[
cls._obs,
np.r_[[np.nan, np.nan]*nforecast].reshape(2, nforecast)
],
ndmin=2, dtype=dtype, order="F"
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][4][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.result['state'][5][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 3], 4
)
class TestClark1989ForecastDouble(Clark1989Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
@classmethod
def setup_class(cls):
super(TestClark1989ForecastDouble, cls).setup_class()
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
@classmethod
def setup_class(cls):
super(TestClark1989ForecastDoubleComplex, cls).setup_class(
dtype=complex
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class TestClark1989ForecastConserve(Clark1989Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
@classmethod
def setup_class(cls):
super(TestClark1989ForecastConserve, cls).setup_class(
dtype=float, conserve_memory=0x01 | 0x02
)
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
class TestClark1989ConserveAll(Clark1989):
"""
    Memory conservation test, with all conservation options enabled, for the
    loglikelihood and filtered states.
"""
@classmethod
def setup_class(cls):
super(TestClark1989ConserveAll, cls).setup_class(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08,
)
# cls.loglikelihood_burn = cls.true['start']
cls.loglikelihood_burn = 0
cls.model, cls.filter = cls.init_filter()
cls.result = cls.run_filter()
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](0), self.true['loglike'], 2
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.result['state'][0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.result['state'][1][-1],
self.true_states.iloc[end-1, 1], 4
)
assert_almost_equal(
self.result['state'][4][-1],
self.true_states.iloc[end-1, 2], 4
)
assert_almost_equal(
self.result['state'][5][-1],
self.true_states.iloc[end-1, 3], 4
)
| bsd-3-clause |
pgroth/independence-indicators | Temporal-Coauthor-Networks/vincent/examples/bar_chart_examples.py | 11 | 2026 | # -*- coding: utf-8 -*-
"""
Vincent Bar Chart Example
"""
#Build a Bar Chart from scratch
from vincent import *
import pandas as pd
farm_1 = {'apples': 10, 'berries': 32, 'squash': 21, 'melons': 13, 'corn': 18}
farm_2 = {'apples': 15, 'berries': 43, 'squash': 17, 'melons': 10, 'corn': 22}
farm_3 = {'apples': 6, 'berries': 24, 'squash': 22, 'melons': 16, 'corn': 30}
farm_4 = {'apples': 12, 'berries': 30, 'squash': 15, 'melons': 9, 'corn': 15}
data = [farm_1, farm_2, farm_3, farm_4]
index = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4']
df = pd.DataFrame(data, index=index)
vis = Visualization(width=500, height=300)
vis.scales['x'] = Scale(name='x', type='ordinal', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
#Marks
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="data.val"),
width=ValueRef(scale='x', band=True, offset=-1),
y2=ValueRef(scale='y', value=0))
update_props = PropertySet(fill=ValueRef(value='steelblue'))
mark = Mark(type='rect', from_=MarkRef(data='table'),
properties=MarkProperties(enter=enter_props,
update=update_props))
vis.marks.append(mark)
data = Data.from_pandas(df['apples'])
#Using a Vincent KeyedList here
vis.data['table'] = data
vis.axis_titles(x='Farms', y='Data')
vis.to_json('vega.json')
#Convenience methods
vis = Bar(df['apples'])
#Fruit
trans = df.T
vis = Bar(trans['Farm 1'])
#From dict
vis = Bar(farm_1)
#From dict of iterables
vis = Bar({'x': ['apples', 'berries', 'squash', 'melons', 'corn'],
'y': [10, 32, 21, 13, 18]}, iter_idx='x')
#Finally, a boring bar chart from a list
vis = Bar([10, 20, 30, 15, 35, 10, 20])
| gpl-2.0 |
ville-k/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 26 | 6490 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
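    # by default, split each batch evenly between the enqueueing threads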
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform._parameter # pylint: disable=protected-access
def data(self):
return self._data
@transform._parameter # pylint: disable=protected-access
def num_threads(self):
return self._num_threads
@transform._parameter # pylint: disable=protected-access
def enqueue_size(self):
return self._enqueue_size
@transform._parameter # pylint: disable=protected-access
def batch_size(self):
return self._batch_size
@transform._parameter # pylint: disable=protected-access
def queue_capacity(self):
return self._queue_capacity
@transform._parameter # pylint: disable=protected-access
def shuffle(self):
return self._shuffle
@transform._parameter # pylint: disable=protected-access
def min_after_dequeue(self):
return self._min_after_dequeue
@transform._parameter # pylint: disable=protected-access
def seed(self):
return self._seed
@transform._parameter # pylint: disable=protected-access
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
lukeshingles/artistools | artistools/makemodel/shen2018.py | 1 | 3448 | #!/usr/bin/env python3
import argparse
import math
import os.path
import numpy as np
import pandas as pd
from astropy import units as u
import artistools as at
def addargs(parser):
parser.add_argument('-inputpath', '-i',
default='1.00_5050.dat',
help='Path of input file')
parser.add_argument('-outputpath', '-o',
default='.',
help='Path for output files')
def main(args=None, argsraw=None, **kwargs) -> None:
if args is None:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Convert Shen et al. 2018 models to ARTIS format.')
addargs(parser)
parser.set_defaults(**kwargs)
args = parser.parse_args(argsraw)
with open(args.inputpath) as infile:
columns = infile.readline().split()
atomicnumberofspecies = {}
isotopesofelem = {}
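    # map each isotope column (e.g. 'ni56') to its element's atomic number and group the isotope columns by element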
for species in columns[5:]:
atomic_number = at.get_atomic_number(species.rstrip('0123456789'))
atomicnumberofspecies[species] = atomic_number
isotopesofelem.setdefault(atomic_number, list()).append(species)
datain = pd.read_csv(args.inputpath, delim_whitespace=True, skiprows=0, header=[0]).dropna()
dfmodel = pd.DataFrame(
columns=[
'inputcellid', 'velocity_outer', 'logrho', 'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48',
'X_Ni57', 'X_Co57'])
dfmodel.index.name = 'cellid'
dfabundances = pd.DataFrame(columns=['inputcellid', *['X_' + at.elsymbols[x] for x in range(1, 31)]])
dfabundances.index.name = 'cellid'
t_model_init_seconds = 10.
t_model_init_days = t_model_init_seconds / 24 / 60 / 60
v_inner = 0. # velocity at inner boundary of cell
m_enc_inner = 0. # mass enclosed at inner boundary
tot_ni56mass = 0.
for cellid, shell in datain.iterrows():
m_enc_outer = float(shell['m']) * u.solMass.to('g') # convert Solar masses to grams
v_outer = float(shell['v']) * 1e-5 # convert cm/s to km/s
m_shell_grams = (m_enc_outer - m_enc_inner)
r_outer = v_outer * 1e5 * t_model_init_seconds
r_inner = v_inner * 1e5 * t_model_init_seconds
vol_shell = 4. / 3. * math.pi * (r_outer ** 3 - r_inner ** 3)
rho = m_shell_grams / vol_shell
tot_ni56mass += m_shell_grams * shell.ni56
abundances = [0. for _ in range(31)]
X_fegroup = 0.
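        # the elemental mass fraction is the sum over that element's isotopes; Z >= 26 counts towards the Fe-group fraction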
for atomic_number in range(1, 31):
abundances[atomic_number] = sum([float(shell[species]) for species in isotopesofelem[atomic_number]])
if atomic_number >= 26:
X_fegroup += abundances[atomic_number]
radioabundances = [X_fegroup, shell.ni56, shell.co56, shell.fe52, shell.cr48, shell.ni57, shell.co57]
dfmodel.loc[cellid] = [cellid, v_outer, math.log10(rho), *radioabundances]
dfabundances.loc[cellid] = [cellid, *abundances[1:31]]
v_inner = v_outer
m_enc_inner = m_enc_outer
print(f'M_tot = {m_enc_outer / u.solMass.to("g"):.3f} solMass')
print(f'M_Ni56 = {tot_ni56mass / u.solMass.to("g"):.3f} solMass')
at.save_modeldata(dfmodel, t_model_init_days, os.path.join(args.outputpath, 'model.txt'))
at.inputmodel.save_initialabundances(dfabundances, os.path.join(args.outputpath, 'abundances.txt'))
if __name__ == "__main__":
main()
| mit |
alheinecke/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
cmtm/networkx | examples/drawing/giant_component.py | 15 | 2287 | #!/usr/bin/env python
"""
This example illustrates the sudden appearance of a
giant connected component in a binomial random graph.
Requires pygraphviz and matplotlib to draw.
"""
# Copyright (C) 2006-2016
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
try:
import matplotlib.pyplot as plt
except:
raise
import networkx as nx
import math
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
layout = graphviz_layout
except ImportError:
try:
import pydotplus
from networkx.drawing.nx_pydot import graphviz_layout
layout = graphviz_layout
except ImportError:
print("PyGraphviz and PyDotPlus not found;\n"
"drawing with spring layout;\n"
"will be slow.")
layout = nx.spring_layout
n=150 # 150 nodes
# p value at which giant component (of size log(n) nodes) is expected
p_giant=1.0/(n-1)
# p value at which graph is expected to become completely connected
p_conn=math.log(n)/float(n)
# the following range of p values should be close to the threshold
pvals=[0.003, 0.006, 0.008, 0.015]
region=220 # for pylab 2x2 subplot layout
plt.subplots_adjust(left=0,right=1,bottom=0,top=0.95,wspace=0.01,hspace=0.01)
for p in pvals:
G=nx.binomial_graph(n,p)
pos=layout(G)
region+=1
plt.subplot(region)
plt.title("p = %6.3f"%(p))
nx.draw(G,pos,
with_labels=False,
node_size=10
)
# identify largest connected component
Gcc=sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)
G0=Gcc[0]
nx.draw_networkx_edges(G0,pos,
with_labels=False,
edge_color='r',
width=6.0
)
# show other connected components
for Gi in Gcc[1:]:
if len(Gi)>1:
nx.draw_networkx_edges(Gi,pos,
with_labels=False,
edge_color='r',
alpha=0.3,
width=5.0
)
plt.savefig("giant_component.png")
plt.show() # display
| bsd-3-clause |
oknuutti/visnav-py | visnav/algo/image.py | 1 | 16219 | from functools import lru_cache
import math
from scipy import optimize, stats, integrate
import numpy as np
import quaternion # adds to numpy # noqa # pylint: disable=unused-import
import cv2
from scipy.optimize import leastsq
from visnav.settings import *
class ImageProc:
latest_opt = None
show_fit = None
@staticmethod
def add_noise_to_image(image, noise_img_file):
tmp = cv2.imread(noise_img_file, cv2.IMREAD_UNCHANGED)
noise_img = cv2.resize(tmp, None,
fx=image.shape[1] / tmp.shape[1],
fy=image.shape[0] / tmp.shape[0],
interpolation=cv2.INTER_CUBIC)
return cv2.add(image, noise_img[:, :, 3])
@staticmethod
def crop_and_zoom_image(image, x_off, y_off, width, height, scale, trg_w_h=None, others=tuple()):
        tw, th = trg_w_h if trg_w_h is not None else (None, None)
if scale is None:
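            # choose the largest zoom factor that still fits the cropped region inside the target size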
scale = min(th / height, tw / width)
res = []
for img in [image] + list(others):
imgc = cv2.resize(img[y_off:y_off + height, x_off:x_off + width], None, fx=scale, fy=scale,
interpolation=cv2.INTER_AREA)
oh, ow = img.shape
ch, cw = imgc.shape
if trg_w_h is not None:
if x_off + width >= ow:
x0 = tw - cw
elif x_off <= 0:
x0 = 0
else:
x0 = (tw - cw) // 2
if y_off + height >= oh:
y0 = th - ch
elif y_off <= 0:
y0 = 0
else:
y0 = (th - ch) // 2
imgd = np.zeros((th, tw), dtype=img.dtype)
imgd[y0:y0 + ch, x0:x0 + cw] = imgc
else:
imgd = imgc
res.append(imgd)
if len(others) > 0:
return res
return res[0]
@staticmethod
def single_object_bounds(img, threshold, crop_marg, min_px, debug=False):
# binary image
_, mask = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
# remove stars
mask = cv2.erode(mask, ImageProc.bsphkern(9), iterations=1)
if np.sum(mask) < min_px:
return (None,) * 4
# detect target
x_, y_, w_, h_ = cv2.boundingRect(mask)
# add margin
x, y = max(0, x_ - crop_marg), max(0, y_ - crop_marg)
w = min(mask.shape[1] - x, w_ + 2 * crop_marg - (x - x_ + crop_marg))
h = min(mask.shape[0] - y, h_ + 2 * crop_marg - (y - y_ + crop_marg))
if debug:
img_color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
img_color = cv2.rectangle(img_color, (x, y), (x + w, y + h), (0, 0, 255), thickness=1)
img_color[y + h // 2, x + w // 2] = (0, 0, 255)
cv2.imshow('box', cv2.resize(img_color, (512, 512)))
return x, y, w, h
@staticmethod
def equalize_brightness(image, ref_image, percentile=98, image_gamma=1):
image = ImageProc.adjust_gamma(image, 1 / image_gamma)
ip = np.percentile(image, percentile)
rp = np.percentile(ImageProc.adjust_gamma(ref_image, 1 / image_gamma), percentile)
image = cv2.convertScaleAbs(image, None, rp / ip, 0)
return ImageProc.adjust_gamma(image, image_gamma)
@staticmethod
def normalize_brightness(image, quantiles=(0.0005, 0.9999), top_margin=1.2, src_gamma=1.0, gamma=1.0):
image = ImageProc.adjust_gamma(image, src_gamma, inverse=True)
bot_v, top_v = np.quantile(image, quantiles)
top_v = top_v * top_margin
if image.dtype == np.uint8:
sc = 255 / (top_v - bot_v)
image = cv2.convertScaleAbs(image, None, sc, -bot_v * sc)
elif image.dtype in (float, np.float32):
sc = 1 / (top_v - bot_v)
image = np.clip((image - bot_v) * sc, 0, 1)
else:
assert False, 'unsupported image dtype: %s' % image.dtype
return ImageProc.adjust_gamma(image, gamma)
@staticmethod
def default_preprocess(image, max=255):
bg = np.percentile(image, 250 / 1024 * 100)
return ImageProc.adjust_gamma(np.clip((image - bg) * max / (max - bg), 0, max), 1.8)
@staticmethod
def change_color_depth(img, src_bits, dst_bits):
if src_bits == dst_bits:
return img
if str(img.dtype)[:4] == 'uint':
new_type = 'uint' + str(math.ceil(dst_bits / 8) * 8)
else:
new_type = img.dtype
if src_bits < dst_bits:
img = img.astype(new_type)
img = img * (2 ** (dst_bits - src_bits))
if src_bits > dst_bits:
img = img.astype(new_type)
return img
@staticmethod
def remove_bg(img, bg_img, gain=None, max_val=None, offset=0):
if gain is None:
# estimate correct gain
cost_fun = lambda g: np.var((img - g[0] * bg_img).reshape((-1, 3)), axis=0)
x, _ = leastsq(cost_fun, np.array([1]))
gain = x[0]
print('estimated bg gain: %f' % gain)
imgr = img.astype('float') - gain * bg_img
if offset not in (None, False):
imgr += offset - np.min(imgr)
if max_val and offset is not False:
return np.clip(imgr, 0, max_val).astype(img.dtype)
return imgr
@staticmethod
def color_correct(img, bgr_mx, inverse=False, max_val=None):
assert img.shape[2] == 3, 'can only do to BGR images'
if inverse:
bgr_mx = np.linalg.inv(bgr_mx)
imgc = bgr_mx.dot(img.reshape((-1, 3)).T).T.reshape(img.shape)
if max_val:
return np.clip(imgc, 0, max_val).astype(img.dtype)
return imgc
@staticmethod
def adjust_gamma(image, gamma, gamma_break=None, linear_part=True, inverse=False, max_val=255):
if gamma == 1:
return image
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = gamma if inverse else 1.0 / gamma
gamma_break = gamma_break or 0
if image.dtype == 'uint8' and gamma_break == 0:
# apply gamma correction using the lookup table
max_val = min(max_val, 255)
table = np.array([((i / max_val) ** invGamma) * max_val for i in np.arange(0, max_val + 1)]).astype(
image.dtype)
adj_img = cv2.LUT(image, table)
elif gamma_break == 0:
adj_img = np.round(((image / max_val) ** invGamma) * max_val).astype(image.dtype)
elif True:
# from https://se.mathworks.com/help/vision/ref/gammacorrection.html
b_p = gamma_break
s_ls = 1 / (gamma / b_p ** (1 / gamma - 1) - gamma * gamma_break + gamma_break)
f_s = gamma * s_ls / b_p ** (1 / gamma - 1)
c_o = f_s * b_p ** (1 / gamma) - s_ls * b_p
img = image.flatten() / max_val
I = img <= (s_ls if inverse else 1) * b_p
nI = np.logical_not(I)
adj_img = np.zeros(image.shape).flatten()
adj_img[I] = (img[I] / s_ls) if inverse else (img[I] * s_ls)
adj_img[nI] = (((img[nI] + c_o) / f_s) ** gamma) if inverse else (f_s * img[nI] ** (1 / gamma) - c_o)
adj_img = (adj_img * max_val).reshape(image.shape).astype(image.dtype)
else:
# from https://en.wikipedia.org/wiki/SRGB
if 1:
a = gamma_break
K0 = a / (gamma - 1)
else:
K0 = gamma_break
a = K0 * (gamma - 1)
alpha = 1 + a
th = alpha ** gamma * (gamma - 1) ** (gamma - 1) / a ** (gamma - 1) / gamma ** gamma
lim = K0 if inverse else K0 / th
img = image.flatten() / max_val
I = img <= lim
nI = np.logical_not(I)
adj_img = np.zeros(image.shape).flatten()
adj_img[I] = (img[I] / th) if inverse else (th * img[I])
adj_img[nI] = (((img[nI] + a) / alpha) ** gamma) if inverse else (alpha * img[nI] ** (1 / gamma) - a)
adj_img = (adj_img * max_val).reshape(image.shape).astype(image.dtype)
# adj_img = np.round(adj_img * max_val).reshape(image.shape).astype(image.dtype)
return adj_img
@staticmethod
def apply_point_spread_fn(img, ratio):
# ratio is how many % of power on central pixel
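        # the peak of a normalised 2D Gaussian is 1 / (2*pi*sd^2); solving for the requested central-pixel share gives sd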
sd = 1 / math.sqrt(2 * math.pi * ratio)
size = 1 + 2 * math.ceil(sd * 2)
kernel = ImageProc.gkern2d(size, sd)
cv2.filter2D(img, -1, kernel, img)
return img
@staticmethod
@lru_cache(maxsize=5)
def gkern2d(l=5, sig=1.):
"""
        creates a normalized 2D Gaussian kernel with side length l and standard
        deviation sig; both l and sig may be scalars or (x, y) pairs
"""
w, h = (l[0], l[1]) if '__iter__' in dir(l) else (l, l)
sx, sy = (sig[0], sig[1]) if '__iter__' in dir(sig) else (sig, sig)
ax = np.arange(-w // 2 + 1., w // 2 + 1.)
ay = np.arange(-h // 2 + 1., h // 2 + 1.)
xx, yy = np.meshgrid(ax, ay)
kernel = np.exp(-((xx / sx) ** 2 + (yy / sy) ** 2) / 2)
return kernel / np.sum(kernel)
@staticmethod
def bsphkern(l=5):
"""
        creates a binary disc-shaped ("spherical") kernel with diameter l
"""
gkern = ImageProc.gkern2d(l=l, sig=l)
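        # keep the cells whose Gaussian weight is at least (just under) the value at the edge midpoint, giving a filled disc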
limit = gkern[l // 2 if isinstance(l, int) else l[1] // 2, -1] * 0.995
return np.array(gkern >= limit, dtype=np.uint8)
@staticmethod
def fuzzy_kernel(kernel, sig):
w = int(sig // 2)
skernel = np.zeros(tuple(np.array(kernel.shape[:2]) + int(sig)) + kernel.shape[2:3], dtype=kernel.dtype)
skernel[w:w + kernel.shape[0], w:w + kernel.shape[1]] = kernel
gkrn = ImageProc.gkern2d(sig, sig / 2)
skernel = cv2.filter2D(skernel, kernel.shape[2], gkrn)
return skernel
@staticmethod
def _img_max_valid(img):
max = 1.0 if 'float' in str(img.dtype) else 255
assert max != 255 or img.dtype == np.uint8, 'wrong datatype for image: %s' % img.dtype
return max
@staticmethod
def add_stars(img, mask, coef=2, cache=False):
# add power law distributed stars to image
assert img.shape == img.shape[:2], 'works only with grayscale images'
if not cache:
ImageProc._cached_random_stars.cache_clear()
stars = ImageProc._cached_random_stars(coef, img.shape)
# can be over 255, will clip later
img[mask] = np.clip(stars[mask], 0, 600)
return img
@staticmethod
@lru_cache(maxsize=1)
def _cached_random_stars(coef, shape):
return np.random.pareto(coef, shape)
@staticmethod
def add_sensor_noise(img, mean=7, sd=2, cache=False):
if not cache:
ImageProc._cached_sensor_noise.cache_clear()
img += ImageProc._cached_sensor_noise(mean, sd, img.shape)
return img
@staticmethod
@lru_cache(maxsize=1)
def _cached_sensor_noise(mean, sd, shape):
return np.random.normal(mean, sd, shape)
@staticmethod
def process_target_image(image_src):
hist = cv2.calcHist([image_src], [0], None, [256], [0, 256])
if False:
threshold_value = ImageProc.optimal_threshold(hist)
else:
threshold_value = 50
th, image_dst = cv2.threshold(image_src, threshold_value, 255, cv2.THRESH_TOZERO)
return image_dst, hist, threshold_value
@staticmethod
def optimal_threshold(hist, image=None):
if hist is None:
hist = cv2.calcHist([image], [0], None, [256], [0, 256])
tot_px = 256 # sum(hist) -- for some reason get error if divide with pixel count
x = list(range(1, len(hist) + 1))
loghist = np.array(list(map(lambda x: math.log(x + 1) / tot_px, hist)))
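        # model the log-histogram as a gamma-distributed background component plus a Gaussian foreground component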
def fitfun1(p, x):
return stats.gamma.pdf(x, p[0], loc=0, scale=p[1]) * p[2]
def fitfun2(p, x):
return stats.norm.pdf(x, p[0], p[1]) * p[2]
def fitfun(p, x):
return fitfun1(p[:3], x) + fitfun2(p[3:], x)
def errfun(p, x, y):
tmp = y - fitfun(p, x)
# assert False, 'p:%s, x:%s, y:%s, ffval:%s'%(p, x[0:50], y[0:50], fitfun(p, x[0:50]))
return tmp
shape = 1.5
init = [
shape, np.argmax(loghist) / (shape - 1), 1, # for fitfun1
127, 50, 1, # for fitfun2
]
if not BATCH_MODE or DEBUG:
print('init: %s' % init)
out = optimize.leastsq(errfun, init, args=(x, loghist))
ImageProc.latest_opt = out
if not BATCH_MODE or DEBUG:
print('result: %s' % list(out))
# threshold value where background makes up roughly a fourth of all pixels
bg = reversed(fitfun1(out[0][:3], x))
ast = list(reversed(fitfun2(out[0][3:], x)))
threshold_value = 255 - next((i for i, v in enumerate(bg) if v / ast[i] > 0.33), 255 - 100)
if not BATCH_MODE or DEBUG:
bg_ratio = out[0][:3][2] / out[0][3:][2]
print('threshold_value: %s; bg_ratio: %s' % (threshold_value, bg_ratio))
# plot figure with histogram and estimated distributions
if DEBUG:
from matplotlib import pyplot as plt
plt.clf()
plt.plot(x, fitfun1(out[0][:3], x), label='background')
plt.plot(x, fitfun2(out[0][3:], x), label='foreground')
plt.plot(x, fitfun(out[0], x), label='total fit')
plt.plot(x, loghist, label='log(hist)')
plt.legend()
fig = plt.gcf()
fig.canvas.draw()
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
w, h = fig.canvas.get_width_height()
data = data.reshape((h * 3, w * 3, 3)) # for some reason get_width_height returns 1/3 of the actual dims
cv2.imshow('histogram fitting', data)
return threshold_value
@staticmethod
def overlay_mask(image, mask):
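        # scale image and mask to a common height, keep a single colour channel of the mask and alpha-blend the two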
sc_img = min(image.shape[0], mask.shape[0])/image.shape[0]
sc_mask = min(image.shape[0], mask.shape[0])/mask.shape[0]
img_color = cv2.cvtColor(cv2.resize(image, None, fx=sc_img, fy=sc_img, interpolation=cv2.INTER_CUBIC), cv2.COLOR_GRAY2RGB)
mask_color = cv2.cvtColor(cv2.resize((mask > 0).astype(np.uint8)*255, None, fx=sc_mask, fy=sc_mask, interpolation=cv2.INTER_CUBIC), cv2.COLOR_GRAY2RGB)
mask_color[:, :, 0:2] = 0
return cv2.addWeighted(img_color, 0.5, mask_color, 0.5, 0.0)
@staticmethod
def merge(images):
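        # blend the images into an equally weighted running average, folding in one image at a time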
summed_weights = 1
summed_images = images[0]
for i in range(1, len(images)):
summed_images = cv2.addWeighted(summed_images, summed_weights / (summed_weights + 1),
images[i], 1 / (summed_weights + 1), 0.0)
summed_weights += 1
return summed_images
@staticmethod
def norm_xcorr(sce_img, res_img):
""" calculate normalized cross corralation of images """
if sce_img.shape[:2] != res_img.shape[:2]:
sce_img = cv2.resize(sce_img, None,
fx=res_img.shape[1] / sce_img.shape[1],
fy=res_img.shape[0] / sce_img.shape[0],
interpolation=cv2.INTER_CUBIC)
sce_img = np.atleast_3d(sce_img)
res_img = np.atleast_3d(res_img)
sce_mean, sce_std = cv2.meanStdDev(sce_img)
res_mean, res_std = cv2.meanStdDev(res_img)
stds = sce_std * res_std
if stds == 0:
return 0
corr = (sce_img - sce_mean) * (res_img - res_mean)
nxcorr = np.mean(corr) / stds
if False:
# for debugging
tmp = np.log(corr - np.min(corr) + 0.001)
mint = np.min(tmp)
maxt = np.max(tmp)
tmp = (tmp - mint) * (1 / (maxt - mint))
print('sm %.3f, ss %.3f, rm %.3f, rs %.3f, min %.3f, max %.3f, res %.3f' % (
sce_mean, sce_std, res_mean, res_std, mint, maxt, nxcorr))
cv2.imshow('corr', tmp)
cv2.waitKey()
return nxcorr
| mit |
rahulraghatate/sp17-i524 | project/S17-IO-3012/code/bin/benchmark_replicas_find.py | 19 | 5441 | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
"""retrieves mandatory parameter to program
@param: none
@type: n/a
"""
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
"""reads a file into a pandas dataframe
@param: filename The name of the file to read
@type: string
"""
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
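    # 'X' acts as a wildcard: skip filtering on that parameter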
if cloud != 'X':
benchmark_df = benchmark_df[benchmark_df.cloud == cloud]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
# benchmark_df1 = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica']).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
benchmark_df = benchmark_df.groupby(
['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
# print benchmark_df1['shard_replicas']
# print benchmark_df1
# print benchmark_df
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(find_seconds_kilo, replicas_kilo, find_seconds_chameleon, replicas_chameleon, find_seconds_jetstream, replicas_jetstream):
"""formats and creates a line chart
@param1: find_seconds_kilo Array with find_seconds from kilo
@type: numpy array
@param2: replicas_kilo Array with replicas from kilo
@type: numpy array
@param3: find_seconds_chameleon Array with find_seconds from chameleon
@type: numpy array
@param4: replicas_chameleon Array with replicas from chameleon
@type: numpy array
"""
fig = plt.figure()
#plt.title('Average Find Command Runtime by Shard Replication Factor')
plt.ylabel('Runtime in Seconds')
plt.xlabel('Degree of Replication Per Set')
# Make the chart
plt.plot(replicas_kilo, find_seconds_kilo, label='Kilo Cloud')
plt.plot(replicas_chameleon, find_seconds_chameleon, label='Chameleon Cloud')
plt.plot(replicas_jetstream, find_seconds_jetstream, label='Jetstream Cloud')
# http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib
plt.ylim(ymin=0)
plt.legend(loc='best')
# Show the chart (for testing)
# plt.show()
# Save the chart
fig.savefig('../report/replica_find.png')
# Run the program by calling the functions
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
cloud = 'kilo'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
find_seconds_kilo = select_df.as_matrix(columns=[select_df.columns[7]])
replicas_kilo = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'chameleon'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
find_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[7]])
replicas_chameleon = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'jetstream'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
find_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[7]])
replicas_jetstream = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
make_figure(find_seconds_kilo, replicas_kilo, find_seconds_chameleon, replicas_chameleon, find_seconds_jetstream, replicas_jetstream)
| apache-2.0 |
nightjean/Deep-Learning | tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py | 116 | 5164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
AsimmHirani/ISpyPi | tensorflow/contrib/tensorflow-master/tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 3 | 18489 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import time
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
from tensorflow.contrib import factorization
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
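  # scatter points around randomly assigned centers; also return the assignments and each point's squared offset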
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments,
np.add.reduce(offsets * offsets, 1))
class KMeansTestBase(test.TestCase):
def input_fn(self, batch_size=None, points=None, randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(self.num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies([q.enqueue_many(
math_ops.range(self.num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0,
_init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(self.batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig(tf_random_seed=tf_random_seed)
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT,
distance_metric=factorization.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT,
distance_metric=factorization.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Make a small test set
num_points = 10
points, true_assignments, true_offsets = make_random_points(clusters,
num_points)
# Test predict
assignments = list(kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
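    # expected squared Euclidean distances, expanded as |x|^2 - 2*x.c + |c|^2 and clipped at zero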
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) - 2 * np.dot(
points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_fit_raise_if_num_clusters_larger_than_num_points_random_init(self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError('less'):
kmeans = learn.KMeansClustering(
num_clusters=3,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
initial_clusters=factorization.RANDOM_INIT)
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None),
steps=10)
def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(
self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError(AssertionError):
kmeans = learn.KMeansClustering(
num_clusters=3,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT)
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None),
steps=10)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(
normalize(self.points)[0:4, :], axis=0, keepdims=True))[0],
normalize(
np.mean(
normalize(self.points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=factorization.RANDOM_INIT,
distance_metric=factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.fit(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.clusters())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = self.kmeans.transform(input_fn=self.input_fn(
batch_size=self.num_points))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
assignments = list(self.kmeans.predict_cluster_idx(
input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(input_fn=self.input_fn(
batch_size=self.num_points), steps=1)
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
# Most points are concetrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(
normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(
normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(np.mean(
normalize(points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT,
distance_metric=factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.clusters())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (
input_lib.limit_epochs(constant_op.constant(points), num_epochs=1),
None)
assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=factorization.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
config=run_config.RunConfig(tf_random_seed=3))
tf_kmeans.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=50,
relative_tolerance=1e-6)
_ = tf_kmeans.clusters()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None),
steps=1))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
if __name__ == '__main__':
test.main()
| apache-2.0 |
edhuckle/statsmodels | statsmodels/tsa/filters/_utils.py | 29 | 4391 | from functools import wraps
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.base import datetools
from statsmodels.tsa.tsatools import freq_to_period
def _get_pandas_wrapper(X, trim_head=None, trim_tail=None, names=None):
index = X.index
#TODO: allow use index labels
if trim_head is None and trim_tail is None:
index = index
elif trim_tail is None:
index = index[trim_head:]
elif trim_head is None:
index = index[:-trim_tail]
else:
index = index[trim_head:-trim_tail]
if hasattr(X, "columns"):
if names is None:
names = X.columns
return lambda x : X.__class__(x, index=index, columns=names)
else:
if names is None:
names = X.name
return lambda x : X.__class__(x, index=index, name=names)
def _maybe_get_pandas_wrapper(X, trim_head=None, trim_tail=None):
"""
If using pandas returns a function to wrap the results, e.g., wrapper(X)
trim is an integer for the symmetric truncation of the series in some
filters.
otherwise returns None
"""
if _is_using_pandas(X, None):
return _get_pandas_wrapper(X, trim_head, trim_tail)
else:
return
def _maybe_get_pandas_wrapper_freq(X, trim=None):
if _is_using_pandas(X, None):
index = X.index
func = _get_pandas_wrapper(X, trim)
freq = index.inferred_freq
return func, freq
else:
return lambda x : x, None
def pandas_wrapper(func, trim_head=None, trim_tail=None, names=None, *args,
**kwargs):
@wraps(func)
def new_func(X, *args, **kwargs):
# quick pass-through for do nothing case
if not _is_using_pandas(X, None):
return func(X, *args, **kwargs)
wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail,
names)
ret = func(X, *args, **kwargs)
ret = wrapper_func(ret)
return ret
return new_func
def pandas_wrapper_bunch(func, trim_head=None, trim_tail=None,
names=None, *args, **kwargs):
@wraps(func)
def new_func(X, *args, **kwargs):
# quick pass-through for do nothing case
if not _is_using_pandas(X, None):
return func(X, *args, **kwargs)
wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail,
names)
ret = func(X, *args, **kwargs)
ret = wrapper_func(ret)
return ret
return new_func
def pandas_wrapper_predict(func, trim_head=None, trim_tail=None,
columns=None, *args, **kwargs):
pass
def pandas_wrapper_freq(func, trim_head=None, trim_tail=None,
freq_kw='freq', columns=None, *args, **kwargs):
"""
Return a new function that catches the incoming X, checks if it's pandas,
    calls the function as is. Then wraps the results in the incoming index.
Deals with frequencies. Expects that the function returns a tuple,
a Bunch object, or a pandas-object.
"""
@wraps(func)
def new_func(X, *args, **kwargs):
# quick pass-through for do nothing case
if not _is_using_pandas(X, None):
return func(X, *args, **kwargs)
wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail,
columns)
index = X.index
freq = index.inferred_freq
kwargs.update({freq_kw : freq_to_period(freq)})
ret = func(X, *args, **kwargs)
ret = wrapper_func(ret)
return ret
return new_func
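# Illustrative sketch of how the wrapper above might be applied; the 'demean'
# filter and the monthly index are made up for illustration and are not used
# anywhere else in this module.
def _example_pandas_wrapper_freq():
    import numpy as np
    import pandas as pd
    def demean(x, freq=None):  # 'freq' receives the inferred period, unused here
        return x - x.mean()
    wrapped = pandas_wrapper_freq(demean)
    series = pd.Series(np.arange(12.),
                       index=pd.date_range('2000-01-31', periods=12, freq='M'))
    return wrapped(series)  # a Series carrying the original DatetimeIndex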
def dummy_func(X):
return X
def dummy_func_array(X):
return X.values
def dummy_func_pandas_columns(X):
return X.values
def dummy_func_pandas_series(X):
return X['A']
import pandas as pd
import numpy as np
def test_pandas_freq_decorator():
X = pd.util.testing.makeDataFrame()
# in X, get a function back that returns an X with the same columns
func = pandas_wrapper(dummy_func)
np.testing.assert_equal(func(X.values), X)
func = pandas_wrapper(dummy_func_array)
pd.util.testing.assert_frame_equal(func(X), X)
expected = X.rename(columns=dict(zip('ABCD', 'EFGH')))
func = pandas_wrapper(dummy_func_array, names=list('EFGH'))
pd.util.testing.assert_frame_equal(func(X), expected)
| bsd-3-clause |
ybroze/trading-with-python | cookbook/getDataFromYahooFinance.py | 77 | 1391 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 18:37:23 2011
@author: jev
"""
from urllib import urlretrieve
from urllib2 import urlopen
from pandas import Index, DataFrame
from datetime import datetime
import matplotlib.pyplot as plt
sDate = (2005,1,1)
eDate = (2011,10,1)
symbol = 'SPY'
fName = symbol+'.csv'
try: # try to load saved csv file, otherwise get from the net
fid = open(fName)
lines = fid.readlines()
fid.close()
print 'Loaded from ' , fName
except Exception as e:
print e
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
print 'Downloading from ', urlStr
urlretrieve(urlStr,symbol+'.csv')
lines = urlopen(urlStr).readlines()
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
df.plot(secondary_y=['volume'])
| bsd-3-clause |
jakobworldpeace/scikit-learn | sklearn/svm/tests/test_sparse.py | 63 | 13366 | import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_raises, assert_true, assert_false,
assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits because iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
ngoix/OCRF | examples/feature_selection/plot_select_from_model_boston.py | 146 | 1527 | """
===================================================
Feature selection using SelectFromModel and LassoCV
===================================================
Use SelectFromModel meta-transformer along with Lasso to select the best
couple of features from the Boston dataset.
"""
# Author: Manoj Kumar <[email protected]>
# License: BSD 3 clause
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
boston = load_boston()
X, y = boston['data'], boston['target']
# We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
clf = LassoCV()
# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Reset the threshold till the number of features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the metatransformer.
while n_features > 2:
sfm.threshold += 0.1
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
# Plot the selected two features from X.
plt.title(
"Features selected from Boston using SelectFromModel with "
"threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
| bsd-3-clause |
mzwiessele/applygpy | applygpy/tests/test_plotting.py | 1 | 4489 | '''
Created on 30 Sep 2015
@author: Max Zwiessele
'''
import matplotlib
from GPy.testing.plotting_tests import flatten_axis as fl, compare_axis_dicts as cm
matplotlib.use('agg')
import matplotlib.pyplot as plt # @UnresolvedImport
import GPy, numpy as np
from applygpy.prediction import PredictionModelSparse, PredictionModel
from io import StringIO
import unittest
class Test(unittest.TestCase):
def setUp(self):
self.X, self.Y = np.random.normal(0, 1, (10, 1)), np.random.normal(0, 1, (10, 1))
pass
def tearDown(self):
plt.close('all')
def testPlotting(self):
m = GPy.models.GPRegression(self.X, self.Y)
p = PredictionModel(m)
fig, ax1 = plt.subplots()
m.plot(plot_training_data=False, ax=ax1)
ax1.set_ylim(0, 1)
ax1.set_xlim(-2, 2)
#i1 = StringIO()
#fig.savefig(i1, format='svg')
#i1.seek(0)
fig, ax2 = plt.subplots()
p.plot(plot_training_data=False, ax=ax2)
ax2.set_ylim(0, 1)
ax2.set_xlim(-2, 2)
#i2 = StringIO()
#fig.savefig(i2, format='svg')
#i2.seek(0)
#self.assertEqual(i1.read(), i2.read())
cm(fl(ax1), fl(ax2))
def testPlottingSparse(self):
m = GPy.models.SparseGPRegression(self.X, self.Y)
p = PredictionModelSparse(m)
fig, ax1 = plt.subplots()
m.plot(plot_training_data=False, ax=ax1)
ax1.set_ylim(0, 1)
ax1.set_xlim(-2, 2)
#i1 = StringIO()
#fig.savefig(i1, format='svg')
#i1.seek(0)
fig, ax2 = plt.subplots()
p.plot(plot_training_data=False, ax=ax2)
ax2.set_ylim(0, 1)
ax2.set_xlim(-2, 2)
#i2 = StringIO()
#fig.savefig(i2, format='svg')
#i2.seek(0)
#self.assertEqual(i1.read(), i2.read())
cm(fl(ax1), fl(ax2))
def testPlottingClass(self):
m = GPy.models.GPClassification(self.X, self.Y<0)
p = PredictionModel(m)
fig, ax1 = plt.subplots()
m.plot(plot_training_data=False, ax=ax1)
ax1.set_ylim(0, 1)
ax1.set_xlim(-2, 2)
#i1 = StringIO()
#fig.savefig(i1, format='svg')
#i1.seek(0)
fig, ax2 = plt.subplots()
p.plot(plot_training_data=False, ax=ax2)
ax2.set_ylim(0, 1)
ax2.set_xlim(-2, 2)
#i2 = StringIO()
#fig.savefig(i2, format='svg')
#i2.seek(0)
#self.assertEqual(i1.read(), i2.read())
cm(fl(ax1), fl(ax2))
def testPlottingSparseClass(self):
m = GPy.models.SparseGPClassification(self.X, self.Y<0)
p = PredictionModelSparse(m)
fig, ax1 = plt.subplots()
m.plot(plot_training_data=False, ax=ax1)
ax1.set_ylim(0, 1)
ax1.set_xlim(-2, 2)
#i1 = StringIO()
#fig.savefig(i1, format='svg')
#i1.seek(0)
fig, ax2 = plt.subplots()
p.plot(plot_training_data=False, ax=ax2)
ax2.set_ylim(0, 1)
ax2.set_xlim(-2, 2)
#i2 = StringIO()
#fig.savefig(i2, format='svg')
#i2.seek(0)
#self.assertEqual(i1.read(), i2.read())
cm(fl(ax1), fl(ax2))
def testPlottingDataNotShow(self):
m = GPy.models.SparseGPRegression(self.X, self.Y)
p = PredictionModelSparse(m)
p.plot_data()
fig, ax1 = plt.subplots()
p.plot(plot_training_data=False, ax=ax1)
ax1.set_ylim(0, 1)
ax1.set_xlim(-2, 2)
#i1 = StringIO()
#fig.savefig(i1, format='svg')
#i1.seek(0)
fig, ax2 = plt.subplots()
p.plot(plot_training_data=True, ax=ax2)
ax2.set_ylim(0, 1)
ax2.set_xlim(-2, 2)
#i2 = StringIO()
#fig.savefig(i2, format='svg')
#i2.seek(0)
cm(fl(ax1), fl(ax2))
m = GPy.models.GPRegression(self.X, self.Y)
p = PredictionModel(m)
p.plot_data()
fig, ax1 = plt.subplots()
p.plot(plot_training_data=False, ax=ax1)
ax1.set_ylim(0, 1)
ax1.set_xlim(-2, 2)
#i1 = StringIO()
#fig.savefig(i1, format='svg')
#i1.seek(0)
fig, ax2 = plt.subplots()
p.plot(plot_training_data=True, ax=ax2)
ax2.set_ylim(0, 1)
ax2.set_xlim(-2, 2)
#i2 = StringIO()
#fig.savefig(i2, format='svg')
#i2.seek(0)
cm(fl(ax1), fl(ax2))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testPlotting']
unittest.main()
| bsd-3-clause |
rogerblandford/Music | Scripts/_PlotMostProbVals.py | 2 | 3033 | #Get the noisless reconstruction
execfile ('_MostProbValsBias.py')
#--------------------------------------------------
# Now plot the most probable values
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy.stats import multivariate_normal
num=10
postn=np.zeros(numreal)
for i in range(numreal):
postn[i] = beatbox.You.all_reconstructed_universes[i].fn[num]
n, bins, patches = plt.hist(postn, 20, normed=1, facecolor='green', alpha=0.75)
plt.xlabel('$f_n$ values for $n=$'+str(num))
plt.ylabel('Probability')
plt.title(r'$\mathrm{Histogram\ of\ most\ probable\ reconstructed\ values:}$')
plt.axis([-0.1, 0.11, 0, 1])
plt.grid(True)
plt.axvline(beatbox.You.all_simulated_universes[-1].fn[num])
plt.axvline(beatbox.You.all_reconstructed_universes[-1].fn[num], color='y')
#N = (2.*pi)**(len(beatbox.You.all_simulated_universes[-1-i].fn)/2.) / (np.linalg.det(beatbox.You.A)**0.5) * np.exp(0.5*np.dot(beatbox.You.all_reconstructed_universes[0].fn.T, np.dot(beatbox.You.inv_A, beatbox.You.all_reconstructed_universes[0].fn)))
#t = np.linspace(-3.,3.,6./200.)
t = np.linspace(-4.5,2,1000)
#plt.plot(t, norm.pdf(t, loc=beatbox.You.all_reconstructed_universes[-1].fn[num], scale=1./np.sqrt(beatbox.You.A[num, num])), 'k-', lw=1, label='frozen pdf')
#plt.plot(t, np.linalg.det(beatbox.You.A)**0.5/(2.*np.pi)**(0.5*len(beatbox.You.all_reconstructed_universes[0].fn)) *np.exp(-0.5*(beatbox.You.all_reconstructed_universes[0].fn[num]-t)**2. * beatbox.You.A[num, num]), 'k-', lw=2, label='frozen pdf')
#xj=np.append(beatbox.You.all_reconstructed_universes[0].fn[:num], beatbox.You.all_reconstructed_universes[0].fn[num+1:])
#Ani=np.append(beatbox.You.A[num, :num], beatbox.You.A[num, 1+num:])
#Aij_1112 = np.append(beatbox.You.A[:num, :num], beatbox.You.A[:num, 1+num:], axis=1)
#Aij_2122 = np.append(beatbox.You.A[1+num:, :num], beatbox.You.A[1+num:, 1+num:], axis=1)
#Aij = np.append(Aij_1112, Aij_2122, axis=0)
#first = (t - 1./beatbox.You.A[num, num]*(beatbox.You.A[num, num]*beatbox.You.all_reconstructed_universes[0].fn[num]-np.dot(Ani,xj)))**2. * beatbox.You.A[num, num]
#second = beatbox.You.all_reconstructed_universes[0].fn[num]**2 * beatbox.You.A[num, num]
#third = (np.dot(Ani, xj))**2 * 1./beatbox.You.A[num, num]
#fourth = np.dot(xj.T , np.dot(Aij,xj))
#plt.plot(t, np.linalg.det(beatbox.You.A)**0.5/(2.*np.pi)**(0.5*len(beatbox.You.all_reconstructed_universes[0].fn)) *np.exp(-0.5 * (first-second+third+fourth )), 'r-', lw=1)
plt.plot(t, norm.pdf(t, loc=beatbox.You.all_reconstructed_universes[-1].fn[num], scale=np.sqrt(beatbox.You.inv_A[num, num])), 'r-', lw=1, label='frozen pdf')
path_to_save='RobustnessAnalysis/rob_plt_lmax'+str(beatbox.Universe.truncated_lmax)+'_lmin'+str(beatbox.Universe.truncated_lmin)+'_nmax'+str(beatbox.Universe.truncated_nmax)+'_nmin'+str(beatbox.Universe.truncated_nmin)
try:
os.makedirs(path_to_save)
except OSError:
if not os.path.isdir(path_to_save):
raise
plt.savefig(path_to_save +'/mostprobvalues_f11.png')
plt.show() | mit |
BSchilperoort/BR-DTS-Processing | data_processing/customFunctions.py | 1 | 3861 | def index_closest_value(search_list,search_value):
import numpy as np
search_list = np.array(search_list)
idx = (np.abs(search_list-search_value)).argmin()
return idx
def runningMean(x, N):
import numpy as np
return np.convolve(x, np.ones((N,))/N, mode='valid')
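# Illustrative usage sketch for the two helpers above; the toy inputs are made up.
def _example_basic_helpers():
    import numpy as np
    idx = index_closest_value([0.0, 0.5, 1.0, 1.5], 0.6)  # -> 1 (0.5 is closest)
    smooth = runningMean(np.arange(5.0), 3)  # -> array([1., 2., 3.])
    return idx, smooth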
def plotVariable(xtime,y,xlabel='x-axis',ylabel='y-axis'):
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
fig, ax = plt.subplots(1)
Plot = ax.plot(xtime,y)
ax.xaxis_date()
dateFormat = mdates.DateFormatter('%d/%m %H:%M:%S')
ax.xaxis.set_major_formatter(dateFormat)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.autofmt_xdate()
def plotMultiple(x_list, y_list, xlabel='x-axis', ylabel='y-axis', x_time=True):
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
if len(x_list) != len(y_list):
print('Not an equal amount of x and y plots')
return
colors=['r','g','b']*5
plotAmount = len(x_list)
fig, ax = plt.subplots(1)
plots = [0]*plotAmount
for i in range(0,plotAmount):
plots[i] = ax.plot(x_list[i], y_list[i], color=colors[i])
if x_time:
ax.xaxis_date()
dateFormat = mdates.DateFormatter('%d/%m %H:%M:%S')
ax.xaxis.set_major_formatter(dateFormat)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.autofmt_xdate()
def timeAveraged2d(avg_list, numAvg):
import numpy as np
#Average an array over time (x axis)
newlength = len(avg_list)//numAvg
new_list = np.empty((newlength,np.shape(avg_list)[1]))
for i in range(0,newlength):
newSum = 0
for j in range(0,numAvg):
newSum += avg_list[i*numAvg+j]
new_list[i][:] = newSum/numAvg
return new_list
def timeAverageMiddle(avg_list, numAvg):
#Average a list in steps
newlength = len(avg_list)//numAvg
new_list = [0]*newlength
for i in range(0,newlength):
new_list[i] = avg_list[i*numAvg+numAvg//2]
return new_list
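# Illustrative sketch of block-averaging with timeAveraged2d; the array is made up.
def _example_time_average():
    import numpy as np
    arr = np.arange(12).reshape(6, 2)
    return timeAveraged2d(arr, 3)  # -> [[2., 3.], [8., 9.]], means of 3-row blocks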
def wetTemperature_opt(Twet_est, Tdry, RH):
#Tdry, RH = args
from math import exp
Es_Tw = 0.61*exp(19.9*Twet_est/(Twet_est+273))
Es_Td = 0.61*exp(19.9*Tdry/(Tdry+273))
Ea = Es_Tw - 0.066*(Tdry-Twet_est)
RHest = Ea/Es_Td*100
return abs(RHest-RH)
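# Minimal sketch (an assumption, not exercised elsewhere in this file): the objective
# above can be minimised over the wet-bulb estimate with scipy, for a given dry-bulb
# temperature and relative humidity.
def _example_wet_bulb(Tdry=25.0, RH=60.0):
    from scipy.optimize import minimize_scalar
    res = minimize_scalar(wetTemperature_opt, bounds=(-40.0, Tdry),
                          args=(Tdry, RH), method='bounded')
    return res.x  # estimated wet-bulb temperature in degrees C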
#Function to add ticks
def addticks(ax,newLocs,newLabels,pos='x'):
import matplotlib.pyplot as plt
# Draw to get ticks
plt.draw()
# Get existing ticks
if pos=='x':
locs = ax.get_xticks().tolist()
labels=[x.get_text() for x in ax.get_xticklabels()]
elif pos =='y':
locs = ax.get_yticks().tolist()
labels=[x.get_text() for x in ax.get_yticklabels()]
else:
print("WRONG pos. Use 'x' or 'y'")
return
# Build dictionary of ticks
Dticks=dict(zip(locs,labels))
# Add/Replace new ticks
for Loc,Lab in zip(newLocs,newLabels):
Dticks[Loc]=Lab
# Get back tick lists
locs=list(Dticks.keys())
labels=list(Dticks.values())
# Generate new ticks
if pos=='x':
ax.set_xticks(locs)
ax.set_xticklabels(labels)
elif pos =='y':
ax.set_yticks(locs)
ax.set_yticklabels(labels)
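# Illustrative usage sketch for addticks; the tick position and label are arbitrary.
def _example_addticks():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    addticks(ax, [0.5], ['half'], pos='x')  # adds one labelled tick on the x-axis
    return fig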
def solarAngleCorrection(mtime):
from pysolar import solar
import pytz
from datetime import datetime
from datetime import timedelta
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
lat, long = 52.2398473,5.6908362
time = mtime - 2/24
date = mdates.num2date(time)
altitude = solar.get_altitude(lat, long, date, elevation = 90)
#correctionFactor = np.cos(altitude*np.pi/180)
correctionFactor = 1/np.tan(altitude*np.pi/180)
return correctionFactor
| mit |
martinshelton/news_pgp_adoption | dateplot.py | 1 | 4328 | # Credit to Randal Olson for the starter code:
# http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/
# I've augmented this script to fit my PGP keyserver data.
import matplotlib.pyplot as plt
import pandas as pd
# Read the data into a pandas DataFrame.
data = pd.read_csv("newsdata.csv")
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# Set the figure size.
plt.figure(figsize=(12, 15))
# Remove the plot frame lines.
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
# Only get tick marks on the left and bottom sides of the graph.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
plt.ylim(0, 202)
plt.xlim(0, 256)
# Adjust font size and spacing of axis ticks.
plt.yticks(range(0, 210, 10), [str(x) for x in range(0, 210, 10)], fontsize=14)
plt.xticks(range(0, 256, 23), [str(x) for x in range(1994, 2018, 2)], fontsize=14)
# Add tick lines for the y-axis.
for y in range(20, 220, 20):
plt.plot(range(0, 256), [y] * len(range(0, 256)), "--", lw=0.5, color="black", alpha=0.4)
# Remove tick marks.
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# I plotted the news orgs in order of the highest % in the final year.
news_orgs = ['CNN', 'New York Times', 'ESPN', 'Huffington Post',
'Fox News', 'Washington Post', 'BuzzFeed',
'USA Today', 'Forbes', 'CNET']
# Plot each line separately with its own color, using tableau20 colors.
for rank, column in enumerate(news_orgs):
plt.plot(data.Months.values,
data[column.replace("\n", " ")].values,
lw=2, color=tableau20[rank])
# We assign labels to each line, and offset their positions so they don't overlap.
y_pos = data[column.replace("\n", " ")].values[-1] - 1
if column == "CNN":
y_pos -= 0.25
elif column == "New York Times":
y_pos += 0.25
elif column == "ESPN":
y_pos += 0.75
elif column == "Huffington Post":
y_pos -= 1.75
elif column == "Fox News":
y_pos += 0.5
elif column == "Washington Post":
y_pos += 1.75
elif column == "BuzzFeed":
y_pos += 0.25
elif column == "USA Today":
y_pos += 0.75
elif column == "Forbes":
y_pos += 0
elif column == "CNET":
y_pos += 0
# Again, make sure that all labels are large enough to be easily read
# by the viewer.
plt.text(257, y_pos, column, fontsize=14, color=tableau20[rank])
# Plot the title.
plt.text(142, 206, "Number of public keys for emails tied to news orgs"
", by organization (12/1994 - 03/2016)", fontsize=17, ha="center")
# Plot some descriptive information and credits at the bottom of the graph.
plt.text(10, -18, "Data source: pgp.mit.edu. Rankings: alexa.com/topsites"
"\nAuthor: Martin Shelton (mshelt.onl / @mshelton)"
"\nNote: credit for matplotlib and design genius goes to Randy Olson"
"\nhttp://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/",
fontsize=10)
# Save the picture as a PNG. It can also be saved as PDF, JPEG, etc. by changing the file extension.
plt.savefig("pgp_adoption.png", bbox_inches="tight")
| gpl-3.0 |
haudren/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
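# Illustration: err_metric is a relative error with an absolute floor, e.g.
# err_metric(np.array([1.1]), np.array([1.0])) is approximately 0.1.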
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
robottwo/pyjstat | pyjstat/pyjstat.py | 1 | 30046 | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired by rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
basestring
except NameError:
basestring = str
class NumpyEncoder(json.JSONEncoder):
"""Custom JSON encoder class for Numpy data types.
"""
def default(self, obj):
if isinstance(obj, np.integer) or isinstance(obj, np.int64):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyEncoder, self).default(obj)
def to_int(variable):
"""Convert variable to integer or string depending on the case.
Args:
variable (string): a string containing a real string or an integer.
Returns:
variable(int, string): an integer or a string, depending on the content\
of variable.
"""
try:
return int(variable)
except ValueError:
return variable
def to_str(variable):
    """Convert variable to a string when it represents an integer.
    Args:
        variable (string): a string containing a real string or an integer.
    Returns:
        variable(string): the string form of the integer, or the original\
        variable if it does not represent an integer.
"""
try:
int(variable)
return str(variable)
except ValueError:
return variable
def check_version_2(dataset):
"""Checks if json-stat version attribute exists and is equal or greater \
than 2.0 for a given dataset.
Args:
dataset (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
Returns:
bool: True if version exists and is equal or greater than 2.0, \
False otherwise. For datasets without the version attribute, \
always return False.
"""
if float(dataset.get('version')) >= 2.0 \
if dataset.get('version') else False:
return True
else:
return False
def unnest_collection(collection, df_list):
"""Unnest collection structure extracting all its datasets and converting \
them to Pandas Dataframes.
Args:
collection (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
df_list (list): list variable which will contain the converted \
datasets.
Returns:
Nothing.
"""
for item in collection['link']['item']:
if item['class'] == 'dataset':
df_list.append(Dataset.read(item['href']).write('dataframe'))
elif item['class'] == 'collection':
nested_collection = request(item['href'])
unnest_collection(nested_collection, df_list)
def check_input(naming):
"""Check and validate input params.
Args:
naming (string): a string containing the naming type (label or id).
Returns:
Nothing
Raises:
ValueError: if the parameter is not in the allowed list.
"""
if naming not in ['label', 'id']:
raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
"""Get dimensions from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
Returns:
dimensions (list): list of pandas data frames with dimension \
category data.
dim_names (list): list of strings with dimension names.
"""
dimensions = []
dim_names = []
if check_version_2(js_dict):
dimension_dict = js_dict
else:
dimension_dict = js_dict['dimension']
for dim in dimension_dict['id']:
dim_name = js_dict['dimension'][dim]['label']
if not dim_name:
dim_name = dim
if naming == 'label':
dim_label = get_dim_label(js_dict, dim)
dimensions.append(dim_label)
dim_names.append(dim_name)
else:
dim_index = get_dim_index(js_dict, dim)
dimensions.append(dim_index)
dim_names.append(dim)
return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
"""Get label from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_label(pandas.DataFrame): DataFrame with label-based dimension data.
"""
if input == 'dataset':
input = js_dict['dimension'][dim]
label_col = 'label'
elif input == 'dimension':
label_col = js_dict['label']
input = js_dict
else:
raise ValueError
try:
dim_label = input['category']['label']
except KeyError:
dim_index = get_dim_index(js_dict, dim)
dim_label = pd.concat([dim_index['id'],
dim_index['id']],
axis=1)
dim_label.columns = ['id', 'label']
else:
dim_label = pd.DataFrame(list(zip(dim_label.keys(),
dim_label.values())),
index=dim_label.keys(),
columns=['id', label_col])
# index must be added to dim label so that it can be sorted
try:
dim_index = input['category']['index']
except KeyError:
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
if pd.__version__ <= '0.16.2':
dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')
else:
dim_label = pd.merge(dim_label, dim_index, on='id').sort_values(by='index')
return dim_label
def get_dim_index(js_dict, dim):
"""Get index from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_index (pandas.DataFrame): DataFrame with index-based dimension data.
"""
try:
dim_index = js_dict['dimension'][dim]['category']['index']
except KeyError:
dim_label = get_dim_label(js_dict, dim)
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
if pd.__version__ < '0.17':
dim_index = dim_index.sort_index(by='index')
else:
dim_index = dim_index.sort_values(by='index')
return dim_index
def get_values(js_dict, value='value'):
"""Get values from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
values (list): list of dataset values.
"""
values = js_dict[value]
if type(values) is list:
if type(values[0]) is not dict or tuple:
return values
# being not a list of dicts or tuples leaves us with a dict...
values = {int(key): value for (key, value) in values.items()}
if js_dict.get('size'):
max_val = np.prod(np.array((js_dict['size'])))
else:
max_val = np.prod(np.array((js_dict['dimension']['size'])))
vals = max_val * [None]
for (key, value) in values.items():
vals[key] = value
values = vals
return values
def get_df_row(dimensions, naming='label', i=0, record=None):
"""Generate row dimension values for a pandas dataframe.
Args:
dimensions (list): list of pandas dataframes with dimension labels \
generated by get_dim_label or get_dim_index methods.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
i (int): dimension list iteration index. Default is 0, it's used in the \
recursive calls to the method.
record (list): list of values representing a pandas dataframe row, \
except for the value column. Default is empty, it's used \
in the recursive calls to the method.
Yields:
list: list with pandas dataframe column values except for value column
"""
check_input(naming)
if i == 0 or record is None:
record = []
for dimension in dimensions[i][naming]:
record.append(dimension)
if len(record) == len(dimensions):
yield record
if i + 1 < len(dimensions):
for row in get_df_row(dimensions, naming, i + 1, record):
yield row
if len(record) == i + 1:
record.pop()
def uniquify(seq):
"""Return unique values in a list in the original order. See: \
http://www.peterbe.com/plog/uniqifiers-benchmark
Args:
seq (list): original list.
Returns:
list: list without duplicates preserving original order.
"""
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
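# Illustration: uniquify keeps the first occurrence of each element,
# e.g. uniquify([3, 1, 3, 2, 1]) == [3, 1, 2].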
def generate_df(js_dict, naming, value="value"):
"""Decode JSON-stat dict into pandas.DataFrame object. Helper method \
that should be called inside from_json_stat().
Args:
js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
previously deserialized into a python object by \
json.load() or json.loads(), for example.
naming(string): dimension naming. Possible values: 'label' or 'id.'
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
output(DataFrame): pandas.DataFrame with converted data.
"""
values = []
dimensions, dim_names = get_dimensions(js_dict, naming)
values = get_values(js_dict, value=value)
output = pd.DataFrame([category + [values[i]]
for i, category in
enumerate(get_df_row(dimensions, naming))])
output.columns = dim_names + [value]
output.index = range(0, len(values))
return output
def from_json_stat(datasets, naming='label', value='value'):
"""Decode JSON-stat formatted data into pandas.DataFrame object.
Args:
datasets(OrderedDict, list): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(), for example.\
Both List and OrderedDict are accepted \
as inputs.
naming(string, optional): dimension naming. Possible values: 'label'
or 'id'.Defaults to 'label'.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
results(list): list of pandas.DataFrame with imported data.
"""
    warnings.warn(
        "Shouldn't use this function anymore! Now use read() methods of "
        "Dataset, Collection or Dimension.",
DeprecationWarning
)
check_input(naming)
results = []
if type(datasets) is list:
for idx, element in enumerate(datasets):
for dataset in element:
js_dict = datasets[idx][dataset]
results.append(generate_df(js_dict, naming, value))
elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
isinstance(datasets, Dataset):
if 'class' in datasets:
if datasets['class'] == 'dataset':
js_dict = datasets
results.append(generate_df(js_dict, naming, value))
else: # 1.00 bundle type
for dataset in datasets:
js_dict = datasets[dataset]
results.append(generate_df(js_dict, naming, value))
return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
"""Encode pandas.DataFrame object into JSON-stat format. The DataFrames
must have exactly one value column.
Args:
df(pandas.DataFrame): pandas data frame (or list of data frames) to
encode.
value (string, optional): name of the value column. Defaults to 'value'.
output(string): accepts two values: 'list' or 'dict'. Produce list of\
dicts or dict of dicts as output.
version(string): desired json-stat version. 2.0 is preferred now.\
Apart from this, only older 1.3 format is accepted,
which is the default parameter in order to preserve
backwards compatibility.
Returns:
output(string): String with JSON-stat object.
"""
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of "
        "Dataset, Collection or Dimension.",
DeprecationWarning
)
data = []
if output == 'list':
result = []
elif output == 'dict':
result = OrderedDict({})
if isinstance(input_df, pd.DataFrame):
data.append(input_df)
else:
data = input_df
for row, dataframe in enumerate(data):
dims = data[row].filter([item for item in data[row].columns.values
if item not in value])
if len(dims.columns.values) != len(set(dims.columns.values)):
raise ValueError('Non-value columns must constitute a unique ID')
dim_names = list(dims)
categories = [{to_int(i):
{"label": to_str(i),
"category":
{"index":
OrderedDict([(to_str(j), to_int(k))
for k, j in enumerate(
uniquify(dims[i]))]),
"label":
OrderedDict([(to_str(j), to_str(j))
for k, j in enumerate(
uniquify(dims[i]))])}}}
for i in dims.columns.values]
if float(version) >= 2.0:
dataset = {"dimension": OrderedDict(),
value: [None if np.isnan(x) else x
for x in dataframe[value].values]}
dataset["version"] = version
dataset["class"] = "dataset"
for category in categories:
dataset["dimension"].update(category)
dataset.update({"id": dim_names})
dataset.update({"size": [len(dims[i].unique())
for i in dims.columns.values]})
for category in categories:
dataset["dimension"].update(category)
else:
dataset = {"dataset" +
str(row + 1):
{"dimension": OrderedDict(),
value: [None if np.isnan(x) else x
for x in dataframe[value].values]}}
for category in categories:
dataset["dataset" + str(row + 1)][
"dimension"].update(category)
dataset["dataset" + str(row + 1)][
"dimension"].update({"id": dim_names})
dataset["dataset" + str(row + 1)][
"dimension"].update({"size": [len(dims[i].unique())
for i in dims.columns.values]})
for category in categories:
dataset["dataset" + str(row + 1)][
"dimension"].update(category)
if output == 'list':
result.append(dataset)
elif output == 'dict':
result.update(dataset)
else:
result = None
return json.dumps(result, cls=NumpyEncoder)
def request(path):
"""Send a request to a given URL accepting JSON format and return a \
deserialized Python object.
Args:
path (str): The URI to be requested.
Returns:
response: Deserialized JSON Python object.
Raises:
HTTPError: the HTTP error returned by the requested server.
InvalidURL: an invalid URL has been requested.
Exception: generic exception.
"""
headers = {'Accept': 'application/json'}
try:
requested_object = requests.get(path, headers=headers)
requested_object.raise_for_status()
except requests.exceptions.HTTPError as exception:
LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
str(exception.response.status_code) + ' ' +
str(exception.response.reason) + ' ' + str(path))
raise
except requests.exceptions.InvalidURL as exception:
LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
raise
except Exception:
import traceback
LOGGER.error('Generic exception: ' + traceback.format_exc())
raise
else:
response = requested_object.json()
return response
class Dataset(OrderedDict):
"""A class representing a JSONstat dataset.
"""
def __init__(self, *args, **kwargs):
super(Dataset, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file or
OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON file, a JSON string,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dataset populated with data.
"""
if isinstance(data, pd.DataFrame):
return cls((json.loads(
to_json_stat(data, output='dict', version='2.0'),
object_pairs_hook=OrderedDict)))
elif isinstance(data, OrderedDict):
return cls(data)
elif (isinstance(data, basestring)
and data.startswith(("http://", "https://",
"ftp://", "ftps://"))):
# requests will do the rest...
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'. Default to
'jsonstat'.
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return from_json_stat(self)[0]
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
    def get_dimension_index(self, name, value):
        """Converts a dimension ID string and a category ID string into the \
numeric index of that category in that dimension
Args:
name(string): ID string of the dimension.
value(string): ID string of the category.
Returns:
ndx[value](int): index of the category in the dimension.
"""
if 'index' not in self.get('dimension', {}). \
get(name, {}).get('category', {}):
return 0
ndx = self['dimension'][name]['category']['index']
if isinstance(ndx, list):
return ndx.index(value)
else:
return ndx[value]
def get_dimension_indices(self, query):
"""Converts a dimension/category list of dicts into a list of \
dimensions’ indices.
Args:
query(list): dimension/category list of dicts.
Returns:
indices(list): list of dimensions' indices.
"""
ids = self['id'] if self.get('id') else self['dimension']['id']
indices = []
for idx, id in enumerate(ids):
indices.append(self.get_dimension_index(id,
[d.get(id) for d in query
if id in d][0]))
return indices
def get_value_index(self, indices):
"""Converts a list of dimensions’ indices into a numeric value index.
Args:
indices(list): list of dimension's indices.
Returns:
num(int): numeric value index.
"""
size = self['size'] if self.get('size') else self['dimension']['size']
ndims = len(size)
mult = 1
num = 0
for idx, dim in enumerate(size):
mult *= size[ndims - idx] if (idx > 0) else 1
num += mult * indices[ndims - idx - 1]
return num
def get_value_by_index(self, index):
"""Converts a numeric value index into its data value.
Args:
index(int): numeric value index.
Returns:
self['value'][index](float): Numeric data value.
"""
return self['value'][index]
def get_value(self, query):
"""Converts a dimension/category list of dicts into a data value \
in three steps.
Args:
query(list): list of dicts with the desired query.
Returns:
value(float): numeric data value.
"""
indices = self.get_dimension_indices(query)
index = self.get_value_index(indices)
value = self.get_value_by_index(index)
return value
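# Illustrative sketch: querying a value from a small hand-built JSON-stat 2.0
# dataset; the dimension and category ids below are made up.
def _example_dataset_query():
    ds = Dataset.read(json.dumps({
        "version": "2.0", "class": "dataset",
        "id": ["sex", "year"], "size": [2, 2],
        "dimension": {
            "sex": {"label": "sex", "category": {"index": ["M", "F"]}},
            "year": {"label": "year", "category": {"index": ["2010", "2011"]}}},
        "value": [1.0, 2.0, 3.0, 4.0]}))
    # indices [1, 0] -> value index 1 * 2 + 0 = 2 -> 3.0
    return ds.get_value([{"sex": "F"}, {"year": "2010"}])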
class Dimension(OrderedDict):
"""A class representing a JSONstat dimension.
"""
def __init__(self, *args, **kwargs):
super(Dimension, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file
or OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON string, a JSON file,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dimension populated with data.
"""
if isinstance(data, pd.DataFrame):
output = OrderedDict({})
output['version'] = '2.0'
output['class'] = 'dimension'
[label] = [x for x in list(data.columns.values) if
x not in ['id', 'index']]
output['label'] = label
output['category'] = OrderedDict({})
output['category']['index'] = data.id.tolist()
output['category']['label'] = OrderedDict(
zip(data.id.values, data[label].values))
return cls(output)
elif isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring) and data.startswith(("http://",
"https://",
"ftp://",
"ftps://")):
return cls(request(data))
        elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return get_dim_label(self, self['label'], 'dimension')
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
"""A class representing a JSONstat collection.
"""
def __init__(self, *args, **kwargs):
super(Collection, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL or OrderedDict.
Args:
data: can be a URL pointing to a JSONstat file, a JSON string
or an OrderedDict.
Returns:
An object of class Collection populated with data.
"""
if isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring)\
and data.startswith(("http://", "https://", "ftp://", "ftps://")):
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Collection object to JSONstat or list of \
Pandas Dataframes.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
            Serialized JSONstat or a list of Pandas Dataframes, depending on \
            the 'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(self)
elif output == 'dataframe_list':
df_list = []
unnest_collection(self, df_list)
return df_list
else:
raise ValueError(
"Allowed arguments are 'jsonstat' or 'dataframe_list'")
def get(self, element):
"""Gets ith element of a collection in an object of the corresponding \
class.
        Args:
            element(int): index of the element in the collection's item list.
        Returns:
            An object of class Dataset, Collection or Dimension, depending on
            the class of the referenced element.
"""
if self['link']['item'][element]['class'] == 'dataset':
return Dataset.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'collection':
return Collection.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'dimension':
return Dimension.read(self['link']['item'][element]['href'])
else:
raise ValueError(
"Class not allowed. Please use dataset, collection or "
"dimension'")
| apache-2.0 |
Monika319/EWEF-1 | Cw2Rezonans/Karolina/Oscyloskop/OscyloskopZ5W1.py | 1 | 1298 | # -*- coding: utf-8 -*-
"""
Plot oscilloscope files from MultiSim
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from matplotlib import rc
rc('font',family="Consolas")
files=["real_zad5_1f.txt"]
for NazwaPliku in files:
print NazwaPliku
Plik=open(NazwaPliku)
#print DeltaT
Dane=Plik.readlines()#[4:]
DeltaT=float(Dane[2].split()[3].replace(",","."))
#M=len(Dane[4].split())/2
M=2
Dane=Dane[5:]
Plik.close()
print M
Ys=[np.zeros(len(Dane)) for i in range(M)]
for m in range(M):
for i in range(len(Dane)):
try:
Ys[m][i]=float(Dane[i].split()[2+3*m].replace(",","."))
except:
print m, i, 2+3*m, len(Dane[i].split()), Dane[i].split()
#print i, Y[i]
X=np.zeros_like(Ys[0])
for i in range(len(X)):
X[i]=i*DeltaT
for y in Ys:
print max(y)-min(y)
Opis=u"Układ szeregowy\nCzęstotliwośc rezonansowa"
Nazwa=u"Z5W1"
plt.title(u"Przebieg napięciowy\n"+Opis)
plt.xlabel(u"Czas t [s]")
plt.ylabel(u"Napięcie [V]")
plt.plot(X,Ys[0],label=u"Wejście")
plt.plot(X,Ys[1],label=u"Wyjście")
plt.grid()
plt.legend(loc="best")
plt.savefig(Nazwa + ".png", bbox_inches='tight')
plt.show()
| gpl-2.0 |
mrcslws/nupic.research | projects/sdr_math/plot_numerical_results.py | 3 | 5606 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This uses plotly to create a nice looking graph of average false positive
# error rates as a function of N, the dimensionality of the vectors. I'm sorry
# this code is so ugly.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt # noqa E402 I001
# Observed vs theoretical error values
# a=64 cells active, s=24 synapses on segment, dendritic threshold is theta=12
experimental_errors_a64 = [1.09318E-03, 5.74000E-06, 1.10000E-07]
theoretical_errors_a64 = [0.00109461662333690, 5.69571108769533e-6,
1.41253230930730e-7]
# a=128 cells active, s=24 synapses on segment, dendritic threshold is theta=12
experimental_errors_a128 = [0.292048, 0.00737836, 0.00032014, 0.00002585,
0.00000295, 0.00000059, 0.00000013, 0.00000001,
0.00000001]
theoretical_errors_a128 = [0.292078213737764, 0.00736788303358289,
0.000320106080889471, 2.50255519815378e-5,
2.99642102590114e-6,
4.89399786076359e-7, 1.00958512780931e-7,
2.49639031779358e-8,
7.13143762262004e-9]
# a=256 cells active, s=24 synapses on segment, dendritic threshold is theta=12
experimental_errors_a256 = [
9.97368E-01, 6.29267E-01, 1.21048E-01, 1.93688E-02, 3.50879E-03,
7.49560E-04,
1.86590E-04, 5.33200E-05, 1.65000E-05, 5.58000E-06, 2.23000E-06,
9.30000E-07,
3.20000E-07, 2.70000E-07, 7.00000E-08, 4.00000E-08, 2.00000E-08
]
# a=n/2 cells active, s=24 synapses on segment, dendritic threshold is theta=12
errors_dense = [0.584014929308308, 0.582594747080399, 0.582007206016863,
0.581686021979051, 0.581483533877904, 0.581344204898149,
0.581242471033283,
0.581164924569868, 0.581103856001899, 0.581054517612207,
0.581013825794851,
0.580979690688467, 0.580950645707841, 0.580925631309445,
0.580903862938630,
0.580884747253428, 0.580867827216677]
theoretical_errors_a256 = [0.999997973443107, 0.629372754740777,
0.121087724790945, 0.0193597645959856,
0.00350549721741729,
0.000748965962032781, 0.000186510373919969,
5.30069204544174e-5,
1.68542688790000e-5, 5.89560747849969e-6,
2.23767020178735e-6,
9.11225564771580e-7, 3.94475072403605e-7,
1.80169987461924e-7,
8.62734957588259e-8, 4.30835081022293e-8,
2.23380881095835e-8]
list_of_n_values = [300, 500, 700, 900, 1100, 1300, 1500, 1700, 1900, 2100,
2300,
2500, 2700, 2900, 3100, 3300, 3500]
fig, ax = plt.subplots()
fig.suptitle("Match probability for sparse binary vectors")
ax.set_xlabel("Dimensionality (n)")
ax.set_ylabel("Frequency of matches")
ax.set_yscale("log")
ax.scatter(list_of_n_values[0:3], experimental_errors_a64,
           label="a=64 (observed)",
           marker="o", color="black")
ax.scatter(list_of_n_values[0:9], experimental_errors_a128,
           label="a=128 (observed)", marker="o", color="black")
ax.scatter(list_of_n_values, experimental_errors_a256,
           label="a=256 (observed)",
           marker="o", color="black")
ax.plot(list_of_n_values, errors_dense, "k:", label="a=n/2 (predicted)",
        color="black")
ax.plot(list_of_n_values[0:3], theoretical_errors_a64, "k:",
        label="a=64 (predicted)")
ax.plot(list_of_n_values[0:9], theoretical_errors_a128, "k:",
        label="a=128 (predicted)", color="black")
ax.plot(list_of_n_values, theoretical_errors_a256, "k:",
        label="a=256 (predicted)")
ax.annotate(r"$a = 64$", xy=(list_of_n_values[2], theoretical_errors_a64[-1]),
xytext=(-5, 2), textcoords="offset points", ha="right",
color="black")
ax.annotate(r"$a = 128$", xy=(list_of_n_values[8], theoretical_errors_a64[-1]),
ha="center", color="black")
ax.annotate(r"$a = 256$", xy=(list_of_n_values[-1], theoretical_errors_a64[-1]),
xytext=(-10, 0), textcoords="offset points", ha="center",
color="black")
ax.annotate(r"$a = \frac{n}{2}$",
xy=(list_of_n_values[-2], experimental_errors_a256[2]),
xytext=(-10, 0), textcoords="offset points", ha="center",
color="black")
plt.minorticks_off()
plt.grid(True, alpha=0.3)
plt.savefig("images/effect_of_n.pdf")
plt.close()
| agpl-3.0 |
4bic-attic/data_viz | highs_lows.py | 1 | 1170 | import csv
from datetime import datetime
from matplotlib import pyplot as plt
#Get dates and high and low temps from files
# filename = 'sitka_weather_2014.csv'
#use death valley for ERROR CHECKING purposes
filename = 'death_valley_2014.csv'
with open(filename) as f:
reader = csv.reader(f)
header_row = next(reader)
dates, highs, lows = [], [], []
for row in reader:
try:
current_date = datetime.strptime(row[0], "%Y-%m-%d")
high = int(row[1])
low = int(row[3])
except ValueError:
print(current_date, 'missing data')
else:
dates.append(current_date)
highs.append(high)
lows.append(low)
#plot Data
fig = plt.figure(dpi=128, figsize=(10,6))
plt.plot(dates, highs, c='red')
plt.plot(dates, lows, c='blue')
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)
#format Plot
title = "Daily High and Low Temperatures, 2014\nDeath Valley, CA"
plt.title(title, fontsize=18)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel("Temperature (F)", fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
| mit |
aje/POT | examples/plot_OT_L1_vs_L2.py | 3 | 5000 | # -*- coding: utf-8 -*-
"""
==========================================
2D Optimal transport for different metrics
==========================================
2D OT on empirical distributions with different ground metrics.
Stole the figure idea from Fig. 1 and 2 in
https://arxiv.org/pdf/1706.07650.pdf
"""
# Author: Remi Flamary <[email protected]>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
##############################################################################
# Dataset 1 : uniform sampling
# ----------------------------
n = 20 # nb samples
xs = np.zeros((n, 2))
xs[:, 0] = np.arange(n) + 1
xs[:, 1] = (np.arange(n) + 1) * -0.001 # to make it strictly convex...
xt = np.zeros((n, 2))
xt[:, 1] = np.arange(n) + 1
a, b = ot.unif(n), ot.unif(n) # uniform distribution on samples
# loss matrix
M1 = ot.dist(xs, xt, metric='euclidean')
M1 /= M1.max()
# loss matrix
M2 = ot.dist(xs, xt, metric='sqeuclidean')
M2 /= M2.max()
# loss matrix
Mp = np.sqrt(ot.dist(xs, xt, metric='euclidean'))
Mp /= Mp.max()
# Data
pl.figure(1, figsize=(7, 3))
pl.clf()
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
pl.title('Source and target distributions')
# Cost matrices
pl.figure(2, figsize=(7, 3))
pl.subplot(1, 3, 1)
pl.imshow(M1, interpolation='nearest')
pl.title('Euclidean cost')
pl.subplot(1, 3, 2)
pl.imshow(M2, interpolation='nearest')
pl.title('Squared Euclidean cost')
pl.subplot(1, 3, 3)
pl.imshow(Mp, interpolation='nearest')
pl.title('Sqrt Euclidean cost')
pl.tight_layout()
##############################################################################
# Dataset 1 : Plot OT Matrices
# ----------------------------
#%% EMD
G1 = ot.emd(a, b, M1)
G2 = ot.emd(a, b, M2)
Gp = ot.emd(a, b, Mp)
# OT matrices
pl.figure(3, figsize=(7, 3))
pl.subplot(1, 3, 1)
ot.plot.plot2D_samples_mat(xs, xt, G1, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT Euclidean')
pl.subplot(1, 3, 2)
ot.plot.plot2D_samples_mat(xs, xt, G2, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT squared Euclidean')
pl.subplot(1, 3, 3)
ot.plot.plot2D_samples_mat(xs, xt, Gp, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT sqrt Euclidean')
pl.tight_layout()
pl.show()
##############################################################################
# Dataset 2 : Partial circle
# --------------------------
n = 50 # nb samples
xtot = np.zeros((n + 1, 2))
xtot[:, 0] = np.cos(
(np.arange(n + 1) + 1.0) * 0.9 / (n + 2) * 2 * np.pi)
xtot[:, 1] = np.sin(
(np.arange(n + 1) + 1.0) * 0.9 / (n + 2) * 2 * np.pi)
xs = xtot[:n, :]
xt = xtot[1:, :]
a, b = ot.unif(n), ot.unif(n) # uniform distribution on samples
# loss matrix
M1 = ot.dist(xs, xt, metric='euclidean')
M1 /= M1.max()
# loss matrix
M2 = ot.dist(xs, xt, metric='sqeuclidean')
M2 /= M2.max()
# loss matrix
Mp = np.sqrt(ot.dist(xs, xt, metric='euclidean'))
Mp /= Mp.max()
# Data
pl.figure(4, figsize=(7, 3))
pl.clf()
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
pl.title('Source and target distributions')
# Cost matrices
pl.figure(5, figsize=(7, 3))
pl.subplot(1, 3, 1)
pl.imshow(M1, interpolation='nearest')
pl.title('Euclidean cost')
pl.subplot(1, 3, 2)
pl.imshow(M2, interpolation='nearest')
pl.title('Squared Euclidean cost')
pl.subplot(1, 3, 3)
pl.imshow(Mp, interpolation='nearest')
pl.title('Sqrt Euclidean cost')
pl.tight_layout()
##############################################################################
# Dataset 2 : Plot OT Matrices
# -----------------------------
#%% EMD
G1 = ot.emd(a, b, M1)
G2 = ot.emd(a, b, M2)
Gp = ot.emd(a, b, Mp)
# OT matrices
pl.figure(6, figsize=(7, 3))
pl.subplot(1, 3, 1)
ot.plot.plot2D_samples_mat(xs, xt, G1, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT Euclidean')
pl.subplot(1, 3, 2)
ot.plot.plot2D_samples_mat(xs, xt, G2, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT squared Euclidean')
pl.subplot(1, 3, 3)
ot.plot.plot2D_samples_mat(xs, xt, Gp, c=[.5, .5, 1])
pl.plot(xs[:, 0], xs[:, 1], '+b', label='Source samples')
pl.plot(xt[:, 0], xt[:, 1], 'xr', label='Target samples')
pl.axis('equal')
# pl.legend(loc=0)
pl.title('OT sqrt Euclidean')
pl.tight_layout()
pl.show()
| mit |
AudreyFrancisco/AliPhysics | PWGPP/FieldParam/fitsol.py | 39 | 8343 | #!/usr/bin/env python
debug = True # enable trace
def trace(x):
global debug
if debug: print(x)
trace("loading...")
from itertools import combinations, combinations_with_replacement
from glob import glob
from math import *
import operator
from os.path import basename
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.feature_selection
import datetime
from functools import reduce  # reduce is not a builtin in Python 3
def prec_from_pathname(path):
if '2k' in path: return 0.002
elif '5k' in path: return 0.005
else: raise AssertionError('Unknown field strengh: %s' % path)
# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]
def combinatrial_vars(vars_str='xyz', length=3):
term_list = []
for l in range(length):
term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])
return term_list
# product :: Num a => [a] -> a
def product(xs):
return reduce(operator.mul, xs, 1) # foldl in Haskell
# (XYZ, "xx") -> XX
def term(dataframe, vars_str):
return product(map(lambda x: dataframe[x], list(vars_str)))
# (f(X), Y) -> (max deviation, max%, avg dev, avg%)
def deviation_stat(fX, Y, prec=0.005):
dev = np.abs(fX - Y)
(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))
(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)
return (max_dev, max_pct, avg_dev, avg_pct)
# IO Df
def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):
sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']
df = pd.read_csv(path, sep=' ', names=sample_cols)
if cylindrical_axis:
df['r'] = np.sqrt(df.x**2 + df.y**2)
df['p'] = np.arctan2(df.y, df.x)
df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)
df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)
df['Br'] = df.Bt * np.cos(df.Bpsi)
df['Bp'] = df.Bt * np.sin(df.Bpsi)
if absolute_axis:
df['X'] = np.abs(df.x)
df['Y'] = np.abs(df.y)
df['Z'] = np.abs(df.z)
for var in genvars:
df[var] = term(df, var)
return df
def choose(vars, df1, df2):
X1 = df1.loc[:, vars].as_matrix()
X2 = df2.loc[:, vars].as_matrix()
return (X1, X2)
# IO ()
def run_analysis_for_all_fields():
sample_set = glob("dat_z22/*2k*.sample.dat")
test_set = glob("dat_z22/*2k*.test.dat")
#print(sample_set, test_set)
assert(len(sample_set) == len(test_set) and len(sample_set) > 0)
result = pd.DataFrame()
for i, sample_file in enumerate(sample_set):
trace("run_analysis('%s', '%s')" % (sample_file, test_set[i]))
df = run_analysis(sample_file, test_set[i])
result = result.append(df, ignore_index=True)
write_header(result)
def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',
test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):
global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result
precision = prec_from_pathname(sample_file)
assert(precision == prec_from_pathname(test_file))
xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims
trace("reading training samples... " + sample_file)
df = load_samples(sample_file, genvars=xvars_full)
trace("reading test samples..." + test_file)
test = load_samples(test_file, genvars=xvars_full)
trace("linear regression fit...")
lr = sklearn.linear_model.LinearRegression()
#ri = sklearn.linear_model.RidgeCV()
#la = sklearn.linear_model.LassoCV()
fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)
#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']
#xvars = ["xx", "yy", "zz", 'x', 'y', 'z', 'xzz', 'yzz']
#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']
#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']
yvars = ['Bx', 'By', 'Bz']
#yvars = ['Bz']
(Y, Ytest) = choose(yvars, df, test)
#(Y, Ytest) = (df['Bz'], test['Bz'])
xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power
(X, Xtest) = choose(xvars, df, test)
for y in yvars:
fs.fit(X, df[y])
res = pd.DataFrame({ "term": xvars, "rank": fs.ranking_ })
trace(y)
trace(res.sort_values(by = "rank"))
#xvars=list(res.sort_values(by="rank")[:26]['term'])
lr.fit(X, Y)
trace(', '.join(yvars) + " = 1 + " + ' + '.join(xvars))
test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#for i in range(len(yvars)):
# arr = [lr.intercept_[i]] + lr.coef_[i]
# arr = [ str(x) for x in arr ]
# print(yvars[i] + " = { " + ', '.join(arr) + " }")
# print("deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))
(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
trace("linear regression R^2 [train data]: %.8f" % sample_score)
trace("linear regression R^2 [test data] : %.8f" % test_score)
return pd.DataFrame(
{ "xvars": [xvars],
"yvars": [yvars],
"max_dev": [test_dev[0]],
"max%": [test_dev[1]],
"avg_dev": [test_dev[2]],
"avg%": [test_dev[3]],
"sample_score": [sample_score],
"score": [test_score],
"coeffs": [lr.coef_],
"intercept": [lr.intercept_],
"sample_file": [sample_file],
"test_file": [test_file],
"precision": [precision],
"volume_id": [volume_id_from_path(sample_file)]
})
def volume_id_from_path(path):
return basename(path)\
.replace('.sample.dat', '')\
.replace('-', '_')
def get_location_by_volume_id(id):
if 'its' in id: r_bin = 0
if 'tpc' in id: r_bin = 1
if 'tof' in id: r_bin = 2
if 'tofext' in id: r_bin = 3
if 'cal' in id: r_bin = 4
z_bin = int(id.split('_')[1][1:]) # "tofext2k_z0_q4" -> 0
if 'q1' in id: quadrant = 0
if 'q2' in id: quadrant = 1
if 'q3' in id: quadrant = 2
if 'q4' in id: quadrant = 3
return r_bin, z_bin, quadrant
def write_header(result):
#result.to_csv("magfield_params.csv")
#result.to_html("magfield_params.html")
print("# This file was generated from sysid.py at " + str(datetime.datetime.today()))
print("# " + ', '.join(result.iloc[0].yvars) + " = 1 + " + ' + '.join(result.iloc[0].xvars))
print("# barrel r: 0 < its < 80 < tpc < 250 < tof < 400 < tofext < 423 < cal < 500")
print("# barrel z: -550 < z < 550")
print("# phi: 0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi")
print("# header: Rbin Zbin Quadrant Nval_per_compoment(=20)")
print("# data: Nval_per_compoment x floats")
#print("# R^2: coefficient of determination in multiple linear regression. [0,1]")
print("")
for index, row in result.iterrows():
#print("// ** %s - R^2 %s" % (row.volume_id, row.score))
print("#" + row.volume_id)
r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)
print("%s %s %s 20" % (r_bin, z_bin, quadrant))
for i, yvar in enumerate(row.yvars):
name = row.volume_id #+ '_' + yvar.lower()
print("# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i]))
coef = [row['intercept'][i]] + list(row['coeffs'][i])
arr = [ "%.5e" % x for x in coef ]
body = ' '.join(arr)
#decl = "const double[] %s = { %s };\n" % (name, body)
#print(decl)
print(body)
print("")
#write_header(run_analysis())
run_analysis_for_all_fields()
#for i in range(10):
# for xvars in combinations(xvars_full, i+1):
#(X, Xtest) = choose(xvars, df, test)
#lr.fit(X, Y)
#ri.fit(X, Y)
#la.fit(X, Y)
#fs.fit(X, Y)
#print xvars
#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
#print("linear R^2[sample] %.8f" % sample_score)
#print("linear R^2[test] %.8f" % test_score)
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))
#print("lasso R^2[sample] %.8f" % sample_score2)
#print("lasso R^2[test] %.8f" % test_score2)
#print(la.coef_)
#for i in range(len(yvars)):
# print(yvars[i])
# print(pd.DataFrame({"Name": xvars, "Params": lr.coef_[i]}).sort_values(by='Params'))
# print("+ %e" % lr.intercept_[i])
#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)
#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)
#print("[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % sample_dev)
#print("[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev )
#print("lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev2 )
| bsd-3-clause |
eike-welk/clair | src/clairweb/collect/get_ebay.py | 1 | 40390 | # -*- coding: utf-8 -*-
###############################################################################
# Clair - Project to discover prices on e-commerce sites. #
# #
# Copyright (C) 2013 by Eike Welk #
# [email protected] #
# #
# License: GPL #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
"""
Get listings from Ebay through its API.
"""
import os.path
import math
import json
import logging
from datetime import datetime
from pprint import pformat
import pandas as pd
from ebaysdk.finding import Connection as FConnection
from ebaysdk.shopping import Connection as SConnection
import ebaysdk.exception
import requests.exceptions
from libclair.dataframes import make_data_frame
from libclair.textprocessing import HtmlTool
from econdata.models import Listing
class EbayError(Exception):
pass
def to_str_list(list_or_str):
"""
Convert list of strings to long comma separated string.
A single string is returned unaltered.
"""
if isinstance(list_or_str, str):
return list_or_str
elif isinstance(list_or_str, list):
return ', '.join(list_or_str)
else:
raise TypeError('Expecting list or str.')
class EbayFindingAPIConnector(object):
"""
Abstraction for Ebay's finding API.
Connects to Ebay over the internet and returns listings.
Can search with keywords and can return new listings.
However it can't get all information, especially not the item's description.
Application code should **not** use this class, but rather ``EbayConnector``.
Relevant Ebay documentation here:
http://developer.ebay.com/DevZone/finding/CallRef/index.html
Parameters
-------------
keyfile : str
Name of the configuration file for the ``python-ebay`` library,
that contains the (secret) access keys for the Ebay API.
ebay_site : str
Ebay site (country) where the search is executed.
* Ebay USA: 'EBAY-US'
* Ebay Germany: 'EBAY-DE'
http://developer.ebay.com/Devzone/finding/Concepts/SiteIDToGlobalID.html
ebay_name : str
String that will be put into the ``df['site']`` field of the dataframe.
For example ``'ebay'``.
"""
def __init__(self, keyfile, ebay_site, ebay_name):
assert isinstance(keyfile, (str, type(None)))
assert os.path.isfile(keyfile)
assert isinstance(ebay_site, str)
assert isinstance(ebay_name, str)
self.keyfile = keyfile
self.ebay_site = ebay_site
self.ebay_name = ebay_name
def find_listings(self, keywords, n_listings,
price_min=None, price_max=None, currency="USD",
time_from=None, time_to=None):
"""
Find listings on Ebay by keyword.
Returns only incomplete information: the description is missing.
Calls the Ebay API function 'findItemsAdvanced':
https://developer.ebay.com/devzone/finding/CallRef/findItemsAdvanced.html
Parameters
-----------
keywords : str
Search string in Ebay's searching language. See:
http://pages.ebay.com/help/search/advanced-search.html#using
n_listings : int
Number of listings that Ebay should return. Might return fewer or
slightly more listings.
Currently limited to 100 listings, the maximum number of listings
in one page.
price_min : float
Minimum price for listings, that are returned.
price_max : float
Maximum price for listings, that are returned.
currency : str
Currency unit for ``price_min`` and ``price_max``.
US Dollar: USD
Euro: EUR
https://developer.ebay.com/devzone/finding/CallRef/Enums/currencyIdList.html
time_from : datetime
Earliest end time for listings (auctions) that are returned.
Time is in UTC!
time_to : datetime
Latest end time for listings (auctions) that are returned.
Time is in UTC!
Returns
-------
pandas.DataFrame
Table with one row for each listing. Our Id is the index.
Some columns are empty (especially "description"), because Ebay's
find API call doesn't return this information. These columns
can be filled in with a subsequent call to ``update_listings``.
"""
assert isinstance(keywords, (str))
assert isinstance(n_listings, (int))
assert isinstance(price_min, (float, int, type(None)))
assert isinstance(price_max, (float, int, type(None)))
assert isinstance(currency, (str, type(None)))
assert isinstance(time_from, (datetime, pd.Timestamp, type(None)))
assert isinstance(time_to, (datetime, pd.Timestamp, type(None)))
# Ebay returns a maximum of 100 listings per call (pagination).
# Compute necessary number of calls to Ebay and number of
# listings per call.
max_per_page = 100 # max number of listings per call - Ebay limit
n_pages = math.ceil(n_listings / max_per_page)
n_per_page = round(n_listings / n_pages)
# Call Ebay repeatedly and concatenate results
listings = make_data_frame(Listing, 0)
for i_page in range(1, int(n_pages + 1)):
resp = self._call_find_api(keywords, n_per_page, i_page,
price_min, price_max, currency,
time_from, time_to)
listings_part = self._parse_find_response(resp)
# Stop searching when Ebay returns an empty result.
if len(listings_part) == 0:
break
listings = listings.append(listings_part, ignore_index=True,
verify_integrity=False)
return listings
def _call_find_api(self, keywords, n_per_page, i_page,
price_min=None, price_max=None, currency="USD",
time_from=None, time_to=None):
"""
Perform Ebay API call to find listings on Ebay; by keyword.
Returns only incomplete information: the description is missing.
For documentation on parameters see: ``EbayConnector.find_listings``
* Calls the Ebay API function 'findItemsAdvanced':
https://developer.ebay.com/devzone/finding/CallRef/findItemsAdvanced.html
* Ebay's searching language.
http://pages.ebay.com/help/search/advanced-search.html#using
* Currency unit for ``price_min`` and ``price_max``.
* US Dollar: USD
* Euro: EUR
https://developer.ebay.com/devzone/finding/CallRef/Enums/currencyIdList.html
"""
itemFilters = []
if price_min:
itemFilters += [{'name': 'MinPrice', 'value': price_min,
'paramName': 'Currency', 'paramValue': currency}]
if price_max:
itemFilters += [{'name': 'MaxPrice', 'value': price_max,
'paramName': 'Currency', 'paramValue': currency}]
if time_from:
itemFilters += [{'name': 'EndTimeFrom',
'value': time_from.strftime("%Y-%m-%dT%H:%M:%S.000Z")}]
if time_to:
itemFilters += [{'name': 'EndTimeTo',
'value': time_to.strftime("%Y-%m-%dT%H:%M:%S.000Z")}]
try:
api = FConnection(config_file=self.keyfile, siteid=self.ebay_site)
response = api.execute('findItemsAdvanced',
{'keywords': keywords, 'descriptionSearch': 'true',
'paginationInput': {'entriesPerPage': n_per_page,
'pageNumber': i_page},
'itemFilter': itemFilters,
})
except (ebaysdk.exception.ConnectionError,
requests.exceptions.ConnectionError) as err:
err_text = 'Finding items on Ebay failed! Error: ' + str(err)
logging.error(err_text)
logging.debug(err.response.dict())
raise EbayError(err_text)
# #TODO: react on the following status information
# # Returns the HTTP response code.
# response_code()
# # Returns the HTTP response status
# response_status()
# # Returns an array of eBay response codes
# response_codes()
resp_dict = response.dict()
# Act on resonse status
if resp_dict['ack'] == 'Success':
logging.debug('Successfully called Ebay finding API.')
elif resp_dict['ack'] in ['Warning', 'PartialFailure']:
logging.warning('Ebay finding API returned warning.')
logging.debug(pformat(resp_dict))
else:
logging.error('Ebay finding API returned error.')
logging.debug(pformat(resp_dict))
raise EbayError('Ebay finding API returned error.')
return resp_dict
def _parse_find_response(self, resp_dict):
"""
Parse response from call to Ebay's finding API.
See:
https://developer.ebay.com/devzone/finding/CallRef/findItemsAdvanced.html#Output
"""
# pprint(resp_dict)
eb_items = resp_dict['searchResult']['item']
listings = make_data_frame(Listing, len(eb_items))
for i, item in enumerate(eb_items):
try:
"The ID that uniquely identifies the item listing."
eb_id = item['itemId']
# print('itemId: ' + eb_id)
listings.loc[i, 'id_site'] = eb_id
listings.loc[i, 'title'] = item['title']
listings.loc[i, 'item_url'] = item['viewItemURL']
# https://developer.ebay.com/devzone/finding/CallRef/Enums/conditionIdList.html
listings.loc[i, 'condition'] = self.convert_condition(item['condition']['conditionId'])
listings.loc[i, 'time'] = pd.Timestamp(item['listingInfo']['endTime']).to_datetime64()
# String describing location. For example: 'Pensacola,FL,USA'.
listings.loc[i, 'location'] = item['location']
# ISO currency codes. https://en.wikipedia.org/wiki/ISO_4217
# EUR: Euro; GBP: British Pound; USD: US Dollar.
listings.loc[i, 'currency'] = item_currency = item['sellingStatus']['convertedCurrentPrice']['_currencyId']
listings.loc[i, 'price'] = item['sellingStatus']['convertedCurrentPrice']['value']
try:
# https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
# List of country codes, to which the item can be delivered. For example:
# ['US', 'CA', 'GB', 'AU', 'NO'] or 'Worldwide' or 'US'.
listings.loc[i, 'shipping_locations'] = to_str_list(item['shippingInfo']['shipToLocations'])
eb_shipping_currency = item['shippingInfo']['shippingServiceCost']['_currencyId']
assert eb_shipping_currency == item_currency, \
'Prices in a listing must be of the same currency.'
listings.loc[i, 'shipping_price'] = item['shippingInfo']['shippingServiceCost']['value']
except KeyError as err:
logging.debug('Missing field in "shippingInfo": ' + str(err))
# https://developer.ebay.com/devzone/finding/CallRef/types/ItemFilterType.html
listings.loc[i, 'listing_type'] = ltype = self.convert_listing_type(item['listingInfo']['listingType'])
# https://developer.ebay.com/devzone/finding/CallRef/types/SellingStatus.html
sstate_raw = item['sellingStatus']['sellingState']
listings.loc[i, 'status'] = sstate = self.convert_selling_state(sstate_raw)
if ltype in ['fixed-price', 'classified']:
listings.loc[i, 'is_real'] = True
elif ltype == 'auction' and sstate == 'ended':
listings.loc[i, 'is_real'] = True
else:
listings.loc[i, 'is_real'] = False
if sstate_raw == 'EndedWithSales':
listings.loc[i, 'is_sold'] = True
elif sstate_raw == 'EndedWithoutSales':
listings.loc[i, 'is_sold'] = False
else:
listings.loc[i, 'is_sold'] = None
except (KeyError, AssertionError) as err:
logging.error('Error while parsing Ebay find result: ' + repr(err))
logging.debug(pformat(item))
listings['site'] = self.ebay_name
return listings
@staticmethod
def convert_condition(ebay_condition):
"""
Convert Ebay condition numbers to internal condition values.
Ebay condition numbers:
http://developer.ebay.com/DevZone/finding/CallRef/Enums/conditionIdList.html
--------------------------------------------------------------
Ebay code Description Internal code
--------- --------------------------- ----------------------
1000 New, brand-new new
1500 New other new-defects
1750 New with defects (very small new-defects
defects of clothes)
2000 Manufacturer refurbished refurbished
2500 Seller refurbished refurbished
3000 Used used
4000 Very Good (books) used-very-good
5000 Good (books) used-good
6000 Acceptable (books) used-acceptable
7000 For parts or not working not-working
--------------------------------------------------------------
Parameters
----------
ebay_condition: str
Ebay condition code (numeric string).
Returns
-------
str
Internal condition code.
"""
cond_map = {'1000': 'new', '1500': 'new-defects', '1750': 'new-defects',
'2000': 'refurbished', '2500': 'refurbished',
'3000': 'used',
'4000': 'used-very-good', '5000': 'used-good', '6000': 'used-acceptable',
'7000': 'not-working', }
return cond_map[ebay_condition]
@staticmethod
def convert_listing_type(listing_type):
"""
Convert Ebay listing type (ListingType) codes to internal codes.
Ebay listing type numbers:
https://developer.ebay.com/devzone/finding/CallRef/types/ItemFilterType.html
--------------------------------------------------------------
Ebay code Description Internal code
--------- ------------------------ ----------------------
Auction Auction listing. auction
AuctionWithBIN Auction listing with auction
"Buy It Now" available.
Classified Classified Ad. classified
FixedPrice Fixed price items. fixed-price
StoreInventory Store Inventory format fixed-price
items.
--------------------------------------------------------------
Parameters
----------
listing_type: str
Ebay listing-type code.
Returns
-------
str
Internal listing-type code.
"""
ltype_map = {'Auction': 'auction', 'AuctionWithBIN': 'auction',
'Classified': 'classified',
'FixedPrice': 'fixed-price', 'StoreInventory': 'fixed-price'}
return ltype_map[listing_type]
@staticmethod
def convert_selling_state(selling_state):
"""
Convert Ebay selling state codes to internal codes.
Ebay selling state codes:
https://developer.ebay.com/devzone/finding/CallRef/types/SellingStatus.html
---------------------------------------------------------------
Ebay code Description Internal code
--------- ------------------------ -------------
Active The listing is still live. active
Canceled The listing has been canceled canceled
by either the seller or eBay.
Ended The listing has ended and eBay ended
has completed the processing.
EndedWithSales The listing has been ended ended
with sales.
EndedWithoutSales The listing has been ended ended
without sales.
---------------------------------------------------------------
Parameters
----------
selling_state: str
Ebay selling state code.
Returns
-------
str
Internal selling state code.
"""
smap = {'Active': 'active', 'Canceled': 'canceled', 'Ended': 'ended',
'EndedWithSales': 'ended', 'EndedWithoutSales': 'ended'}
return smap[selling_state]
class EbayShoppingAPIConnector(object):
"""
Abstraction for Ebay's shopping API.
    Connects to Ebay over the internet and returns listings.
    It cannot search by keyword; instead it retrieves the complete information,
    including the item's description, for listings whose IDs are already known.
    Application code should not use this class, but rather ``EbayConnector``.
http://developer.ebay.com/DevZone/shopping/docs/CallRef/index.html
https://github.com/timotheus/ebaysdk-python/wiki/Shopping-API-Class
Parameters
-------------
keyfile : str
Name of the configuration file for the ``python-ebay`` library,
that contains the (secret) access keys for the Ebay API.
ebay_site : str
Ebay site (country) where the search is executed.
* Ebay USA: 'EBAY-US'
* Ebay Germany: 'EBAY-DE'
http://developer.ebay.com/Devzone/finding/Concepts/SiteIDToGlobalID.html
ebay_name : str
String that will be put into the ``df['site']`` field of the dataframe.
For example ``'ebay'``.
"""
def __init__(self, keyfile, ebay_site, ebay_name):
assert isinstance(keyfile, (str, type(None)))
assert os.path.isfile(keyfile)
assert isinstance(ebay_site, str)
assert isinstance(ebay_name, str)
self.keyfile = keyfile
self.ebay_site = ebay_site
self.ebay_name = ebay_name
def update_listings(self, listings, ebay_site):
"""
Update listings by connecting to Ebay over the Internet.
Retrieves all columns in listing (as opposed to
``EbayFindingAPIConnector.find_listings``.)
Argument
--------
listings : pandas.DataFrame
Table with listings that should be updated.
Expects that column 'id' is used as the table's index.
ebay_site : str
Localized site that is accessed. Influences shipping costs and
currency.
Returns
-------
pandas.DataFrame
New table with updated information.
"""
assert isinstance(listings, pd.DataFrame)
assert isinstance(ebay_site, str)
# Get ids from listings that are really from Ebay
ebay_listings = listings[listings['site'] == self.ebay_name]
ids = ebay_listings["id_site"]
# Remove duplicate IDs
ids = list(set(ids))
# Download information in chunks of 20 listings.
listings = make_data_frame(Listing, 0)
for i_start in range(0, len(ids), 20):
resp = self._call_shopping_api(ids[i_start:i_start + 20], ebay_site)
listings_part = self._parse_shopping_response(resp)
listings = listings.append(listings_part, ignore_index=True,
verify_integrity=False)
return listings
def _call_shopping_api(self, ids, ebay_site):
"""
Call Ebay's shopping API to get complete information about a listing.
"""
try:
api = SConnection(config_file=self.keyfile, siteid=ebay_site)
response = api.execute('GetMultipleItems',
{'IncludeSelector': 'Description,Details,ItemSpecifics,ShippingCosts',
'ItemID': ids})
except (ebaysdk.exception.ConnectionError,
requests.exceptions.ConnectionError) as err:
err_text = 'Downloading full item information from Ebay failed! ' \
'Error: ' + str(err)
logging.error(err_text)
logging.debug(err.response.dict())
raise EbayError(err_text)
# #TODO: react on the following status information
# # Returns the HTTP response code.
# response_code()
# # Returns the HTTP response status
# response_status()
# # Returns an array of eBay response codes
# response_codes()
resp_dict = response.dict()
# Act on resonse status
if resp_dict['Ack'] == 'Success':
logging.debug('Successfully called Ebay shopping API.')
elif resp_dict['Ack'] in ['Warning', 'PartialFailure']:
logging.warning('Ebay shopping API returned warning.')
logging.debug(pformat(resp_dict['Errors']))
else:
logging.error('Ebay shopping API returned error.')
logging.debug(pformat(resp_dict))
raise EbayError('Ebay shopping API returned error.')
return resp_dict
def _parse_shopping_response(self, resp):
"""
Parse response from call to Ebay's shopping API.
See:
http://developer.ebay.com/DevZone/Shopping/docs/CallRef/GetMultipleItems.html
"""
# pprint(resp)
items = resp['Item']
listings = make_data_frame(Listing, len(items))
for i, item in enumerate(items):
try:
# ID --------------------------------------------------
listings.loc[i, 'id_site'] = item['ItemID']
# Product description --------------------------------------------------
listings.loc[i, 'title'] = item['Title']
listings.loc[i, 'description'] = HtmlTool.to_nice_text(item['Description'])
try:
listings.loc[i, 'prod_spec'] = self.convert_ItemSpecifics(item['ItemSpecifics'])
except KeyError as err:
logging.debug("Missing field 'ItemSpecifics': " + str(err))
listings.loc[i, 'condition'] = self.convert_condition(item['ConditionID'])
# Price -----------------------------------------------------------
listings.loc[i, 'time'] = pd.Timestamp(item['EndTime']).to_datetime64()
listings.loc[i, 'currency'] = item['ConvertedCurrentPrice']['_currencyID']
listings.loc[i, 'price'] = item['ConvertedCurrentPrice']['value']
try:
listings.loc[i, 'shipping_price'] = item['ShippingCostSummary']['ShippingServiceCost']['value']
shipping_currency = item['ShippingCostSummary']['ShippingServiceCost']['_currencyID']
assert shipping_currency == listings.loc[i, 'currency'], \
'Prices in a listing must be of the same currency.'
except KeyError as err:
logging.debug("Missing field in 'ShippingCostSummary': " + str(err))
# Listing Data -----------------------------------------------------------
listings.loc[i, 'location'] = item['Location'] + ', ' + item['Country']
listings.loc[i, 'shipping_locations'] = to_str_list(item['ShipToLocations'])
listings.loc[i, 'seller'] = item['Seller']['UserID']
listings.loc[i, 'item_url'] = item['ViewItemURLForNaturalSearch']
# Status values -----------------------------------------------------------
listings.loc[i, 'status'] = status = self.convert_listing_status_shp(item['ListingStatus'])
listings.loc[i, 'listing_type'] = lstype = self.convert_listing_type_shp(item['ListingType'])
quantitySold = int(item['QuantitySold'])
# is_real - If True: One could really buy the item for this price.
if lstype == 'fixed-price':
is_real = True
elif lstype == 'auction' and status == 'ended' and quantitySold >= 1:
is_real = True
else:
is_real = False
listings.loc[i, 'is_real'] = is_real
# is_sold - Successful sale if ``True``.
if lstype == 'fixed-price' and quantitySold >= 1:
is_sold = True
elif lstype == 'auction' and status == 'ended' and quantitySold >= 1:
is_sold = True
else:
is_sold = False
listings.loc[i, 'is_sold'] = is_sold
if is_sold:
try:
listings.loc[i, 'buyer'] = item['HighBidder']['UserID']
except KeyError as err:
logging.debug("Missing field in 'HighBidder': " + str(err))
except (TypeError, KeyError, AssertionError) as err:
logging.error('Error while parsing Ebay shopping API result: ' + repr(err))
logging.info(pformat(item))
listings['site'] = self.ebay_name
return listings
@staticmethod
def convert_ItemSpecifics(item_specifics):
"""Convert the ``ItemSpecifics`` to a suitable JSON representation."""
try:
specs = {}
for nvpair in item_specifics['NameValueList']:
specs[nvpair['Name']] = nvpair['Value']
except TypeError as err:
logging.error(str(err) + '\n`item_specifics`:\n' + pformat(item_specifics))
return json.dumps(specs, ensure_ascii=False, check_circular=False, sort_keys=True)
# return str(specs)
@staticmethod
def convert_condition(ebay_condition):
"""
Convert Ebay condition numbers to internal condition values.
Ebay condition numbers:
http://developer.ebay.com/DevZone/finding/CallRef/Enums/conditionIdList.html
--------------------------------------------------------------
Ebay code Description Internal code
--------- --------------------------- ----------------------
1000 New, brand-new new
1500 New other new-defects
1750 New with defects (very small new-defects
defects of clothes)
2000 Manufacturer refurbished refurbished
2500 Seller refurbished refurbished
3000 Used used
4000 Very Good (books) used-very-good
5000 Good (books) used-good
6000 Acceptable (books) used-acceptable
7000 For parts or not working not-working
--------------------------------------------------------------
Parameters
----------
ebay_condition: str
Ebay condition code (numeric string).
Returns
-------
str
Internal condition code.
"""
cond_map = {'1000': 'new', '1500': 'new-defects', '1750': 'new-defects',
'2000': 'refurbished', '2500': 'refurbished',
'3000': 'used',
'4000': 'used-very-good', '5000': 'used-good', '6000': 'used-acceptable',
'7000': 'not-working', }
return cond_map[ebay_condition]
@staticmethod
def convert_listing_type_shp(listing_type):
"""
Convert Ebay listing type (ListingType) codes to internal codes.
Ebay listing type numbers:
http://developer.ebay.com/DevZone/Shopping/docs/CallRef/extra/GtMltplItms.Rspns.Itm.LstngTyp.html
------------------------------------------------------------------------
Ebay code Description Internal code
---------------- --------------------------------------- -------------
AdType Advertisement. Permits no bidding on None
that item.
Chinese Single-quantity online auction format. auction
CustomCode Placeholder value. None
Dutch Deprecated. Multiple-quantity online auction
auction format.
Express Deprecated. Germany only: eBay None
Express-only format.
FixedPriceItem A basic fixed-price listing with a fixed-price
Quantity of 1.
LeadGeneration Advertisement-style listing, no bidding None
or fixed price.
Live Live auction, on-site auction that can auction
include non-eBay bidders.
PersonalOffer Second chance offer made to a non- auction
winning bidder on an ended listing.
StoresFixedPrice A fixed-price format for eBay Store fixed-price
sellers.
------------------------------------------------------------------------
Parameters
----------
listing_type: str
Ebay listing-type code.
Returns
-------
str
Internal listing-type code.
"""
        ltype_map = {'AdType': None, 'Chinese': 'auction',
                     'CustomCode': None, 'Dutch': 'auction', 'Express': None,
                     'FixedPriceItem': 'fixed-price', 'LeadGeneration': None,
                     'Live': 'auction', 'PersonalOffer': 'auction',
                     'StoresFixedPrice': 'fixed-price'}
return ltype_map[listing_type]
@staticmethod
def convert_listing_status_shp(listing_status):
"""
Convert Ebay selling state codes to internal codes.
Ebay listing status codes:
http://developer.ebay.com/DevZone/Shopping/docs/CallRef/GetMultipleItems.html#Response.Item.ListingStatus
------------------------------------------------------------------------
Ebay code Description Internal code
---------------- --------------------------------------- -------------
Active The listing is still live. active
Completed The listing has ended. You can think of ended
Completed and Ended as essentially
equivalent.
CustomCode Placeholder value. None
Ended The listing has ended. ended
------------------------------------------------------------------------
Parameters
----------
listing_status: str
Ebay selling state code.
Returns
-------
str
Internal selling state code.
"""
smap = {'Active': 'active', 'Completed': 'ended', 'Ended': 'ended',
'CustomCode': None}
return smap[listing_status]
class EbayConnector(object):
"""
Connect to Ebay over the internet and return listings.
This is the class that application code should use to connect to Ebay.
"""
all_ebay_global_ids = {
"EBAY-AT", "EBAY-AU", "EBAY-CH", "EBAY-DE", "EBAY-ENC", "EBAY-ES",
"EBAY-FR", "EBAY-FRB", "EBAY-FRC", "EBAY-GB", "EBAY-HK", "EBAY-IE",
"EBAY-IN", "EBAY-IT", "EBAY-MOT", "EBAY-MY", "EBAY-NL", "EBAY-NLB",
"EBAY-PH", "EBAY-PL", "EBAY-SG", "EBAY-US", }
"Legal values for Ebay's global ID."
internal_site_name = 'ebay'
"Value for the dataframe's 'site' field, to show that the listings come from Ebay."
def __init__(self, keyfile):
"""
Parameters
-------------
keyfile : str
Name of the configuration file for the ``python-ebay`` library,
that contains the (secret) access keys for the Ebay API.
"""
assert isinstance(keyfile, (str, type(None)))
assert os.path.isfile(keyfile)
self.keyfile = keyfile
def find_listings(self, keywords, n_listings, ebay_site,
price_min=None, price_max=None, currency="USD",
time_from=None, time_to=None):
"""
Find listings on Ebay by keyword.
Returns only incomplete information: the description is missing.
Calls the Ebay API function 'findItemsAdvanced':
https://developer.ebay.com/devzone/finding/CallRef/findItemsAdvanced.html
Parameters
-----------
keywords : str
Search string in Ebay's searching language. See:
http://pages.ebay.com/help/search/advanced-search.html#using
n_listings : int
Number of listings that Ebay should return. Might return fewer or
slightly more listings.
Currently limited to 100 listings, the maximum number of listings
in one page.
ebay_site : str
Ebay site (country) where the search is executed.
* Ebay USA: 'EBAY-US'
* Ebay Germany: 'EBAY-DE'
http://developer.ebay.com/Devzone/finding/Concepts/SiteIDToGlobalID.html
price_min : float
Minimum price for listings, that are returned.
price_max : float
Maximum price for listings, that are returned.
currency : str
Currency unit for ``price_min`` and ``price_max``.
US Dollar: USD
Euro: EUR
https://developer.ebay.com/devzone/finding/CallRef/Enums/currencyIdList.html
time_from : datetime
Earliest end time for listings (auctions) that are returned.
Time is in UTC!
time_to : datetime
Latest end time for listings (auctions) that are returned.
Time is in UTC!
Returns
-------
pandas.DataFrame
Table with one row for each listing. Our Id is the index.
Some columns are empty (especially "description"), because Ebay's
find API call doesn't return this information. These columns
can be filled in with a subsequent call to ``update_listings``.
"""
assert isinstance(keywords, (str))
assert isinstance(n_listings, (int))
assert ebay_site in self.all_ebay_global_ids
assert isinstance(price_min, (float, int, type(None)))
assert isinstance(price_max, (float, int, type(None)))
assert isinstance(currency, (str, type(None)))
assert isinstance(time_from, (datetime, pd.Timestamp, type(None)))
assert isinstance(time_to, (datetime, pd.Timestamp, type(None)))
fapic = EbayFindingAPIConnector(self.keyfile, ebay_site, self.internal_site_name)
listings = fapic.find_listings(keywords, n_listings,
price_min, price_max, currency,
time_from, time_to)
self.create_ids(listings)
# Remove duplicate rows: Ebay uses the same ID for variants of the
# same product.
listings = listings.drop_duplicates(subset="id")
# Put internal IDs into index
listings.set_index("id", drop=False, inplace=True,
verify_integrity=True)
return listings
def update_listings(self, listings, ebay_site):
"""
Update listings by connecting to Ebay over the Internet.
Retrieves all columns in listing (as opposed to ``find_listings``.)
Argument
--------
listings : pandas.DataFrame
Table with listings that should be updated.
Expects that column 'id' is used as the table's index.
Returns
-------
pandas.DataFrame
New table with updated information.
"""
assert isinstance(listings, pd.DataFrame)
assert ebay_site in self.all_ebay_global_ids
sapic = EbayShoppingAPIConnector(self.keyfile, ebay_site,
self.internal_site_name)
listings = sapic.update_listings(listings, ebay_site)
listings.dropna(subset=['time'], inplace=True)
self.create_ids(listings)
listings.drop_duplicates(['id'], keep='first', inplace=True)
listings.set_index("id", drop=False, inplace=True,
verify_integrity=True)
return listings
def create_ids(self, listings):
"""
Create the internal IDs for Ebay listings.
The have the form: {date}-ebay-{number}
"""
# Ebay reuses ``itemId`` values for recurrent listings of professional
# sellers. Therefore the date is included in the listing's ID.
dates = listings['time'].map(lambda t: t.isoformat().split('T')[0])
listings['id'] = dates + '-' + listings['site'] + '-' + listings['id_site']
# return listings
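# Usage sketch (keyfile path and search string are illustrative only):
#   ec = EbayConnector(keyfile='ebay-sdk.apikey')
#   found = ec.find_listings('nikon d90', n_listings=50, ebay_site='EBAY-US')
#   full = ec.update_listings(found, ebay_site='EBAY-US')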
| gpl-3.0 |
jumc/img_parallax_gif | testes/test3.py | 1 | 1451 | from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.morphology import watershed, disk
from skimage import data, io, color
from skimage.filters import rank
from skimage.util import img_as_ubyte
# image = img_as_ubyte(data.camera())
image = io.imread('test2.jpg')
image = color.rgb2gray(image)
# denoise image
denoised = rank.median(image, disk(6))
# find continuous region (low gradient -
# where less than 10 for this image) --> markers
# disk(5) is used here to get a more smooth image
markers = rank.gradient(denoised, disk(10)) < 10
markers = ndi.label(markers)[0]
# local gradient (disk(2) is used to keep edges thin)
gradient = rank.gradient(denoised, disk(2))
# process the watershed
labels = watershed(gradient, markers)
# display results
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title("Original")
ax[1].imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest')
ax[1].set_title("Local Gradient")
ax[2].imshow(markers, cmap=plt.cm.spectral, interpolation='nearest')
ax[2].set_title("Markers")
ax[3].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[3].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest', alpha=.7)
ax[3].set_title("Segmented")
for a in ax:
a.axis('off')
fig.tight_layout()
plt.show() | gpl-3.0 |
khabibr/human_like_mouse_move | mouse_move.py | 1 | 11007 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import time
import subprocess
import math
import random
import numpy as np
from scipy.misc import comb
DEFAULT_MIN_SPEED = 60 # [1..100]
DEFAULT_MAX_SPEED = 90 # [1..100]
DEFAULT_MAX_X = 639
DEFAULT_MAX_Y = 479
DEFAULT_MIN_PAUSE = 0.5
DEFAULT_MAX_PAUSE = 4
USING_STR = (
'\nUsage:\n'
'{0} [COORDINATES] [PARAMS] [KEYS]\n'
'\n'
'COORDINATES: x1:y1 x2:y2 ... xM:yM\n'
'\tMove cursor sequentially along the COORDINATES\n'
'\tendless random movements if no COORDINATES\n'
'PARAMS: param1:val1 param2:val2 ... paramN:valN\n'
'\tSet of params:\n'
'\tcount - count of random movement\n'
'\tmin_pause, max_pause - pause between movements (seconds)\n'
'\tmin_speed, max_speed - speed of movements (1..100)\n'
'\ttop_left, bottom_right - movements range (top_left:x:y or bottom_right:x:y)\n'
'KEYS:\n'
'\t--debug_show_curve : Show mouse path curve (matplotlib figure)\n'
'\t--autopilot : Use autopilot mouse move method (default)\n'
'\t (https://developer.ubuntu.com/api/autopilot/python/1.5.0/autopilot.input)\n'
'\t--xdotool : Use xdotool mouse move method\n'
'Examples:\n'
'\t{0} 100:100 500:100 300:250 100:100 max_speed:50\n'
    '\t{0} count:10 max_pause:1 top_left:0:0 bottom_right:639:199\n'
)
# MOUSE METHODS
"""
XDOTOOL method
Linux universal tool.
(medium speed. a lot of process calls).
"""
class xdotool(object):
def get_mouse_location(self):
spitted = subprocess.Popen(
"xdotool getmouselocation",
shell=True,
stdout=subprocess.PIPE
).stdout.read().decode().split()
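        # Typical `xdotool getmouselocation` output: "x:123 y:456 screen:0 window:1234567",
        # so after split() the first two tokens carry the cursor coordinates.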
if len(spitted) > 1 and 'x:' in spitted[0] and 'y:' in spitted[1]:
x = int(spitted[0].partition(':')[2])
y = int(spitted[1].partition(':')[2])
else:
x, y = 0, 0
return x, y
def move_mouse(self, x_pos, y_pos, speed):
subprocess.call(["xdotool", "mousemove", str(
x_pos), str(y_pos)]) # ignore speed
"""
AUTOPILOT method
https://developer.ubuntu.com/api/autopilot/python/1.5.0/autopilot.input/
(high speed, great velocity, low resource consumption).
"""
class autopilot(object):
def __init__(self):
from autopilot.input import Mouse # apt-get install python3-autopilot
self.mouse = Mouse.create()
def get_mouse_location(self):
return self.mouse.position()
def move_mouse(self, x_pos, y_pos, speed):
# time_between_events [0.02 (slowest) .. 0.0001 (fastest)]
self.mouse.move(x_pos, y_pos, animate=False,
time_between_events=(2 - 0.02 * speed) / 99)
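        # Added worked example (not in the original): with speed=1 the delay is
        # (2 - 0.02*1)/99 = 0.02 s per event (slowest), and with speed=100 it is
        # (2 - 0.02*100)/99 = 0 s, i.e. the [0.02 .. ~0] range noted above.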
if speed < 90:
# additional random slow down
time.sleep(random.uniform(0.001, 0.01))
# BEZIER CURVE
def bernstein_poly(i, n, t):
"""
The Bernstein polynomial of n, i as a function of t
"""
return comb(n, i) * (t**(n - i)) * (1 - t)**i
def bezier_curve(points, dots_cnt):
"""
Given a set of control points, return the
bezier curve defined by the control points.
points should be a list of lists, or list of tuples
such as [ [1,1],
[2,3],
[4,5], ..[Xn, Yn] ]
dots_cnt is the number of steps
See http://processingjs.nihongoresources.com/bezierinfo/
"""
nPoints = len(points)
xPoints = np.array([p[0] for p in points])
yPoints = np.array([p[1] for p in points])
t = np.linspace(0.0, 1.0, dots_cnt)
polynomial_array = np.array(
[bernstein_poly(i, nPoints - 1, t) for i in range(0, nPoints)])
xvals = np.dot(xPoints, polynomial_array)
yvals = np.dot(yPoints, polynomial_array)
return xvals, yvals
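# Added illustrative sketch (not part of the original script): a quadratic
# Bezier curve through three made-up control points, mirroring how
# get_move_curve() calls bezier_curve() below. With
#
#     xs, ys = bezier_curve([(0, 0), (50, 80), (100, 0)], dots_cnt=5)
#
# the returned points run from the last control point to the first as t goes
# from 0 to 1, giving xs = [100, 75, 50, 25, 0] and ys = [0, 30, 40, 30, 0].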
# HUMAN LIKE MOVE
class human_like_mouse_move(object):
def __init__(self,
mouse_method,
top_left_corner,
bottom_right_corner,
min_spd,
max_spd,
show_curve):
self.mouse_method = mouse_method
self.min_spd = min_spd
self.max_spd = max_spd
self.show_curve = show_curve
self.get_resolution()
if top_left_corner:
self.min_x, self.min_y = top_left_corner
if bottom_right_corner:
self.max_x, self.max_y = bottom_right_corner
def rand_coords(self):
""" Infinite random coordinates generator"""
while True:
yield (
random.randrange(self.min_x, self.max_x),
random.randrange(self.min_y, self.max_y)
)
def get_resolution(self):
spitted = subprocess.Popen(
"xrandr | grep '*' | head -1",
shell=True,
stdout=subprocess.PIPE
).stdout.read().decode().split()
self.min_x, self.min_y = 0, 0
if spitted and 'x' in spitted[0]:
x, y = spitted[0].split('x')
self.res_x, self.res_y = int(x), int(y)
else:
self.res_x, self.res_y = DEFAULT_MAX_X, DEFAULT_MAX_Y
self.max_x, self.max_y = self.res_x, self.res_y
def get_move_curve(self, d_to):
d_from = self.mouse_method.get_mouse_location()
x_dist = d_to[0] - d_from[0]
y_dist = d_to[1] - d_from[1]
dist = math.hypot(x_dist, y_dist)
# first approx
deviation = dist * \
random.uniform(0.1, 0.3) * \
(random.randint(0, 1) * 2 - 1) # -1 or +1
middle = ((d_from[0] + d_to[0]) / 2,
(d_from[1] + d_to[1]) / 2 + deviation)
points = [d_to, middle, d_from]
dots_cnt = int(self.dots_per_100 * dist / (100 * 5))
if dots_cnt <= 3:
dots_cnt = 4
xvals, yvals = bezier_curve(points, dots_cnt)
xvals = [int(p) for p in xvals[1:-1]]
yvals = yvals[1:-1]
# second approx
delta_y = y_dist / dots_cnt
if self.show_curve:
points = [d_from]
            rev_points = [d_to] # reversed points
if y_dist == 0:
coef = 1
else:
coef = abs(x_dist / y_dist)
if coef > 1:
coef = 1
elif coef < 0.1:
coef = 0.1
for i, xval in enumerate(xvals):
deviation = delta_y * coef * \
random.uniform(0.5, 5) * \
(random.randint(0, 1) * 2 - 1) # -1 or +1
if self.show_curve:
points.append((xval, int(yvals[i] + deviation)))
rev_points.insert(1, (xval, int(yvals[i] + deviation)))
points.append(d_to)
rev_points.append(d_from)
if self.show_curve:
xpoints = [p[0] for p in points]
ypoints = [p[1] for p in points]
from matplotlib import pyplot as plt
ax = plt.gca()
ax.set_xlim([0, self.res_x])
ax.set_ylim([0, self.res_y])
ax.invert_yaxis()
# dots and text
plt.plot(xpoints, ypoints, "ro")
for nr in range(len(points)):
plt.text(points[nr][0], points[nr][1], nr)
dots_cnt = int(self.dots_per_100 * dist / (100))
xvals, yvals = bezier_curve(rev_points, dots_cnt) # rev_points
if self.show_curve:
plt.plot(xvals, yvals)
plt.show()
return xvals, yvals, dots_cnt
def move_to(self, point_to):
speed = random.uniform(self.min_spd, self.max_spd)
        # dots_per_100 comfortable range 5 .. 25
self.dots_per_100 = round((2495 - 20 * speed) / 99)
try:
xvals, yvals, dots_cnt = self.get_move_curve(point_to)
except TypeError:
return
for idx, xval in enumerate(xvals):
self.mouse_method.move_mouse(xval, yvals[idx], speed)
def pause_between_moves(self):
time.sleep(random.uniform(min_pause, max_pause))
def signal_handler(signal, frame):
print(' Cancelled...')
exit(0)
# MAIN
if __name__ == "__main__":
import signal
signal.signal(signal.SIGINT, signal_handler) # Ctrl-C signal handler
# parse_params
move_cnt = 0
top_left_corner = None
bottom_right_corner = None
min_spd = DEFAULT_MIN_SPEED
max_spd = DEFAULT_MAX_SPEED
min_pause = DEFAULT_MIN_PAUSE
max_pause = DEFAULT_MAX_PAUSE
coords = []
show_curve = False
use_xdotool = False
if len(sys.argv) > 1:
try:
for arg in sys.argv[1:]:
if arg == '--debug_show_curve':
show_curve = True
continue
elif arg == '--autopilot':
continue
elif arg == '--xdotool':
use_xdotool = True
continue
elif ':' not in arg:
raise Exception(arg)
par_name, par_val, *par_extra = arg.split(':')
if par_name == 'count':
move_cnt = int(par_val)
elif par_name == 'min_pause':
min_pause = float(par_val)
if max_pause < min_pause:
max_pause = min_pause
elif par_name == 'max_pause':
max_pause = float(par_val)
if min_pause > max_pause:
min_pause = max_pause
elif par_name == 'min_speed':
min_spd = int(par_val)
if min_spd < 1:
min_spd = 1
elif min_spd > 100:
min_spd = 100
if max_spd < min_spd:
max_spd = min_spd
elif par_name == 'max_speed':
max_spd = int(par_val)
if max_spd < 1:
max_spd = 1
elif max_spd > 100:
max_spd = 100
if min_spd > max_spd:
min_spd = max_spd
elif par_name == 'top_left':
top_left_corner = (int(par_val), int(par_extra[0]))
elif par_name == 'bottom_right':
bottom_right_corner = (int(par_val), int(par_extra[0]))
else: # just coord
coords.append((int(par_name), int(par_val)))
except Exception:
print(USING_STR.format(sys.argv[0]))
sys.exit()
if use_xdotool:
method = xdotool()
else:
method = autopilot()
human_like = human_like_mouse_move(
method,
top_left_corner,
bottom_right_corner,
min_spd,
max_spd,
show_curve)
if not coords:
coords = human_like.rand_coords()
cnt = 0
for dot in coords:
human_like.move_to(dot)
cnt += 1
if move_cnt and move_cnt == cnt:
break
human_like.pause_between_moves()
| mit |
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/almost_ideal_demand_system/aids_dataframe_builder_energy_no_alime.py | 4 | 12676 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 05 16:16:20 2016
@author: thomas.douenne
"""
from __future__ import division
import pandas as pd
import numpy as np
import os
import pkg_resources
from openfisca_france_indirect_taxation.examples.utils_example import get_input_data_frame
from openfisca_france_indirect_taxation.almost_ideal_demand_system.aids_price_index_builder import \
df_indice_prix_produit
from openfisca_france_indirect_taxation.almost_ideal_demand_system.utils import \
add_area_dummy, add_stalog_dummy, add_vag_dummy, electricite_only, indices_prix_carbus, price_carbu_pond
assets_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location
)
# We import the dataframe listing the price indices. Our goal is to build a new dataframe with
# the remaining information, i.e. consumption and other relevant variables about the households.
# We start by building a dataframe called data_conso gathering the information on household expenditures.
data_frame_for_reg = None
data_frame_all_years = pd.DataFrame()
for year in [2011]:
aggregates_data_frame = get_input_data_frame(year)
    # To estimate QAIDS, we focus on non-durable goods.
    # We therefore drop durable goods from this dataframe: 442: waste collection fee, 711, 712, 713:
    # vehicle purchases, 911, 912, 9122, 913, 9151: high-tech goods, 9211, 921, 923: large leisure equipment,
    # 941, 960: trips, stays and gifts, 10i0: education, 12..: personal care articles and jewellery
biens_durables = ['poste_coicop_442', 'poste_coicop_711', 'poste_coicop_712', 'poste_coicop_713',
'poste_coicop_911', 'poste_coicop_912', 'poste_coicop_9122', 'poste_coicop_913', 'poste_coicop_9151',
'poste_coicop_9211', 'poste_coicop_921', 'poste_coicop_922', 'poste_coicop_923', 'poste_coicop_960',
'poste_coicop_941', 'poste_coicop_1010', 'poste_coicop_1015', 'poste_coicop_10152', 'poste_coicop_1020',
'poste_coicop_1040', 'poste_coicop_1050', 'poste_coicop_1212', 'poste_coicop_1231', 'poste_coicop_1240',
'poste_coicop_12411', 'poste_coicop_1270']
for bien in biens_durables:
try:
aggregates_data_frame = aggregates_data_frame.drop(bien, axis = 1)
except:
aggregates_data_frame = aggregates_data_frame
energie_logement = ['poste_coicop_451', 'poste_coicop_4511', 'poste_coicop_452', 'poste_coicop_4522',
'poste_coicop_453', 'poste_coicop_454', 'poste_coicop_455', 'poste_coicop_4552']
produits = [column for column in aggregates_data_frame.columns if column[:13] == 'poste_coicop_']
del column
aggregates_data_frame['depenses_carbu'] = aggregates_data_frame['poste_coicop_722']
aggregates_data_frame['depenses_logem'] = 0
for logem in energie_logement:
try:
aggregates_data_frame['depenses_logem'] += aggregates_data_frame[logem]
except:
pass
aggregates_data_frame['depenses_tot'] = 0
for produit in produits:
if produit[13:15] != '99' and produit[13:15] != '13':
aggregates_data_frame['depenses_tot'] += aggregates_data_frame[produit]
aggregates_data_frame['depenses_autre'] = (
aggregates_data_frame['depenses_tot'] - aggregates_data_frame['depenses_carbu'] -
aggregates_data_frame['depenses_logem'])
data_conso = aggregates_data_frame[produits + ['vag', 'ident_men', 'depenses_autre', 'depenses_carbu',
'depenses_logem']].copy()
    # We reshape (melt) the dataframe to get one row for each item consumed by each person
df = pd.melt(data_conso, id_vars = ['vag', 'ident_men'], value_vars=produits,
value_name = 'depense_bien', var_name = 'bien')
df_indice_prix_produit = df_indice_prix_produit[['indice_prix_produit', 'prix', 'temps', 'mois']]
df['vag'] = df['vag'].astype(str)
df['indice_prix_produit'] = df['bien'] + '_' + df['vag']
    # We merge the goods' prices with the expenditures already present in df. The merge is done on 'indice_prix_produit'
    # indice_prix_produit corresponds to poste_coicop_xyz_vag
df_depenses_prix = pd.merge(df, df_indice_prix_produit, on = 'indice_prix_produit')
    # df_depenses_prix contains the consumption expenditures and the prices associated with them.
    # We now need to build the categories of goods we want to compare.
df_depenses_prix['type_bien'] = 'autre'
df_depenses_prix.loc[df_depenses_prix['bien'] == 'poste_coicop_722', 'type_bien'] = 'carbu'
for logem in energie_logement:
df_depenses_prix.loc[df_depenses_prix['bien'] == logem, 'type_bien'] = 'logem'
del logem, produit
    # Build the weighted price indices for the two categories
df_depenses_prix[['type_bien', 'ident_men']] = df_depenses_prix[['type_bien', 'ident_men']].astype(str)
df_depenses_prix['id'] = df_depenses_prix['type_bien'] + '_' + df_depenses_prix['ident_men']
data_conso['ident_men'] = data_conso['ident_men'].astype(str)
df_depenses_prix = pd.merge(
df_depenses_prix, data_conso[['depenses_autre', 'depenses_carbu', 'depenses_logem', 'ident_men']],
on = 'ident_men')
del data_conso
df_depenses_prix[['depenses_autre', 'depense_bien', 'depenses_carbu', 'depenses_logem', 'prix']] = \
df_depenses_prix[['depenses_autre', 'depense_bien', 'depenses_carbu', 'depenses_logem', 'prix']].astype(float)
df_depenses_prix['part_bien_categorie'] = 0
df_depenses_prix.loc[df_depenses_prix['type_bien'] == 'autre', 'part_bien_categorie'] = \
df_depenses_prix['depense_bien'] / df_depenses_prix['depenses_autre']
df_depenses_prix.loc[df_depenses_prix['type_bien'] == 'carbu', 'part_bien_categorie'] = \
df_depenses_prix['depense_bien'] / df_depenses_prix['depenses_carbu']
df_depenses_prix.loc[df_depenses_prix['type_bien'] == 'logem', 'part_bien_categorie'] = \
df_depenses_prix['depense_bien'] / df_depenses_prix['depenses_logem']
df_depenses_prix.fillna(0, inplace=True)
    # The shares of the goods within their category are used to build weighted price indices (cf. Lewbel)
df_depenses_prix['indice_prix_pondere'] = 0
df_depenses_prix['indice_prix_pondere'] = df_depenses_prix['part_bien_categorie'] * df_depenses_prix['prix']
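    # Added illustration with made-up numbers (not from the data): if a household
    # splits its fuel spending 70% / 30% between two items whose price indices are
    # 1.10 and 1.20, the weighted category index produced by the groupby below is
    # 0.7 * 1.10 + 0.3 * 1.20 = 1.13.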
    # grouped gives the weighted price index of each category for each individual
    # We reshape this dataframe so that each individual has the price index of each category
    # This gives df_prix_to_merge
df_depenses_prix.sort_values(by = ['id'])
grouped = df_depenses_prix['indice_prix_pondere'].groupby(df_depenses_prix['id'])
assert len(grouped) == 3 * len(aggregates_data_frame), 'There is an issue in the aggregation of prices'
grouped = grouped.aggregate(np.sum)
grouped.index.name = 'id'
grouped = grouped.reset_index()
grouped['categorie'] = grouped['id'].str[:5]
categories = ['autre', 'carbu', 'logem']
for categorie in categories:
grouped['prix_' + categorie] = 0
grouped.loc[grouped['categorie'] == categorie, 'prix_' + categorie] = grouped['indice_prix_pondere']
grouped_autre = grouped[grouped['categorie'] == 'autre'].copy()
grouped_autre['ident_men'] = grouped_autre['id'].str[6:]
grouped_carbu = grouped[grouped['categorie'] == 'carbu'].copy()
grouped_carbu['ident_men'] = grouped_carbu['id'].str[6:]
grouped_logem = grouped[grouped['categorie'] == 'logem'].copy()
grouped_logem['ident_men'] = grouped_logem['id'].str[6:]
df_prix_to_merge = pd.merge(grouped_carbu[['ident_men', 'prix_carbu']], grouped_autre[['ident_men', 'prix_autre']],
on = 'ident_men')
df_prix_to_merge = pd.merge(df_prix_to_merge, grouped_logem[['ident_men', 'prix_logem']], on = 'ident_men')
del grouped, grouped_autre, grouped_carbu, grouped_logem
    # Issue: households that consume no fuel or no food are assigned a price index equal to 0. They
    # are handled further below.
    # We create a dummy variable indicating whether the household consumes only electricity or also gas.
    # If electricity only, it equals 1.
aggregates_data_frame = electricite_only(aggregates_data_frame)
    # We collect the important household information, including the demographic variables
df_info_menage = aggregates_data_frame[['agepr', 'depenses_autre', 'depenses_carbu',
'depenses_logem', 'depenses_tot', 'dip14pr', 'elect_only', 'ident_men', 'nenfants', 'nactifs', 'ocde10',
'revtot', 'situacj', 'situapr', 'stalog', 'strate', 'typmen', 'vag', 'veh_diesel',
'veh_essence']].copy()
df_info_menage.index.name = 'ident_men'
df_info_menage.reset_index(inplace = True)
df_info_menage['ident_men'] = df_info_menage['ident_men'].astype(str)
df_info_menage['part_autre'] = df_info_menage['depenses_autre'] / df_info_menage['depenses_tot']
df_info_menage['part_carbu'] = df_info_menage['depenses_carbu'] / df_info_menage['depenses_tot']
df_info_menage['part_logem'] = df_info_menage['depenses_logem'] / df_info_menage['depenses_tot']
    # We merge the household characteristics and consumption information with the weighted price
    # indices for the two categories
dataframe = pd.merge(df_info_menage, df_prix_to_merge, on = 'ident_men')
del df_info_menage, df_prix_to_merge
    # For those who consume no fuel, we assign the price corresponding to their survey wave
price_carbu = df_indice_prix_produit[df_indice_prix_produit['indice_prix_produit'].str[13:16] == '722'].copy()
price_carbu['vag'] = price_carbu['indice_prix_produit'].str[17:].astype(int)
price_carbu = price_carbu[['vag', 'prix']]
price_carbu['prix'] = price_carbu['prix'].astype(float)
dataframe = pd.merge(dataframe, price_carbu, on = 'vag')
del price_carbu
dataframe.loc[dataframe['prix_carbu'] == 0, 'prix_carbu'] = dataframe['prix']
dataframe['depenses_par_uc'] = dataframe['depenses_tot'] / dataframe['ocde10']
dataframe = dataframe[['ident_men', 'part_carbu', 'part_logem', 'part_autre',
'prix_carbu', 'prix_logem', 'prix_autre', 'depenses_par_uc', 'depenses_tot',
'typmen', 'strate', 'dip14pr', 'agepr', 'situapr', 'situacj', 'stalog', 'nenfants',
'nactifs', 'vag', 'veh_diesel', 'veh_essence', 'elect_only']]
    # We remove from the database the individuals for whom no food consumption is available.
    # Their presence is likely to bias the analysis since, obviously, if they spend nothing on
    # food it is not that they consume none, but that they did not buy any over the period (stocks, etc.)
dataframe = dataframe[dataframe['prix_logem'] != 0]
    # We remove the outliers, defined as the individuals spending more than 25% of their budget on fuel.
    # This corresponds to 16 and 13 people for 2000 and 2005, which is negligible, but to 153 i.e. 2% of consumers
    # for 2011, which is fairly large. This difference is explained by the duration of the surveys (1 week in 2011)
dataframe = dataframe[dataframe['part_carbu'] < 0.25]
indices_prix_carburants = indices_prix_carbus(year)
dataframe = pd.merge(dataframe, indices_prix_carburants, on = 'vag')
dataframe = price_carbu_pond(dataframe)
dataframe = add_area_dummy(dataframe)
dataframe = add_stalog_dummy(dataframe)
dataframe = add_vag_dummy(dataframe)
data_frame_for_reg = dataframe.rename(columns = {'part_carbu': 'w1', 'part_logem': 'w2',
'part_autre': 'w3', 'prix_carbu': 'p1', 'prix_logem': 'p2', 'prix_autre': 'p3'})
data_frame_all_years = pd.concat([data_frame_all_years, data_frame_for_reg])
data_frame_all_years.fillna(0, inplace = True)
data_frame_for_reg.to_csv(os.path.join(assets_directory, 'openfisca_france_indirect_taxation', 'assets',
'quaids', 'data_frame_energy_no_alime_{}.csv'.format(year)), sep = ',')
data_frame_all_years.to_csv(os.path.join(assets_directory, 'openfisca_france_indirect_taxation', 'assets',
'quaids', 'data_frame_energy_no_alime_all_years.csv'), sep = ',')
# Must correct what is useless, improve demographics : dip14
# dip14 : use only dip14pr (good proxy for dip14cj anyway), but change the nomenclature to have just 2 or 3 dummies
# describing whether they attended college or not, etc.
# Use more functions in utils
| agpl-3.0 |
kabrapratik28/Stanford_courses | cs20si/tf-stanford-tutorials/examples/autoencoder/utils.py | 5 | 1267 | import os
import sys
import tensorflow
import numpy as np
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist_image_shape = [28, 28, 1]
def load_dataset():
return input_data.read_data_sets('MNIST_data')
def get_next_batch(dataset, batch_size):
# dataset should be mnist.(train/val/test)
batch, _ = dataset.next_batch(batch_size)
batch_shape = [batch_size] + mnist_image_shape
return np.reshape(batch, batch_shape)
def visualize(_original, _reconstructions, num_visualize):
vis_folder = './vis/'
if not os.path.exists(vis_folder):
os.makedirs(vis_folder)
original = _original[:num_visualize]
reconstructions = _reconstructions[:num_visualize]
count = 1
for (orig, rec) in zip(original, reconstructions):
orig = np.reshape(orig, (mnist_image_shape[0],
mnist_image_shape[1]))
rec = np.reshape(rec, (mnist_image_shape[0],
mnist_image_shape[1]))
f, ax = plt.subplots(1,2)
ax[0].imshow(orig, cmap='gray')
ax[1].imshow(rec, cmap='gray')
plt.savefig(vis_folder + "test_%d.png" % count)
count += 1
| apache-2.0 |
antgonza/qiime | tests/test_plot_taxa_summary.py | 15 | 16573 | #!/usr/bin/env python
# file test_plot_taxa_summary.py
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME Project" # consider project name
__credits__ = ["Jesse Stombaugh", "Julia Goodrich"] # remember to add yourself
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
import matplotlib
from matplotlib import use
use('Agg', warn=False)
from numpy import array
from os.path import exists
from StringIO import StringIO
from unittest import TestCase, main
from os import remove, mkdir, removedirs, listdir
from qiime.plot_taxa_summary import (make_pie_chart, make_img_name,
get_counts, write_html_file,
make_HTML_table, get_fracs, make_all_charts,
make_area_bar_chart, make_legend, DATA_HTML)
class TopLevelTests(TestCase):
"""Tests of top-level functions"""
def setUp(self):
"""define some top-level data"""
self.props = {"title": "Class: from 3 categories"}
self.prefs = {'1': {'column': '1'}}
self.counts1 = [(
1, "a;b;c", "a<br>b<br>c"), (3, "d;e;f", "d<br>e<br>f"),
(4, "a;g;h", "a<br>g<br>h"), (2, "d;e;i", "d<br>e<br>i")]
self.sample_ids = ['14FC041', '14FC042', '14FC043', '14FC044']
self.taxa = ["a;b;c", "d;e;i", "d;e;f", "a;g;h"]
self.lines_parsed = (['14FC041', '14FC042', '14FC043', '14FC044'],
['a;b;c', 'd;e;f', 'a;g;h', "d;e;i"],
[['0.1', '0.3', '0.2'], ['0', '0.2', '0.1'],
['0.4', '0', '0.3'], ['0.5', '0', '0.1']])
self.fracs = [("a;b;c", 1.0 / 10), ("d;e;f", 3.0 / 10),
("a;g;h", 4.0 / 10), ("d;e;i", 2.0 / 10)]
self.colors = ['#0000ff', '#00ff00', '#ff0000', '#00ffff']
self.area_fracs = [[0.1, 0.3, 0.2], [0.0, 0.2, 0.1],
[0.4, 0.0, 0.3], [0.5, 0.0, 0.1]]
self.color_prefs = {
"a;b;c": 'blue1', "d;e;i": 'red1', "d;e;f": 'blue2',
"a;g;h": 'red2'}
self.dpi = 80
self.plot_width = 12
self.plot_height = 6
self.bar_width = 1
self.generate_image_type = 'pdf'
self._paths_to_clean_up = []
self._dirs_to_clean_up = []
self.dir_path = "/tmp/qiimewebfiles/"
# make the webfile directory
try:
mkdir(self.dir_path)
except OSError:
pass
# make the charts directory
try:
mkdir("/tmp/qiimewebfiles/charts")
except OSError:
pass
# define directory to clean up
self._dirs_to_clean_up = ["/tmp/qiimewebfiles/charts"]
def tearDown(self):
map(remove, self._paths_to_clean_up)
map(removedirs, self._dirs_to_clean_up)
def test_make_legend(self):
"""make_legend create a legend image given an array of ids and
colors"""
fpath = '/tmp/qiimewebfiles/area.pdf'
filename1 = '/tmp/qiimewebfiles/area_legend.pdf'
obs = make_legend(self.sample_ids, self.colors, self.plot_width,
self.plot_height, 'black', 'white', fpath,
self.generate_image_type, self.dpi)
        self.assertTrue(exists(filename1), 'The legend pdf file was not created in \
the appropriate location')
self._paths_to_clean_up = [filename1]
def test_get_counts(self):
"""get_counts should gets all the counts for an input file"""
# test the pie charts
img_data = get_counts("Phylum", ['14FC041', '14FC042', '14FC043'], 5,
"/tmp/qiimewebfiles/", 1, self.lines_parsed,
self.prefs, self.color_prefs, 'black', 'white',
'pie', self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi,
'file.txt', 0, 'categorical', False)
self.assertEqual(len(img_data), 8)
# test the area charts
img_data = get_counts("Phylum", ['14FC041', '14FC042', '14FC043'], 5,
"/tmp/qiimewebfiles/", 1, self.lines_parsed,
self.prefs, self.color_prefs, 'black', 'white',
'area', self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi,
'file.txt', 0, 'categorical', False)
self.assertEqual(len(img_data), 2)
# test the area charts
img_data = get_counts("Phylum", ['14FC041', '14FC042', '14FC043'], 5,
"/tmp/qiimewebfiles/", 1, self.lines_parsed,
self.prefs, self.color_prefs, 'black', 'white',
'bar', self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi,
'file.txt', 0, 'categorical', False)
self.assertEqual(len(img_data), 2)
# clean up files generated
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_get_fracs(self):
""""get_fracs should Return fractions for matplotlib chart"""
# test the pie charts
exp_all_counts = [DATA_HTML % (
(4.0 / 10) * 100.0, 'a<br>g', 'h', 'h', "a;g;h"),
DATA_HTML % (
(3.0 / 10) * 100,
'd<br>e',
'f',
'f',
"d;e;f"),
DATA_HTML % (
(2.0 / 10) * 100,
'd<br>e',
'i',
'i',
"d;e;i"),
DATA_HTML % ((1.0 / 10) * 100, 'a<br>b', 'c', 'c', "a;b;c")]
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 5, 10, 'pie')
self.assertEqual(
fracs_labels_other, [("a;b;c", 1.0 / 10), ("a;g;h", 4.0 / 10),
("d;e;f", 3.0 / 10), ("d;e;i", 2.0 / 10)])
self.assertEqual(fracs_labels, [])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 0)
self.assertEqual(red, 10)
self.assertEqual(other_frac, 0)
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 3, 10, 'pie')
self.assertEqual(
fracs_labels_other, [("a;g;h", 4.0 / 10), ("d;e;f", 3.0 / 10)])
self.assertEqual(
fracs_labels, [("a;g;h", 4.0 / 7), ("d;e;f", 3.0 / 7)])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 2)
self.assertEqual(red, 7)
self.assertEqual(other_frac, 3.0 / 10)
# test the area charts
exp_all_counts = ['4', '3', '2', '1']
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 5, 10, 'area')
self.assertEqual(
fracs_labels_other, [("a;b;c", 1.0 / 10), ("a;g;h", 4.0 / 10),
("d;e;f", 3.0 / 10), ("d;e;i", 2.0 / 10)])
self.assertEqual(fracs_labels, [])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 0)
self.assertEqual(red, 10)
self.assertEqual(other_frac, 0)
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 3, 10, 'area')
self.assertEqual(
fracs_labels_other, [("a;g;h", 4.0 / 10), ("d;e;f", 3.0 / 10),
('d;e;i', 2.0 / 10)])
self.assertEqual(
fracs_labels, [("a;g;h", 8.0 / 18), ("d;e;f", 6.0 / 18)])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 2)
self.assertEqual(red, 9)
self.assertEqual(other_frac, 3.0 / 10)
# test bar charts
exp_all_counts = ['4', '3', '2', '1']
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 5, 10, 'bar')
self.assertEqual(
fracs_labels_other, [("a;b;c", 1.0 / 10), ("a;g;h", 4.0 / 10),
("d;e;f", 3.0 / 10), ("d;e;i", 2.0 / 10)])
self.assertEqual(fracs_labels, [])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 0)
self.assertEqual(red, 10)
self.assertEqual(other_frac, 0)
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 3, 10, 'bar')
self.assertEqual(
fracs_labels_other, [("a;g;h", 4.0 / 10), ("d;e;f", 3.0 / 10),
('d;e;i', 2.0 / 10)])
self.assertEqual(
fracs_labels, [("a;g;h", 8.0 / 18), ("d;e;f", 6.0 / 18)])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 2)
self.assertEqual(red, 9)
self.assertEqual(other_frac, 3.0 / 10)
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_make_HTML_table(self):
"""make_HTML_table should Make HTML tables for one set charts"""
# test pie charts
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \
get_fracs(self.counts1, 5, 10, 'pie')
img_data = make_HTML_table("Phylum", other_frac, 10, red, other_cat,
fracs_labels_other, fracs_labels,
self.dir_path, all_counts, 1, self.prefs,
self.color_prefs, 'black', 'white', 'pie',
'Test1',
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi, 0,
'categorical', False)
self.assertEqual(len(img_data), 2)
# test area charts
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \
get_fracs(self.counts1, 5, 10, 'area')
img_data = make_HTML_table("Phylum", other_frac, 10, red, other_cat,
fracs_labels_other, fracs_labels,
self.dir_path, all_counts, 1, self.prefs,
self.color_prefs, 'black', 'white', 'pie',
'Test1',
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi, 0,
'categorical', False)
self.assertEqual(len(img_data), 2)
# test bar charts
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \
get_fracs(self.counts1, 5, 10, 'bar')
img_data = make_HTML_table("Phylum", other_frac, 10, red, other_cat,
fracs_labels_other, fracs_labels,
self.dir_path, all_counts, 1, self.prefs,
self.color_prefs, 'black', 'white', 'pie',
'Test1',
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi, 0,
'categorical', False)
self.assertEqual(len(img_data), 2)
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_make_pie_chart(self):
"""make_pie_chart should create HTML source and pdfs for pie_charts"""
filename1 = '/tmp/qiimewebfiles/charts/pie_chart.png'
filename2 = '/tmp/qiimewebfiles/charts/pie_chart_legend.pdf'
filename3 = '/tmp/qiimewebfiles/charts/pie_chart.pdf'
obs1, obs2, obs3, obs4 = make_pie_chart(self.fracs, self.dir_path, 1,
self.prefs, self.color_prefs, "black", "white",
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi, False,
file_prefix="pie_chart",
props=self.props)
self.assertTrue(exists(filename1), 'The png file was not created in \
the appropriate location')
        self.assertTrue(exists(filename2), 'The legend pdf file was not created in \
the appropriate location')
self.assertTrue(exists(filename3), 'The pdf file was not created in \
the appropriate location')
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_make_area_bar_chart(self):
"""make_area_bar_chart should create HTML source and pdfs for area
and bar charts"""
# following is a list of files being generated
filename1 = '/tmp/qiimewebfiles/charts/area_chart.png'
filename2 = '/tmp/qiimewebfiles/charts/area_chart_legend.pdf'
filename3 = '/tmp/qiimewebfiles/charts/area_chart.pdf'
filename4 = '/tmp/qiimewebfiles/charts/bar_chart.png'
filename5 = '/tmp/qiimewebfiles/charts/bar_chart_legend.pdf'
filename6 = '/tmp/qiimewebfiles/charts/bar_chart.pdf'
# test area chart
obs1, obs2, obs3, obs4 = make_area_bar_chart(self.sample_ids,
self.area_fracs,
self.taxa, self.dir_path, 1, self.prefs,
self.color_prefs, "black", "white", "area",
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width,
self.dpi, 0, 'categorical', False,
"area_chart")
self.assertTrue(exists(filename1), 'The png file was not created in \
the appropriate location')
        self.assertTrue(exists(filename2), 'The legend pdf file was not created in \
the appropriate location')
self.assertTrue(exists(filename3), 'The pdf file was not created in \
the appropriate location')
# test bar chart
obs1, obs2, obs3, obs4 = make_area_bar_chart(self.sample_ids,
self.area_fracs,
self.taxa, self.dir_path, 1, self.prefs,
self.color_prefs, "black", "white", "bar",
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi,
0, 'categorical', False, "bar_chart",
self.props)
self.assertTrue(exists(filename4), 'The png file was not created in \
the appropriate location')
        self.assertTrue(exists(filename5), 'The legend pdf file was not created in \
the appropriate location')
self.assertTrue(exists(filename6), 'The pdf file was not created in \
the appropriate location')
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_write_html_file(self):
"Write html and make sure it gets cleaned up"""
filename1 = '/tmp/test.html'
self._paths_to_clean_up = [filename1]
write_html_file('Test', '/tmp/test.html')
self.assertTrue(exists(filename1), 'The file was not created in \
the appropriate location')
self._paths_to_clean_up = [filename1]
# run tests if called from command line
if __name__ == "__main__":
main()
| gpl-2.0 |
louispotok/pandas | pandas/util/_validators.py | 4 | 13041 | """
Module that contains many useful utilities
for validating data or function arguments
"""
import warnings
from pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = 'argument' if max_arg_count == 1 else 'arguments'
raise TypeError(
"{fname}() takes at most {max_arg} {argument} "
"({given_arg} given)".format(
fname=fname, max_arg=max_arg_count,
argument=argument, given_arg=actual_arg_count))
def _check_for_default_values(fname, arg_val_dict, compat_args):
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
# try checking equality directly with '=' operator,
# as comparison may have been overridden for the left
# hand object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or \
(v1 is None and v2 is not None):
match = False
else:
match = (v1 == v2)
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
except:
match = (arg_val_dict[key] is compat_args[key])
if not match:
raise ValueError(("the '{arg}' parameter is not "
"supported in the pandas "
"implementation of {fname}()".
format(fname=fname, arg=key)))
def validate_args(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.
fname: str
The name of the function being passed the `*args` parameter
args: tuple
The `*args` parameter passed into a function
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
A ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, an ordered dictionary ensures that the original
order of the keyword arguments is enforced. Note that if there is
only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are `compat_args`
ValueError if `args` contains values that do not correspond to those
of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
# We do this so that we can provide a more informative
# error message about the parameters that we are not
# supporting in the pandas implementation of 'fname'
kwargs = dict(zip(compat_args, args))
_check_for_default_values(fname, kwargs, compat_args)
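# Hypothetical usage sketch (added comment, not part of pandas): with
#
#     compat_args = OrderedDict([('axis', None), ('out', None)])
#
# validate_args('cumsum', (None,), 1, compat_args) passes silently, while
# validate_args('cumsum', (1,), 1, compat_args) raises ValueError because the
# positional value given for 'axis' differs from its default of None.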
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(("{fname}() got an unexpected "
"keyword argument '{arg}'".
format(fname=fname, arg=bad_arg)))
def validate_kwargs(fname, kwargs, compat_args):
"""
Checks whether parameters passed to the **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
kwargs: dict
The `**kwargs` parameter passed into `fname`
compat_args: dict
A dictionary of keys that `kwargs` is allowed to have and their
associated default values
Raises
------
TypeError if `kwargs` contains keys not in `compat_args`
ValueError if `kwargs` contains keys in `compat_args` that do not
map to the default values specified in `compat_args`
"""
kwds = kwargs.copy()
_check_for_invalid_keys(fname, kwargs, compat_args)
_check_for_default_values(fname, kwds, compat_args)
def validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
compat_args):
"""
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
args: tuple
The `*args` parameter passed into a function
kwargs: dict
The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
The minimum number of arguments that the function `fname`
requires, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
A ordered dictionary of keys that `kwargs` is allowed to
have and their associated default values. Note that if there
is only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are
`compat_args` OR `kwargs` contains keys not in `compat_args`
ValueError if `args` contains values not at the default value (`None`)
`kwargs` contains keys in `compat_args` that do not map to the default
value as specified in `compat_args`
See Also
--------
validate_args : purely args validation
validate_kwargs : purely kwargs validation
"""
# Check that the total number of arguments passed in (i.e.
# args and kwargs) does not exceed the length of compat_args
_check_arg_length(fname, args + tuple(kwargs.values()),
max_fname_arg_count, compat_args)
# Check there is no overlap with the positional and keyword
# arguments, similar to what is done in actual Python functions
args_dict = dict(zip(compat_args, args))
for key in args_dict:
if key in kwargs:
raise TypeError("{fname}() got multiple values for keyword "
"argument '{arg}'".format(fname=fname, arg=key))
kwargs.update(args_dict)
validate_kwargs(fname, kwargs, compat_args)
def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError('For argument "{arg}" expected type bool, received '
'type {typ}.'.format(arg=arg_name,
typ=type(value).__name__))
return value
def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"""Argument handler for mixed index, columns / axis functions
In an attempt to handle both `.method(index, columns)`, and
`.method(arg, axis=.)`, we have to do some bad things to argument
parsing. This translates all arguments to `{index=., columns=.}` style.
Parameters
----------
data : DataFrame or Panel
arg : tuple
All positional arguments from the user
kwargs : dict
All keyword arguments from the user
arg_name, method_name : str
Used for better error messages
Returns
-------
kwargs : dict
A dictionary of keyword arguments. Doesn't modify ``kwargs``
inplace, so update them with the return value here.
Examples
--------
>>> df._validate_axis_style_args((str.upper,), {'columns': id},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
This emits a warning
>>> df._validate_axis_style_args((str.upper, id), {},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
"""
# TODO(PY3): Change to keyword-only args and remove all this
out = {}
# Goal: fill 'out' with index/columns-style arguments
# like out = {'index': foo, 'columns': bar}
# Start by validating for consistency
if 'axis' in kwargs and any(x in kwargs for x in data._AXIS_NUMBERS):
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
raise TypeError(msg)
# First fill with explicit values provided by the user...
if arg_name in kwargs:
if args:
msg = ("{} got multiple values for argument "
"'{}'".format(method_name, arg_name))
raise TypeError(msg)
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = kwargs[arg_name]
# More user-provided arguments, now from kwargs
for k, v in kwargs.items():
try:
ax = data._get_axis_name(k)
except ValueError:
pass
else:
out[ax] = v
# All user-provided kwargs have been handled now.
# Now we supplement with positional arguments, emitting warnings
# when there's ambiguity and raising when there's conflicts
if len(args) == 0:
pass # It's up to the function to decide if this is valid
elif len(args) == 1:
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = args[0]
elif len(args) == 2:
if 'axis' in kwargs:
# Unambiguously wrong
msg = ("Cannot specify both 'axis' and any of 'index' "
"or 'columns'")
raise TypeError(msg)
msg = ("Interpreting call\n\t'.{method_name}(a, b)' as "
"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments to remove any ambiguity. In the future, using "
"positional arguments for 'index' or 'columns' will raise "
" a 'TypeError'.")
warnings.warn(msg.format(method_name=method_name,), FutureWarning,
stacklevel=4)
out[data._AXIS_NAMES[0]] = args[0]
out[data._AXIS_NAMES[1]] = args[1]
else:
msg = "Cannot specify all of '{}', 'index', 'columns'."
raise TypeError(msg.format(arg_name))
return out
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
"""Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
"""
from pandas.core.missing import clean_fill_method
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
elif value is None and method is not None:
method = clean_fill_method(method)
elif value is not None and method is None:
if validate_scalar_dict_value and isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
elif value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
return value, method
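# Added usage notes (derived from the checks above): validate_fillna_kwargs(0, None)
# returns (0, None); validate_fillna_kwargs(None, 'ffill') normalises the method via
# clean_fill_method; passing both or neither raises ValueError, and a list/tuple
# value raises TypeError when validate_scalar_dict_value is True.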
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/core/common.py | 7 | 15175 | """
Misc tools for implementing data structures
"""
import sys
import warnings
from datetime import datetime, timedelta
from functools import partial
import numpy as np
import pandas.lib as lib
import pandas.tslib as tslib
from pandas import compat
from pandas.compat import long, zip, iteritems
from pandas.core.config import get_option
from pandas.types.generic import ABCSeries
from pandas.types.common import _NS_DTYPE
from pandas.types.inference import _iterable_not_string
from pandas.types.missing import isnull
from pandas.api import types
from pandas.types import common
# back-compat of public API
# deprecate these functions
m = sys.modules['pandas.core.common']
for t in [t for t in dir(types) if not t.startswith('_')]:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"import from the public API: "
"pandas.api.types.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(types, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
# back-compat for non-public functions
# deprecate these functions
for t in ['is_datetime_arraylike',
'is_datetime_or_timedelta_dtype',
'is_datetimelike',
'is_datetimelike_v_numeric',
'is_datetimelike_v_object',
'is_datetimetz',
'is_int_or_datetime_dtype',
'is_period_arraylike',
'is_string_like',
'is_string_like_dtype']:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"These are not longer public API functions, "
"but can be imported from "
"pandas.types.common.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(common, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
# deprecate array_equivalent
def array_equivalent(*args, **kwargs):
warnings.warn("'pandas.core.common.array_equivalent' is deprecated and "
"is no longer public API", DeprecationWarning, stacklevel=2)
from pandas.types import missing
return missing.array_equivalent(*args, **kwargs)
class PandasError(Exception):
pass
class PerformanceWarning(Warning):
pass
class SettingWithCopyError(ValueError):
pass
class SettingWithCopyWarning(Warning):
pass
class AmbiguousIndexError(PandasError, KeyError):
pass
class UnsupportedFunctionCall(ValueError):
pass
class AbstractMethodError(NotImplementedError):
"""Raise this error instead of NotImplementedError for abstract methods
while keeping compatibility with Python 2 and Python 3.
"""
def __init__(self, class_instance):
self.class_instance = class_instance
def __str__(self):
return ("This method must be defined in the concrete class of %s" %
self.class_instance.__class__.__name__)
def flatten(l):
"""Flatten an arbitrarily nested sequence.
Parameters
----------
l : sequence
The non string sequence to flatten
Notes
-----
This doesn't consider strings sequences.
Returns
-------
flattened : generator
"""
for el in l:
if _iterable_not_string(el):
for s in flatten(el):
yield s
else:
yield el
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
def _maybe_match_name(a, b):
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
if a_has and b_has:
if a.name == b.name:
return a.name
else:
return None
elif a_has:
return a.name
elif b_has:
return b.name
return None
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
raise TypeError('object of type %r has no info axis' %
type(obj).__name__)
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
def _maybe_box(indexer, values, obj, key):
# if we have multiples coming back, box em
if isinstance(values, np.ndarray):
return obj[indexer.get_loc(key)]
# return the value
return values
def _maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
if isinstance(value, (np.datetime64, datetime)):
value = tslib.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslib.Timedelta(value)
return value
_values_from_object = lib.values_from_object
def is_bool_indexer(key):
if isinstance(key, (ABCSeries, np.ndarray)):
if key.dtype == np.object_:
key = np.asarray(_values_from_object(key))
if not lib.is_bool_array(key):
if isnull(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
except TypeError: # pragma: no cover
return False
return False
def _default_index(n):
from pandas.core.index import RangeIndex
return RangeIndex(0, n, name=None)
def _mut_exclusive(**kwargs):
item1, item2 = kwargs.items()
label1, val1 = item1
label2, val2 = item2
if val1 is not None and val2 is not None:
raise TypeError('mutually exclusive arguments: %r and %r' %
(label1, label2))
elif val1 is not None:
return val1
else:
return val2
def _not_none(*args):
return (arg for arg in args if arg is not None)
def _any_none(*args):
for arg in args:
if arg is None:
return True
return False
def _all_not_none(*args):
for arg in args:
if arg is None:
return False
return True
def _count_not_none(*args):
return sum(x is not None for x in args)
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def iterpairs(seq):
"""
Parameters
----------
seq : sequence
Returns
-------
iterator returning overlapping pairs of elements
Examples
--------
>>> list(iterpairs([1, 2, 3, 4]))
[(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
next(seq_it_next)
return zip(seq_it, seq_it_next)
def split_ranges(mask):
""" Generates tuples of ranges which cover all True value in mask
>>> list(split_ranges([1,0,0,1,0]))
[(0, 1), (3, 4)]
"""
ranges = [(0, len(mask))]
for pos, val in enumerate(mask):
        if not val: # this pos should be omitted, split off the prefix range
r = ranges.pop()
if pos > r[0]: # yield non-zero range
yield (r[0], pos)
if pos + 1 < len(mask): # save the rest for processing
ranges.append((pos + 1, len(mask)))
if ranges:
yield ranges[-1]
def _long_prod(vals):
result = long(1)
for x in vals:
result *= x
return result
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
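# Added illustration (not part of pandas): groupby('AbBa', key=str.lower) builds
# {'a': ['A', 'a'], 'b': ['b', 'B']}, i.e. values are grouped by key without
# requiring the input sequence to be pre-sorted.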
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, compat.string_types):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
try:
result = np.empty(len(values), dtype=object)
result[:] = values
except ValueError:
# we have a list-of-list
result[:] = [tuple(x) for x in values]
return result
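# Added illustration (not part of pandas): _asarray_tuplesafe([(1, 2), (3, 4)])
# returns a 1-D object array whose two elements are the tuples themselves,
# rather than the 2x2 integer array a plain np.asarray call would produce.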
def _index_labels_to_array(labels):
if isinstance(labels, (compat.string_types, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels)
return labels
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
def is_null_slice(obj):
""" we have a null slice """
return (isinstance(obj, slice) and obj.start is None and
obj.stop is None and obj.step is None)
def is_full_slice(obj, l):
""" we have a full length slice """
return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and
obj.step is None)
def _get_callable_name(obj):
# typical case has name
if hasattr(obj, '__name__'):
return getattr(obj, '__name__')
# some objects don't; could recurse
if isinstance(obj, partial):
return _get_callable_name(obj.func)
# fall back to class name
if hasattr(obj, '__call__'):
return obj.__class__.__name__
# everything failed (probably because the argument
# wasn't actually callable); we return None
# instead of the empty string in this case to allow
# distinguishing between no name and a name of ''
return None
def _apply_if_callable(maybe_callable, obj, **kwargs):
"""
Evaluate possibly callable input using obj and kwargs if it is callable,
otherwise return as it is
"""
if callable(maybe_callable):
return maybe_callable(obj, **kwargs)
return maybe_callable
def _all_none(*args):
for arg in args:
if arg is not None:
return False
return True
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
return new_vals.view(_NS_DTYPE)
import pandas.tslib as tslib
if arr1.dtype == _NS_DTYPE:
arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
if arr2.dtype == _NS_DTYPE:
arr2 = tslib.ints_to_pydatetime(arr2.view('i8'))
return np.where(mask, arr1, arr2)
def _dict_compat(d):
"""
Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dict
Parameters
----------
d: dict like object
Returns
-------
dict
"""
return dict((_maybe_box_datetimelike(key), value)
for key, value in iteritems(d))
def sentinel_factory():
class Sentinel(object):
pass
return Sentinel()
# ----------------------------------------------------------------------
# Detect our environment
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
get_option('mode.sim_interactive'))
try:
return __IPYTHON__ or check_main() # noqa
except:
return check_main()
def in_qtconsole():
"""
check if we're inside an IPython qtconsole
DEPRECATED: This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'qtconsole' in front_end.lower():
return True
except:
return False
return False
def in_ipnb():
"""
check if we're inside an IPython Notebook
DEPRECATED: This is no longer used in pandas, and won't work in IPython 3
and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'notebook' in front_end.lower():
return True
except:
return False
return False
def in_ipython_frontend():
"""
check if we're inside an an IPython zmq frontend
"""
try:
ip = get_ipython() # noqa
return 'zmq' in str(type(ip)).lower()
except:
pass
return False
def _random_state(state=None):
"""
Helper function for processing random_state arguments.
Parameters
----------
state : int, np.random.RandomState, None.
If receives an int, passes to np.random.RandomState() as seed.
If receives an np.random.RandomState object, just returns object.
If receives `None`, returns np.random.
If receives anything else, raises an informative ValueError.
Default None.
Returns
-------
np.random.RandomState
"""
if types.is_integer(state):
return np.random.RandomState(state)
elif isinstance(state, np.random.RandomState):
return state
elif state is None:
return np.random
else:
raise ValueError("random_state must be an integer, a numpy "
"RandomState, or None")
| gpl-3.0 |
Bodidze/21v-python | unit_04/22.py | 2 | 2171 | # -*- coding:utf-8 -*-
def read_file(filename):
infile = open(filename, 'r')
    infile.readline() # read the column headers
dates = []; prices = []
for line in infile:
        columns = line.split(',') # split on the comma
        date = columns[0]
        date = date[:-3] # drop the day of the month (the last three characters)
        price = columns[-1] # we only need the last column
        dates.append(date)
        prices.append(float(price)) # don't forget to convert to float
    infile.close()
    dates.reverse() # restore the order: from oldest to newest
    prices.reverse() # and the prices accordingly
return dates, prices
dates = {}; prices = {}
d, p = read_file('stockprice_sun.csv')
dates['Sun'] = d; prices['Sun'] = p
d, p = read_file('stockprice_microsoft.csv')
dates['MS'] = d; prices['MS'] = p
d, p = read_file('stockprice_google.csv')
dates['Google'] = d; prices['Google'] = p
data = {'prices': prices, 'dates': dates}
# price normalization:
norm_price = prices['Sun'][0]
prices['Sun'] = [p/norm_price for p in prices['Sun']]
norm_price = prices['MS'][0]
prices['MS'] = [p/norm_price for p in prices['MS']]
jan15_MS = prices['MS'][dates['MS'].index('2015-01')]
jan15_Sun = prices['Sun'][dates['Sun'].index('2015-01')]
norm_price = prices['Google'][0]/max(jan15_MS, jan15_Sun)
prices['Google'] = [p/norm_price for p in prices['Google']]
# обозначаем "x" точки для построения графиков
x = {}
x['Sun'] = range(len(prices['Sun']))
x['MS'] = range(len(prices['MS']))
# for Google we must start from January 2015:
jan15 = dates['Sun'].index('2015-01')
x['Google'] = range(jan15, jan15 + len(prices['Google']), 1)
import matplotlib.pyplot as plt
plt.plot(x['MS'], prices['MS'], 'g-')
plt.plot(x['Sun'], prices['Sun'], 'b-')
plt.plot(x['Google'], prices['Google'], 'r-')
plt.legend(['Microsoft', 'Sun', 'Google'], loc=0)
plt.grid()
plt.show()
| mit |
equialgo/scikit-learn | sklearn/decomposition/tests/test_pca.py | 14 | 20935 | import numpy as np
import scipy as sp
from itertools import product
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']
def test_pca():
# PCA on dense arrays
X = iris.data
for n_comp in np.arange(X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='full')
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
# test explained_variance_ratio_ == 1 with all components
pca = PCA(svd_solver='full')
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
def test_pca_arpack_solver():
# PCA on dense arrays
X = iris.data
d = X.shape[1]
# Loop excluding the extremes, invalid inputs for arpack
for n_comp in np.arange(1, d):
pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(d), 12)
pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
assert_equal(pca.n_components,
PCA(n_components=d,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
def test_pca_randomized_solver():
# PCA on dense arrays
X = iris.data
# Loop excluding the 0, invalid for randomized
for n_comp in np.arange(1, X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='randomized', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='randomized', random_state=0).svd_solver)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
n_features = n_components + 2 # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_greater(X.std(axis=0).std(), 43.8)
for solver, copy in product(solver_list, (True, False)):
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = PCA(n_components=n_components, whiten=True, copy=copy,
svd_solver=solver, random_state=0, iterated_power=7)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
decimal=6)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = PCA(n_components=n_components, whiten=False, copy=copy,
svd_solver=solver).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full').fit(X)
apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
assert_array_almost_equal(pca.explained_variance_,
apca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
apca.explained_variance_ratio_, 3)
rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_pca = apca.transform(X)
assert_array_almost_equal(apca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_singular_values():
# Check that the PCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full',
random_state=rng).fit(X)
apca = PCA(n_components=2, svd_solver='arpack',
random_state=rng).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.singular_values_, apca.singular_values_, 12)
assert_array_almost_equal(pca.singular_values_, rpca.singular_values_, 1)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 1)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 0)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
apca = PCA(n_components=3, svd_solver='arpack', random_state=rng)
rpca = PCA(n_components=3, svd_solver='randomized', random_state=rng)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
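    # Normalizing each score column to unit length and then rescaling two of them
    # makes 3.142, 2.718 and 1.0 the singular values of the reconstructed X_hat
    # below, since the columns of X_pca are orthogonal and pca.components_ has
    # orthonormal rows.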
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
apca.fit(X_hat)
rpca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
for solver in solver_list:
Yt = PCA(n_components=2, svd_solver=solver).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='full').fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
for solver in solver_list:
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for solver in solver_list:
for n_components in [-1, 3]:
assert_raises(ValueError,
PCA(n_components, svd_solver=solver).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by randomized PCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2, svd_solver='randomized',
random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by randomized PCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = PCA(n_components=1, svd_solver='randomized',
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
    # Test that randomized PCA is invertible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_less(relative_max_delta, 1e-5)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle', svd_solver='full').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5, svd_solver='full').fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    # Test that probabilistic PCA correctly separates different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives different scores if whiten=True
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
ll2 = pca.score(X)
assert_true(ll1 > ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k, svd_solver='full')
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
def test_svd_solver_auto():
rng = np.random.RandomState(0)
X = rng.uniform(size=(1000, 50))
# case: n_components in (0,1) => 'full'
pca = PCA(n_components=.5)
pca.fit(X)
pca_test = PCA(n_components=.5, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: max(X.shape) <= 500 => 'full'
pca = PCA(n_components=5, random_state=0)
Y = X[:10, :]
pca.fit(Y)
pca_test = PCA(n_components=5, svd_solver='full', random_state=0)
pca_test.fit(Y)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: n_components >= .8 * min(X.shape) => 'full'
pca = PCA(n_components=50)
pca.fit(X)
pca_test = PCA(n_components=50, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# n_components >= 1 and n_components < .8 * min(X.shape) => 'randomized'
pca = PCA(n_components=10, random_state=0)
pca.fit(X)
pca_test = PCA(n_components=10, svd_solver='randomized', random_state=0)
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
def test_deprecation_randomized_pca():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA was "
"deprecated in 0.18 and will be "
"removed in 0.20. Use PCA(svd_solver='randomized') "
"instead. The new implementation DOES NOT store "
"whiten ``components_``. Apply transform to get them.")
def fit_deprecated(X):
global Y
rpca = RandomizedPCA(random_state=0)
Y = rpca.fit_transform(X)
assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
assert_array_almost_equal(Y, Y_pca)
def test_pca_sparse_input():
X = np.random.RandomState(0).rand(5, 4)
X = sp.sparse.csr_matrix(X)
assert(sp.sparse.issparse(X))
for svd_solver in solver_list:
pca = PCA(n_components=3, svd_solver=svd_solver)
assert_raises(TypeError, pca.fit, X)
| bsd-3-clause |
b1quint/samfp | samfp/old/x3d_animate.py | 1 | 2479 | #!/usr/bin/env python
# -*- coding: utf8 -*-
from __future__ import division, print_function
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams['image.origin'] = 'lower'
mpl.rcParams['image.interpolation'] = 'nearest'
mpl.rcParams['image.cmap'] = 'gray_r'
from scipy import ndimage
from matplotlib import animation
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import astropy.visualization as viz
import numpy as np
import os
__author__ = 'Bruno Quint'
def main():
# Input Data ---
path = "/home/bquint/Dropbox/temp/30Dor_NII/"
filename = "30DorNII_3D_WCL.fits"
# Create/Read Cube ---
cube = DataCube(os.path.join(path, filename))
print(cube)
# Create figure to animate ---
fig = plt.figure(figsize=(5,5), frameon=False)
ax = fig.add_axes([0,0,1,1])
ax.axis('off')
n1 = viz.ImageNormalize(cube.get_frame(0), interval=viz.ZScaleInterval())
n2 = viz.ImageNormalize(cube.get_frame(20), interval=viz.ZScaleInterval())
im = ax.imshow(cube.get_frame(0), animated=True, vmin=n1.vmin, vmax=n2.vmax)
def update(i):
im.set_array(cube.get_frame(i))
return im,
ani = animation.FuncAnimation(fig, update, interval=100, repeat=True, frames=cube.depth)
ani.save(cube.filename.replace('.fits', '.gif'), writer="imagemagick", fps=30)
plt.show()
class DataCube(object):
def __init__(self, filename):
cube = pyfits.getdata(filename)
self.min = cube.min()
self.max = cube.max()
self.mean = cube.mean()
self.std = cube.std()
self.median = np.median(cube)
self.filename = filename
self.depth, self.heigh, self.width = cube.shape
del cube
return
def __str__(self):
s = (
"Cube Description: \n"
" Min = {min:.2f}\n"
" Max = {max:.2f}\n"
" Mean = {mean:.2f}\n"
" Median = {median:.2f}\n"
" STD = {std:.2f}\n"
).format(**self.__dict__)
return s
def get_frame(self, i):
temp = pyfits.getdata(self.filename)[i]
temp = ndimage.median_filter(temp, 3)
return temp
def get_cube(self):
return pyfits.getdata(self.filename)
def get_max_frame(self):
temp = pyfits.getdata(self.filename)
temp = np.sum(temp, axis=2)
temp = np.sum(temp, axis=1)
temp = np.argmax(temp)
return temp
if __name__ == '__main__':
main()
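# Usage sketch (illustrative only; 'my_cube.fits' is a hypothetical file name):
#
#   cube = DataCube('my_cube.fits')  # wraps a FITS cube, keeping only summary stats
#   frame0 = cube.get_frame(0)       # single median-filtered 2-D frame
#
# Note that get_frame() re-reads the whole cube from disk on every call, trading
# speed for a small memory footprint.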
| bsd-3-clause |
pearsonlab/thunder | doc/source/conf.py | 7 | 8102 | # -*- coding: utf-8 -*-
#
# Thunder documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 16 17:00:45 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import matplotlib as mpl
import sphinx_rtd_theme
mpl.use("Agg")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.viewcode',
]
# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
autodoc_default_flags = ['members','imported-members']
autoclass_content = 'class'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Thunder'
copyright = u'2014, Jeremy Freeman'
html_logo = "header-logo-small.svg"
html_show_sphinx = False
html_show_copyright = False
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
sys.path.insert(0, os.path.abspath(os.path.pardir))
import thunder
version = thunder.__version__
# The full version, including alpha/beta/rc tags.
release = thunder.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_favicon = "favicon.ico"
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Thunderdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Thunder.tex', u'Thunder Documentation',
u'Jeremy Freeman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'thunder', u'Thunder Documentation',
[u'Jeremy Freeman'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Thunder', u'Thunder Documentation',
u'Jeremy Freeman', 'Thunder', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| apache-2.0 |
hainm/statsmodels | statsmodels/sandbox/panel/mixed.py | 31 | 21019 | """
Mixed effects models
Author: Jonathan Taylor
Author: Josef Perktold
License: BSD-3
Notes
------
It's pretty slow if the model is misspecified, in my first example convergence
in loglike is not reached within 2000 iterations. Added stop criteria based
on convergence of parameters instead.
With correctly specified model, convergence is fast, in 6 iterations in
example.
"""
from __future__ import print_function
import numpy as np
import numpy.linalg as L
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.tools.decorators import cache_readonly
class Unit(object):
"""
Individual experimental unit for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
endog : ndarray, (nobs,)
response, endogenous variable
exog_fe : ndarray, (nobs, k_vars_fe)
explanatory variables as regressors or fixed effects,
should include exog_re to correct mean of random
coefficients, see Notes
exog_re : ndarray, (nobs, k_vars_re)
explanatory variables or random effects or coefficients
Notes
-----
If the exog_re variables are not included in exog_fe, then the
mean of the random constants or coefficients are not centered.
The covariance matrix of the random parameter estimates are not
centered in this case. (That's how it looks to me. JP)
"""
def __init__(self, endog, exog_fe, exog_re):
self.Y = endog
self.X = exog_fe
self.Z = exog_re
self.n = endog.shape[0]
def _compute_S(self, D, sigma):
"""covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.3) from Laird, Lange, Stram (see help(Unit))
"""
self.S = (np.identity(self.n) * sigma**2 +
np.dot(self.Z, np.dot(D, self.Z.T)))
def _compute_W(self):
"""inverse covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.2) from Laird, Lange, Stram (see help(Unit))
"""
self.W = L.inv(self.S)
def compute_P(self, Sinv):
"""projection matrix (nobs_i, nobs_i) (M in regression ?) (JP check, guessing)
Display (3.10) from Laird, Lange, Stram (see help(Unit))
W - W X Sinv X' W'
"""
t = np.dot(self.W, self.X)
self.P = self.W - np.dot(np.dot(t, Sinv), t.T)
def _compute_r(self, alpha):
"""residual after removing fixed effects
Display (3.5) from Laird, Lange, Stram (see help(Unit))
"""
self.r = self.Y - np.dot(self.X, alpha)
def _compute_b(self, D):
"""coefficients for random effects/coefficients
Display (3.4) from Laird, Lange, Stram (see help(Unit))
D Z' W r
"""
self.b = np.dot(D, np.dot(np.dot(self.Z.T, self.W), self.r))
def fit(self, a, D, sigma):
"""
Compute unit specific parameters in
Laird, Lange, Stram (see help(Unit)).
Displays (3.2)-(3.5).
"""
self._compute_S(D, sigma) #random effect plus error covariance
self._compute_W() #inv(S)
self._compute_r(a) #residual after removing fixed effects/exogs
self._compute_b(D) #? coefficients on random exog, Z ?
def compute_xtwy(self):
"""
Utility function to compute X^tWY (transposed ?) for Unit instance.
"""
return np.dot(np.dot(self.W, self.Y), self.X) #is this transposed ?
def compute_xtwx(self):
"""
Utility function to compute X^tWX for Unit instance.
"""
return np.dot(np.dot(self.X.T, self.W), self.X)
def cov_random(self, D, Sinv=None):
"""
Approximate covariance of estimates of random effects. Just after
Display (3.10) in Laird, Lange, Stram (see help(Unit)).
D - D' Z' P Z D
Notes
-----
In example where the mean of the random coefficient is not zero, this
is not a covariance but a non-centered moment. (proof by example)
"""
if Sinv is not None:
self.compute_P(Sinv)
t = np.dot(self.Z, D)
return D - np.dot(np.dot(t.T, self.P), t)
def logL(self, a, ML=False):
"""
Individual contributions to the log-likelihood, tries to return REML
contribution by default though this requires estimated
fixed effect a to be passed as an argument.
no constant with pi included
a is not used if ML=true (should be a=None in signature)
If ML is false, then the residuals are calculated for the given fixed
effects parameters a.
"""
if ML:
return (np.log(L.det(self.W)) - (self.r * np.dot(self.W, self.r)).sum()) / 2.
else:
if a is None:
raise ValueError('need fixed effect a for REML contribution to log-likelihood')
r = self.Y - np.dot(self.X, a)
return (np.log(L.det(self.W)) - (r * np.dot(self.W, r)).sum()) / 2.
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return - 2 * self.logL(ML=ML)
class OneWayMixed(object):
"""
Model for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
units : list of units
the data for the individual units should be attached to the units
response, fixed and random : formula expression, called as argument to Formula
*available results and alias*
    (subject to renaming, and conversion to cached attributes)
params() -> self.a : coefficient for fixed effects or exog
cov_params() -> self.Sinv : covariance estimate of fixed effects/exog
bse() : standard deviation of params
cov_random -> self.D : estimate of random effects covariance
params_random_units -> [self.units[...].b] : random coefficient for each unit
*attributes*
(others)
self.m : number of units
self.p : k_vars_fixed
self.q : k_vars_random
self.N : nobs (total)
Notes
-----
Fit returns a result instance, but not all results that use the inherited
methods have been checked.
Parameters need to change: drop formula and we require a naming convention for
the units (currently Y,X,Z). - endog, exog_fe, endog_re ?
logL does not include constant, e.g. sqrt(pi)
llf is for MLE not for REML
convergence criteria for iteration
Currently convergence in the iterative solver is reached if either the loglikelihood
*or* the fixed effects parameter don't change above tolerance.
In some examples, the fixed effects parameters converged to 1e-5 within 150 iterations
while the log likelihood did not converge within 2000 iterations. This might be
the case if the fixed effects parameters are well estimated, but there are still
changes in the random effects. If params_rtol and params_atol are set at a higher
level, then the random effects might not be estimated to a very high precision.
The above was with a misspecified model, without a constant. With a
correctly specified model convergence is fast, within a few iterations
(6 in example).
"""
def __init__(self, units):
self.units = units
self.m = len(self.units)
self.n_units = self.m
self.N = sum(unit.X.shape[0] for unit in self.units)
self.nobs = self.N #alias for now
# Determine size of fixed effects
d = self.units[0].X
self.p = d.shape[1] # d.shape = p
self.k_exog_fe = self.p #alias for now
self.a = np.zeros(self.p, np.float64)
# Determine size of D, and sensible initial estimates
# of sigma and D
d = self.units[0].Z
self.q = d.shape[1] # Z.shape = q
self.k_exog_re = self.q #alias for now
self.D = np.zeros((self.q,)*2, np.float64)
self.sigma = 1.
self.dev = np.inf #initialize for iterations, move it?
def _compute_a(self):
"""fixed effects parameters
Display (3.1) of
Laird, Lange, Stram (see help(Mixed)).
"""
for unit in self.units:
unit.fit(self.a, self.D, self.sigma)
S = sum([unit.compute_xtwx() for unit in self.units])
Y = sum([unit.compute_xtwy() for unit in self.units])
self.Sinv = L.pinv(S)
self.a = np.dot(self.Sinv, Y)
def _compute_sigma(self, ML=False):
"""
Estimate sigma. If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.8).
sigma is the standard deviation of the noise (residual)
"""
sigmasq = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
t = unit.r - np.dot(unit.Z, unit.b)
sigmasq += np.power(t, 2).sum()
sigmasq += self.sigma**2 * np.trace(np.identity(unit.n) -
self.sigma**2 * W)
self.sigma = np.sqrt(sigmasq / self.N)
def _compute_D(self, ML=False):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.9).
"""
D = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
D += np.multiply.outer(unit.b, unit.b)
t = np.dot(unit.Z, self.D)
D += self.D - np.dot(np.dot(t.T, W), t)
self.D = D / self.m
def cov_fixed(self):
"""
Approximate covariance of estimates of fixed effects.
Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)).
"""
return self.Sinv
#----------- alias (JP) move to results class ?
def cov_random(self):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma, else return the REML estimate.
see _compute_D, alias for self.D
"""
return self.D
@property
def params(self):
'''
        estimated coefficients for exogenous variables or fixed effects
see _compute_a, alias for self.a
'''
return self.a
@property
def params_random_units(self):
'''random coefficients for each unit
'''
return np.array([unit.b for unit in self.units])
def cov_params(self):
'''
        estimated covariance for coefficients for exogenous variables or fixed effects
see cov_fixed, and Sinv in _compute_a
'''
return self.cov_fixed()
@property
def bse(self):
'''
        standard errors of estimated coefficients for exogenous variables (fixed)
'''
return np.sqrt(np.diag(self.cov_params()))
#----------- end alias
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return -2 * self.logL(ML=ML)
def logL(self, ML=False):
"""
Return log-likelihood, REML by default.
"""
#I don't know what the difference between REML and ML is here.
logL = 0.
for unit in self.units:
logL += unit.logL(a=self.a, ML=ML)
if not ML:
logL += np.log(L.det(self.Sinv)) / 2
return logL
def initialize(self):
S = sum([np.dot(unit.X.T, unit.X) for unit in self.units])
Y = sum([np.dot(unit.X.T, unit.Y) for unit in self.units])
self.a = L.lstsq(S, Y)[0]
D = 0
t = 0
sigmasq = 0
for unit in self.units:
unit.r = unit.Y - np.dot(unit.X, self.a)
if self.q > 1:
unit.b = L.lstsq(unit.Z, unit.r)[0]
else:
Z = unit.Z.reshape((unit.Z.shape[0], 1))
unit.b = L.lstsq(Z, unit.r)[0]
sigmasq += (np.power(unit.Y, 2).sum() -
(self.a * np.dot(unit.X.T, unit.Y)).sum() -
(unit.b * np.dot(unit.Z.T, unit.r)).sum())
D += np.multiply.outer(unit.b, unit.b)
t += L.pinv(np.dot(unit.Z.T, unit.Z))
#TODO: JP added df_resid check
self.df_resid = (self.N - (self.m - 1) * self.q - self.p)
sigmasq /= (self.N - (self.m - 1) * self.q - self.p)
self.sigma = np.sqrt(sigmasq)
self.D = (D - sigmasq * t) / self.m
def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4):
'''convergence check for iterative estimation
'''
self.dev, old = self.deviance(ML=ML), self.dev
#self.history.append(np.hstack((self.dev, self.a)))
self.history['llf'].append(self.dev)
self.history['params'].append(self.a.copy())
self.history['D'].append(self.D.copy())
if np.fabs((self.dev - old) / self.dev) < rtol: #why is there times `*`?
#print np.fabs((self.dev - old)), self.dev, old
self.termination = 'llf'
return False
#break if parameters converged
#TODO: check termination conditions, OR or AND
if np.all(np.abs(self.a - self._a_old) < (params_rtol * self.a + params_atol)):
self.termination = 'params'
return False
self._a_old = self.a.copy()
return True
def fit(self, maxiter=100, ML=False, rtol=1.0e-05, params_rtol=1e-6, params_atol=1e-6):
#initialize for convergence criteria
self._a_old = np.inf * self.a
self.history = {'llf':[], 'params':[], 'D':[]}
for i in range(maxiter):
self._compute_a() #a, Sinv : params, cov_params of fixed exog
self._compute_sigma(ML=ML) #sigma MLE or REML of sigma ?
self._compute_D(ML=ML) #D : covariance of random effects, MLE or REML
if not self.cont(ML=ML, rtol=rtol, params_rtol=params_rtol,
params_atol=params_atol):
break
else: #if end of loop is reached without break
self.termination = 'maxiter'
print('Warning: maximum number of iterations reached')
self.iterations = i
results = OneWayMixedResults(self)
#compatibility functions for fixed effects/exog
results.scale = 1
results.normalized_cov_params = self.cov_params()
return results
class OneWayMixedResults(LikelihoodModelResults):
'''Results class for OneWayMixed models
'''
def __init__(self, model):
#TODO: check, change initialization to more standard pattern
self.model = model
self.params = model.params
#need to overwrite this because we don't have a standard
#model.loglike yet
#TODO: what todo about REML loglike, logL is not normalized
@cache_readonly
def llf(self):
return self.model.logL(ML=True)
@property
def params_random_units(self):
return self.model.params_random_units
def cov_random(self):
return self.model.cov_random()
def mean_random(self, idx='lastexog'):
if idx == 'lastexog':
meanr = self.params[-self.model.k_exog_re:]
elif isinstance(idx, list):
if not len(idx) == self.model.k_exog_re:
raise ValueError('length of idx different from k_exog_re')
else:
meanr = self.params[idx]
else:
meanr = np.zeros(self.model.k_exog_re)
return meanr
def std_random(self):
return np.sqrt(np.diag(self.cov_random()))
def plot_random_univariate(self, bins=None, use_loc=True):
'''create plot of marginal distribution of random effects
Parameters
----------
bins : int or bin edges
option for bins in matplotlibs hist method. Current default is not
very sophisticated. All distributions use the same setting for
bins.
use_loc : bool
If True, then the distribution with mean given by the fixed
effect is used.
Returns
-------
fig : matplotlib figure instance
figure with subplots
Notes
-----
What can make this fancier?
Bin edges will not make sense if loc or scale differ across random
effect distributions.
'''
#outsource this
import matplotlib.pyplot as plt
from scipy.stats import norm as normal
fig = plt.figure()
k = self.model.k_exog_re
if k > 3:
rows, cols = int(np.ceil(k * 0.5)), 2
else:
rows, cols = k, 1
if bins is None:
#bins = self.model.n_units // 20 #TODO: just roughly, check
# bins = np.sqrt(self.model.n_units)
bins = 5 + 2 * self.model.n_units**(1./3.)
if use_loc:
loc = self.mean_random()
else:
loc = [0]*k
scale = self.std_random()
for ii in range(k):
            ax = fig.add_subplot(rows, cols, ii + 1)  # subplot indices are 1-based
freq, bins_, _ = ax.hist(loc[ii] + self.params_random_units[:,ii],
bins=bins, normed=True)
points = np.linspace(bins_[0], bins_[-1], 200)
#ax.plot(points, normal.pdf(points, loc=loc, scale=scale))
#loc of sample is approx. zero, with Z appended to X
#alternative, add fixed to mean
ax.set_title('Random Effect %d Marginal Distribution' % ii)
ax.plot(points,
normal.pdf(points, loc=loc[ii], scale=scale[ii]),
'r')
return fig
def plot_scatter_pairs(self, idx1, idx2, title=None, ax=None):
'''create scatter plot of two random effects
Parameters
----------
idx1, idx2 : int
indices of the two random effects to display, corresponding to
columns of exog_re
title : None or string
If None, then a default title is added
ax : None or matplotlib axis instance
If None, then a figure with one axis is created and returned.
If ax is not None, then the scatter plot is created on it, and
this axis instance is returned.
Returns
-------
ax_or_fig : axis or figure instance
see ax parameter
Notes
-----
Still needs ellipse from estimated parameters
'''
import matplotlib.pyplot as plt
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(1,1,1)
            ax_or_fig = fig
        else:
            ax_or_fig = ax
re1 = self.params_random_units[:,idx1]
re2 = self.params_random_units[:,idx2]
ax.plot(re1, re2, 'o', alpha=0.75)
if title is None:
title = 'Random Effects %d and %d' % (idx1, idx2)
        ax.set_title(title)
return ax_or_fig
def plot_scatter_all_pairs(self, title=None):
from statsmodels.graphics.plot_grids import scatter_ellipse
if self.model.k_exog_re < 2:
raise ValueError('less than two variables available')
return scatter_ellipse(self.params_random_units,
ell_kwds={'color':'r'})
#ell_kwds not implemented yet
# #note I have written this already as helper function, get it
# import matplotlib.pyplot as plt
# #from scipy.stats import norm as normal
# fig = plt.figure()
# k = self.model.k_exog_re
# n_plots = k * (k - 1) // 2
# if n_plots > 3:
# rows, cols = int(np.ceil(n_plots * 0.5)), 2
# else:
# rows, cols = n_plots, 1
#
# count = 1
# for ii in range(k):
# for jj in range(ii):
# ax = fig.add_subplot(rows, cols, count)
# self.plot_scatter_pairs(ii, jj, title=None, ax=ax)
# count += 1
#
# return fig
if __name__ == '__main__':
#see examples/ex_mixed_lls_1.py
pass
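    # Minimal usage sketch (illustrative only -- the simulated data and numbers
    # below are made up; see the example script referenced above for a real run):
    #
    #   import numpy as np
    #   units = []
    #   for _ in range(50):  # 50 experimental units
    #       X = np.column_stack((np.ones(10), np.random.randn(10)))  # fixed effects
    #       Z = np.ones((10, 1))  # random intercept
    #       y = X.dot([1., 2.]) + np.random.randn() + 0.1 * np.random.randn(10)
    #       units.append(Unit(y, X, Z))
    #   model = OneWayMixed(units)
    #   model.initialize()  # starting values for a, D, sigma
    #   results = model.fit(maxiter=200, ML=False)
    #   print(results.params)  # fixed effects estimates
    #   print(model.cov_random())  # estimated random effects covariance D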
| bsd-3-clause |
rseubert/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
SSQ/Udacity-MLND-P1-Model-Evaluation-and-Validation | visuals.py | 19 | 5012 | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as pl
import numpy as np
import sklearn.learning_curve as curves
from sklearn.tree import DecisionTreeRegressor
from sklearn.cross_validation import ShuffleSplit, train_test_split
def ModelLearning(X, y):
""" Calculates the performance of several models with varying sizes of training data.
The learning and testing scores for each model are then plotted. """
# Create 10 cross-validation sets for training and testing
cv = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.2, random_state = 0)
# Generate the training set sizes increasing by 50
train_sizes = np.rint(np.linspace(1, X.shape[0]*0.8 - 1, 9)).astype(int)
# Create the figure window
fig = pl.figure(figsize=(10,7))
# Create three different models based on max_depth
for k, depth in enumerate([1,3,6,10]):
# Create a Decision tree regressor at max_depth = depth
regressor = DecisionTreeRegressor(max_depth = depth)
# Calculate the training and testing scores
sizes, train_scores, test_scores = curves.learning_curve(regressor, X, y, \
cv = cv, train_sizes = train_sizes, scoring = 'r2')
# Find the mean and standard deviation for smoothing
train_std = np.std(train_scores, axis = 1)
train_mean = np.mean(train_scores, axis = 1)
test_std = np.std(test_scores, axis = 1)
test_mean = np.mean(test_scores, axis = 1)
# Subplot the learning curve
ax = fig.add_subplot(2, 2, k+1)
ax.plot(sizes, train_mean, 'o-', color = 'r', label = 'Training Score')
ax.plot(sizes, test_mean, 'o-', color = 'g', label = 'Testing Score')
ax.fill_between(sizes, train_mean - train_std, \
train_mean + train_std, alpha = 0.15, color = 'r')
ax.fill_between(sizes, test_mean - test_std, \
test_mean + test_std, alpha = 0.15, color = 'g')
# Labels
ax.set_title('max_depth = %s'%(depth))
ax.set_xlabel('Number of Training Points')
ax.set_ylabel('Score')
ax.set_xlim([0, X.shape[0]*0.8])
ax.set_ylim([-0.05, 1.05])
# Visual aesthetics
ax.legend(bbox_to_anchor=(1.05, 2.05), loc='lower left', borderaxespad = 0.)
fig.suptitle('Decision Tree Regressor Learning Performances', fontsize = 16, y = 1.03)
fig.tight_layout()
fig.show()
def ModelComplexity(X, y):
""" Calculates the performance of the model as model complexity increases.
The learning and testing errors rates are then plotted. """
# Create 10 cross-validation sets for training and testing
cv = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.2, random_state = 0)
# Vary the max_depth parameter from 1 to 10
max_depth = np.arange(1,11)
# Calculate the training and testing scores
train_scores, test_scores = curves.validation_curve(DecisionTreeRegressor(), X, y, \
param_name = "max_depth", param_range = max_depth, cv = cv, scoring = 'r2')
# Find the mean and standard deviation for smoothing
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
# Plot the validation curve
pl.figure(figsize=(7, 5))
pl.title('Decision Tree Regressor Complexity Performance')
pl.plot(max_depth, train_mean, 'o-', color = 'r', label = 'Training Score')
pl.plot(max_depth, test_mean, 'o-', color = 'g', label = 'Validation Score')
pl.fill_between(max_depth, train_mean - train_std, \
train_mean + train_std, alpha = 0.15, color = 'r')
pl.fill_between(max_depth, test_mean - test_std, \
test_mean + test_std, alpha = 0.15, color = 'g')
# Visual aesthetics
pl.legend(loc = 'lower right')
pl.xlabel('Maximum Depth')
pl.ylabel('Score')
pl.ylim([-0.05,1.05])
pl.show()
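# Usage sketch (illustrative; assumes `features` and `prices` are the data
# loaded in the accompanying project notebook):
#
#   ModelLearning(features, prices)    # learning curves for max_depth in {1, 3, 6, 10}
#   ModelComplexity(features, prices)  # validation curve over max_depth = 1..10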
def PredictTrials(X, y, fitter, data):
""" Performs trials of fitting and predicting data. """
# Store the predicted prices
prices = []
for k in range(10):
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, \
test_size = 0.2, random_state = k)
# Fit the data
reg = fitter(X_train, y_train)
# Make a prediction
pred = reg.predict([data[0]])[0]
prices.append(pred)
# Result
print "Trial {}: ${:,.2f}".format(k+1, pred)
# Display price range
print "\nRange in prices: ${:,.2f}".format(max(prices) - min(prices)) | mit |
shenzebang/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 right informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
            # in the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
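        # Worked example (illustrative note, not part of the original code):
        # with n_features=10, n_features_to_select=3 and step=4, the loop
        # removes min(4, 10 - 3) = 4 features in the first pass and
        # min(4, 6 - 3) = 3 in the second, leaving exactly the 3 requested.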
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
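    For example, with 10 features and ``step=1``, ``grid_scores_`` has
    ceil((10 - 1) / 1) + 1 = 10 entries, one per candidate subset size.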
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (whose number is not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
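        # Worked example (illustrative note, not part of the original code):
        # with n_features=10, step=1 and the initial n_features_to_select=1,
        # the reversed scores line up so that index i corresponds to a subset
        # of i + 1 features; if np.argmax(scores) == 4, the selection becomes
        # max(1, 10 - (9 - 4) * 1) = 5 features.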
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
        # Normalize the summed scores by the number of CV folds
        # (the loop variable n above ends at len(cv) - 1).
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
haphaeu/yoshimi | plot_clipboard.py | 1 | 1075 | # -*- coding: utf-8 -*-
"""
Plot data from clipboard.
Use:
1. Select the data as text or in a spreadsheet and copy it into the clipboard.
2. Run the "pd.read_clipboard()" lines below for each variable you want.
   This will import the data into a pandas DataFrame. If the header row is
   also copied into the clipboard, it becomes the name of the DataFrame
   column holding the data.
3. Plot them using pyplot by selecting the columns you want plotted.
To import data from Orcaflex, just extract data as values and copy it.
This script is not meant to be run as a whole; the clipboard imports below
should be executed line by line.
Created on Fri Nov 3 08:13:17 2017
@author: rarossi
"""
# %%
import pandas as pd
from matplotlib import pyplot as plt
# %%
input('Copy data to clipboard and press any key\n')
correct = pd.read_clipboard()
input('Copy data to clipboard and press any key\n')
wrong = pd.read_clipboard()
# %%
plt.plot(correct.Time, correct.Tension, label='correct')
plt.plot(wrong.Time, wrong.Tension, label='wrong')
plt.grid()
plt.legend(loc='best')
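# Illustrative note (not part of the original script): pd.read_clipboard
# forwards its keyword arguments to pd.read_csv, so data copied with a
# non-default separator or a decimal comma can still be parsed, e.g.
# df = pd.read_clipboard(sep=';', decimal=',')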
| lgpl-3.0 |
AlexanderFabisch/scikit-learn | sklearn/tree/tests/test_tree.py | 13 | 52365 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
    # The Gini index and the mean squared error (variance) might differ due
    # to numerical instability. Since these instabilities mainly occur at
    # high tree depth, we restrict the maximal depth here.
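    # Illustrative identity behind this check (not part of the original
    # comment): for a binary 0/1 target with class-1 fraction p, the node
    # variance used by the MSE criterion is p * (1 - p) while the Gini
    # impurity is 2 * p * (1 - p), so both criteria rank splits identically.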
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # n_samples is set equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that the leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
        yield (check_no_sparse_y_support, name)
| bsd-3-clause |
abhishekkrthakur/scikit-learn | sklearn/hmm.py | 12 | 48722 | # Hidden Markov Models
#
# Author: Ron Weiss <[email protected]>
# and Shiqiao Du <[email protected]>
# API changes: Jaques Grobler <[email protected]>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. This module will be removed in version 0.17.
It has been moved to a separate repository:
https://github.com/hmmlearn/hmmlearn
"""
import string
import numpy as np
from .utils import check_random_state, deprecated
from .utils.extmath import logsumexp
from .utils.validation import check_is_fitted
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
@deprecated("WARNING: The HMM module and its functions will be removed in 0.17 "
"as it no longer falls within the project's scope and API. "
"It has been moved to a separate repository: "
"https://github.com/hmmlearn/hmmlearn")
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
WARNING: The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
    WARNING: Modifies the array in place
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
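# Illustrative note (not part of the original module): up to the tiny EPS
# regularisation added above, normalize(np.array([[1., 3.]]), axis=1)
# returns approximately [[0.25, 0.75]].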
@deprecated("WARNING: The HMM module and its function will be removed in 0.17"
"as it no longer falls within the project's scope and API. "
"It has been moved to a separate repository: "
"https://github.com/hmmlearn/hmmlearn")
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
    startprob_prior : array, shape (`n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
        emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
        subclass-specific emission parameters. Defaults to all
parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
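    # Illustrative sketch (assumes an already-fitted model and observations
    # `X` of shape (n_samples, n_features)):
    #
    #   logprob, posteriors = model.score_samples(X)
    #   # posteriors has shape (n_samples, n_components); each row sums to 1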
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM.
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
_, posteriors = self.score_samples(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
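    # Illustrative sketch (assumes `model` is a fitted subclass instance such
    # as GaussianHMM and `X` has shape (n_samples, n_features)):
    #
    #   logprob, states = model.decode(X)     # Viterbi path by default
    #   model.algorithm = "map"
    #   map_logprob, map_states = model.decode(X)
    #
    # Note that a valid `algorithm` set on the instance takes precedence over
    # the `algorithm` argument passed to this method.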
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : array_like, length `n` List of samples
hidden_states : array_like, length `n` List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
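    # Illustrative sketch (assumes a model whose startprob_, transmat_ and
    # emission parameters have already been set or fitted):
    #
    #   obs, states = model.sample(n=50, random_state=0)
    #   # for GaussianHMM: obs.shape -> (50, n_features), states.shape -> (50,)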
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
"""
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
self._init(obs, self.init_params)
logprob = []
for i in range(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
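    # Illustrative sketch of the expected training input (a list of
    # observation sequences, shown here with the GaussianHMM subclass below):
    #
    #   seq1 = np.random.randn(100, 2)
    #   seq2 = np.random.randn(80, 2)
    #   model = GaussianHMM(n_components=3, n_iter=20)
    #   model.fit([seq1, seq2])    # pass a list of sequences, not one array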
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations > 1:
lneta = np.zeros((n_observations - 1, n_components, n_components))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lnP, lneta)
stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if self.startprob_prior is None:
self.startprob_prior = 1.0
if self.transmat_prior is None:
self.transmat_prior = 1.0
if 's' in params:
self.startprob_ = normalize(
np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
if 't' in params:
transmat_ = normalize(
np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
axis=1)
self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
n_components : int
Number of states.
``_covariance_type`` : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
means : array, shape (`n_components`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_components=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params)
self._covariance_type = covariance_type
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_prior = means_prior
self.means_weight = means_weight
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_components, self.n_features)):
raise ValueError('means must have shape '
'(n_components, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_components
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_components)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
check_is_fitted(self, '_means_')
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if (hasattr(self, 'n_features')
and self.n_features != obs[0].shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (obs[0].shape[1],
self.n_features))
self.n_features = obs[0].shape[1]
if 'm' in params:
self._means_ = cluster.KMeans(
n_clusters=self.n_components).fit(obs[0]).cluster_centers_
if 'c' in params:
cv = np.cov(obs[0].T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_components)
self._covars_[self._covars_ == 0] = 1e-5
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['obs'] = np.zeros((self.n_components, self.n_features))
stats['obs**2'] = np.zeros((self.n_components, self.n_features))
stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in range(self.n_components):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
prior = self.means_prior
weight = self.means_weight
if prior is None:
weight = 0
prior = 0
self._means_ = (weight * prior + stats['obs']) / (weight + denom)
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
means_prior = self.means_prior
means_weight = self.means_weight
if means_prior is None:
means_weight = 0
means_prior = 0
meandiff = self._means_ - means_prior
if self._covariance_type in ('spherical', 'diag'):
cv_num = (means_weight * (meandiff) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)
if self._covariance_type == 'spherical':
self._covars_ = np.tile(
self._covars_.mean(1)[:, np.newaxis],
(1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_components, self.n_features,
self.n_features))
for c in range(self.n_components):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (means_weight * np.outer(meandiff[c],
meandiff[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
(cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum) /
(cvweight + stats['post'][:, None, None]))
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. the covariance parameter on one or
        more components becoming too small). You can fix this by getting
more training data, or increasing covars_prior.
"""
return super(GaussianHMM, self).fit(obs)
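    # Illustrative end-to-end sketch on synthetic data (results will vary):
    #
    #   X = np.random.randn(200, 2)
    #   model = GaussianHMM(n_components=2, covariance_type='diag')
    #   model.fit([X])
    #   model.means_.shape    # -> (2, 2)
    #   model.score(X)        # log likelihood of X under the fitted model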
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
    emissionprob : array, shape (`n_components`, `n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_components, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_components, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
check_is_fitted(self, 'emissionprob_')
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
emissionprob = normalize(self.random_state.rand(self.n_components,
self.n_symbols), 1)
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_components, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
    def _check_input_symbols(self, obs):
        """Check if ``obs`` can be used for MultinomialHMM.fit.
        The input must be an array of non-negative integers whose symbol
        values are contiguous, e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and
        y = [0, 0, 3, 5, 10] is not.
        """
symbols = np.asarray(obs).flatten()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
            # input contains a negative integer
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
            # input symbols are discontinuous
return False
return True
def fit(self, obs, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
"""
err_msg = ("Input must be both positive integer array and "
"every element must be continuous, but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return _BaseHMM.fit(self, obs, **kwargs)
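    # Illustrative sketch (observations are integer symbol sequences whose
    # values form a contiguous range starting at 0; equal-length sequences
    # keep the integer-dtype check in _check_input_symbols satisfied):
    #
    #   seqs = [np.array([0, 1, 2, 1, 0]), np.array([2, 2, 1, 0, 0])]
    #   model = MultinomialHMM(n_components=2)
    #   model.fit(seqs)
    #   model.emissionprob_.shape    # -> (2, 3) == (n_components, n_symbols)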
class GMMHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian mixture emissions
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
    startprob : array, shape (`n_components`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_components`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
init_params : string, optional
Controls which parameters are initialized prior to training.
Can contain any combination of 's' for startprob, 't' for transmat, 'm'
for means, 'c' for covars, and 'w' for GMM mixing weights. Defaults to
all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat,
'm' for means, and 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GMMHMM
>>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
        # XXX: Hotfix for n_mix, which is incompatible with scikit-learn's
# BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in range(self.n_components):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
allobs = np.concatenate(obs, 0)
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(allobs)
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
_, lgmm_posteriors = g.score_samples(obs)
lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
if np.any(np.isnan(tmp_gmm.covars_)):
raise ValueError
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state] +
self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
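# Illustrative GMMHMM sketch on synthetic data (each state emits from a
# mixture of n_mix Gaussians; results will vary):
#
#   X = np.random.randn(300, 2)
#   model = GMMHMM(n_components=3, n_mix=2, covariance_type='diag')
#   model.fit([X])
#   logprob, states = model.decode(X)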
| bsd-3-clause |
iandriver/RNA-sequence-tools | RNA_Seq_analysis/make_monocle_data_js.py | 2 | 3820 | import os
import cPickle as pickle
import pandas as pd
import matplotlib
matplotlib.use('QT4Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from matplotlib.ticker import LinearLocator
import seaborn as sns
import numpy as np
from operator import itemgetter
#the file path where gene list will be and where new list will output
path_to_file = '/Volumes/Seq_data/cuffnorm_js_SC_1_2_3_5'
#name of file containing the gene list
gene_file_source = 'go_search_genes_lung_all.txt'
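#the gene list file is expected to be a tab-delimited table with at least a
#'GeneID' and a 'GroupID' column (Pdgfra must be among the genes since it is
#used below); an illustrative layout with placeholder group names:
#   GeneID  GroupID
#   Pdgfra  group1
#   Sftpc   group2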
base_name = 'js_SC_1_2_3_5'
#load the outlier-filtered gene expression file (genes x cells)
by_cell = pd.DataFrame.from_csv(os.path.join(path_to_file, base_name+'_outlier_filtered.txt'), sep='\t')
by_gene = by_cell.transpose()
#create list of genes
gene_list = by_cell.index.tolist()
#create cell list
cell_list = [x for x in list(by_cell.columns.values)]
df_by_gene1 = pd.DataFrame(by_gene, columns=gene_list, index=cell_list)
df_by_cell1 = pd.DataFrame(by_cell, columns=cell_list, index=gene_list)
def make_new_matrix(org_matrix_by_cell, gene_list_file):
split_on='_'
gene_df = pd.read_csv(os.path.join(path_to_file, gene_list_file), delimiter= '\t')
gene_list = gene_df['GeneID'].tolist()
group_list = gene_df['GroupID'].tolist()
gmatrix_df = org_matrix_by_cell[gene_list]
cmatrix_df = gmatrix_df.transpose()
score_df = pd.DataFrame(zip(gene_list, group_list), columns=['GeneID', 'GroupID'])
sample_data = pd.read_csv(os.path.join(path_to_file, 'samples.table'), delimiter= '\t', index_col=0)
by_sample = sample_data.transpose()
map_data = pd.read_csv(os.path.join(path_to_file, 'results_'+base_name+'_align.txt'), delimiter= '\t', index_col=0)
by_cell_map = map_data.transpose()
loading_data = pd.read_csv(os.path.join(path_to_file, 'SC.1.2.3.5_Sample_Groupings.txt'), delimiter= '\t', index_col=0)
l_data = loading_data.transpose()
print l_data
cell_list = gmatrix_df.index.tolist()
cell_data = []
cell_label_dict ={'BU3':('BU3'), 'ips17':('ips17')}
new_cell_list = []
old_cell_list = []
    for cell in cell_list:
        match = False
        #fallback labels so cells missing from the sample groupings file do not
        #reuse the previous cell's metadata below (placeholders, not measurements)
        timepoint = 'NA'
        cell_type = 'NA'
        tracking_id = cell
        Pdgfra = 'NA'
        try:
            timepoint = l_data[cell]['Timepoint']
            cell_type = l_data[cell]['Type']
            tracking_id = '_'.join([timepoint, cell, cell_type])
            match = True
        except KeyError:
            print cell
if match:
old_cell_list.append(cell)
new_cell_list.append('_'.join([timepoint, cell, cell_type]))
pdgfra_level = cmatrix_df[cell]['Pdgfra']
if int(pdgfra_level) >= 60:
Pdgfra='high'
elif int(pdgfra_level) < 60 and int(pdgfra_level) >= 5 :
Pdgfra='med'
elif int(pdgfra_level) < 5:
Pdgfra='low'
if match:
single_cell = 'yes'
else:
single_cell = 'no'
print by_cell_map[cell]
total_mass = by_sample[cell+'_0'][1]
input_mass = by_cell_map[cell][0]
per_mapped = by_cell_map[cell][4]
c_data_tup = (tracking_id,total_mass,input_mass,per_mapped,cell_type,timepoint,Pdgfra,single_cell)
print c_data_tup
cell_data.append(c_data_tup)
score_df.to_csv(os.path.join(path_to_file, 'gene_feature_data.txt'), sep = '\t', index=False)
new_cmatrix_df = cmatrix_df[old_cell_list]
new_cmatrix_df.columns = new_cell_list
    new_cmatrix_df.to_csv(os.path.join(path_to_file, 'goterms_monocle_count_matrix.txt'), sep = '\t')
cell_data_df = pd.DataFrame(cell_data, columns=['tracking_id','total_mass','input_mass','per_mapped','cell_type','timepoint','Pdgfra','single_cell'])
cell_data_df.to_csv(os.path.join(path_to_file, 'cell_feature_data.txt'), sep = '\t', index=False)
make_new_matrix(df_by_gene1, gene_file_source)
| mit |
nburn42/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 14 | 62939 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
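# Illustrative sketch of the two input paths this helper reconciles (the
# arrays and `my_input_fn` below are placeholders):
#
#   x = np.random.rand(100, 3).astype(np.float32)
#   y = np.random.randint(0, 2, size=100)
#   input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
#                                     batch_size=32)
#   # or pass a ready-made input_fn instead of x/y (batch_size must be None):
#   input_fn, feed_fn = _get_input_fn(None, None, my_input_fn, None,
#                                     batch_size=None)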
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
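# Illustrative sketch (a small dense float matrix; the inferred columns are
# real-valued feature columns covering its 3 features):
#
#   x = np.arange(12, dtype=np.float32).reshape(4, 3)
#   feature_columns = infer_real_valued_columns_from_input(x)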
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2', 'VarHandleOp'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly name given in `metrics` to the result of
    calling the given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError('Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics,
predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError('Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics,
labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
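# Illustrative sketch of the preferred MetricSpec form described above
# (`features`, `labels` and `predictions` are placeholders for the tensors a
# concrete model_fn would provide; 'classes' is a placeholder prediction key):
#
#   metrics = {
#       'accuracy': metric_spec.MetricSpec(
#           metric_fn=metrics_lib.accuracy, prediction_key='classes'),
#   }
#   eval_ops = _make_metrics_ops(metrics, features, labels, predictions)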
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
results = []
for k, v in sorted(dictionary.items()):
if isinstance(v, float) or isinstance(v, np.float32) or isinstance(
v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):
results.append('%s = %s' % (k, v))
else:
results.append('Type of %s = %s' % (k, type(v)))
return ', '.join(results)
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = core_summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.string_types):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = key
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
elif isinstance(dictionary[key], np.ndarray):
value = summary_proto.value.add()
value.tag = key
value.node_name = key
tensor_proto = tensor_util.make_tensor_proto(dictionary[key])
value.tensor.CopyFrom(tensor_proto)
logging.info(
'Summary for np.ndarray is not visible in Tensorboard by default. '
'Consider using a Tensorboard plugin for visualization (see '
'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'
' for more information).')
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or np.ndarray or a serialized string of Summary.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
['tags', 'transforms'])
class BaseEstimator(sklearn.BaseEstimator, evaluable.Evaluable,
trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Users should not instantiate or subclass this class. Instead, use an
`Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
@deprecated(None, 'Please replace uses of any Estimator from tf.contrib.learn'
' with an Estimator from tf.estimator.*')
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
'model_dir are set both in constructor and RunConfig, but with '
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@property
def model_fn(self):
"""Returns the model_fn which is bound to self.params.
Returns:
The model_fn with the following signature:
`def model_fn(features, labels, mode, metrics)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config=config)
return public_model_fn
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def fit(self,
x=None,
y=None,
input_fn=None,
steps=None,
batch_size=None,
monitors=None,
max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
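  # Illustrative sketch (`est` and the input functions are placeholders; an
  # input_fn must return a (features, labels) tuple):
  #
  #   est.fit(input_fn=my_train_input_fn, steps=1000)
  #   est.evaluate(input_fn=my_eval_input_fn, steps=100)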
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def partial_fit(self,
x=None,
y=None,
input_fn=None,
steps=1,
batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(
x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=monitors)
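# Hedged sketch (assumed usage, not original code): partial_fit is normally
# driven by an external loop over data chunks; `chunk_iterator` is a
# hypothetical iterable of (x, y) arrays.
#
#   for x_chunk, y_chunk in chunk_iterator:
#     est.partial_fit(x=x_chunk, y=y_chunk, steps=1)
#
# As the warning above notes, prefer fit() when the loop is tight.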
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is neither `None` nor a `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
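# Hedged sketch (assumed usage, not original code): custom metrics are passed
# as a dict mapping friendly names to MetricSpec objects; `my_metric_fn` and
# `eval_input_fn` are placeholders.
#
#   from tensorflow.contrib.learn import MetricSpec
#   results = est.evaluate(
#       input_fn=eval_input_fn,
#       metrics={'my_metric': MetricSpec(metric_fn=my_metric_fn)},
#       steps=100)
#   # `results` is a dict of metric values plus a 'global_step' entry.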
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('batch_size', None), ('as_iterable', True))
def predict(self,
x=None,
input_fn=None,
batch_size=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The input samples on which to run
prediction. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, names of the outputs to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
iterate_batches: If True, yield the whole batch at once instead of
decomposing the batch into individual samples. Only relevant when
as_iterable is True.
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable,
iterate_batches=iterate_batches)
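# Hedged sketch (assumed usage, not original code): with as_iterable=True the
# return value is a generator, so predictions are consumed lazily;
# `predict_input_fn` and `process` are placeholders.
#
#   for pred in est.predict(input_fn=predict_input_fn, as_iterable=True):
#     process(pred)
#
# With x= given and as_iterable=False, a numpy array (or dict of arrays) is
# returned instead via SKCompat.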
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(
self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.', str(labels),
str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
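# Illustrative note (assumed example, not original code): given
#   eval_dict = {'accuracy': (acc_value_op, acc_update_op), 'loss': loss_op}
# this returns (group(acc_update_op), {'accuracy': acc_value_op, 'loss': loss_op}),
# i.e. value ops keep their names while update ops are grouped into a single op.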
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval'
if not name else 'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps == 0:
logging.warning('evaluation steps are 0. If `input_fn` does not raise '
'`OutOfRangeError`, the evaluation will never stop. '
'Use steps=None if intended.')
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
training_util.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
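# Illustrative note (assumed example, not original code): with
# iterate_batches=False, a batched dict such as
#   {'classes': np.array([1, 0]), 'scores': np.array([0.9, 0.2])}
# is decomposed and yielded as {'classes': 1, 'scores': 0.9} and then
# {'classes': 0, 'scores': 0.2}.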
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions)
if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
training_util._get_or_create_global_step_read() # pylint: disable=protected-access
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
* `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
configuring Estimators from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' % (model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
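# Hedged sketch (assumed usage, not original code): a minimal model_fn
# compatible with this Estimator. `my_net`, the feature/label shapes and the
# learning rate are placeholders; `import tensorflow as tf` is assumed.
#
#   def my_model_fn(features, labels, mode, params):
#     logits = my_net(features)
#     loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
#     train_op = tf.train.AdagradOptimizer(params['lr']).minimize(
#         loss, global_step=tf.train.get_global_step())
#     return model_fn_lib.ModelFnOps(
#         mode=mode, predictions={'logits': logits}, loss=loss, train_op=train_op)
#
#   est = Estimator(model_fn=my_model_fn, params={'lr': 0.1})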
def _call_model_fn(self, features, labels, mode, metrics=None, config=None):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
metrics: Dict of metrics.
config: RunConfig.
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
if config:
kwargs['config'] = config
else:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
model_fn_ops = model_fn_results
else:
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
model_fn_ops = model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(
_make_metrics_ops(metrics, features, labels,
model_fn_ops.predictions))
return model_fn_ops
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(features, labels,
model_fn_lib.ModeKeys.EVAL, metrics)
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(self,
export_dir_base,
serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
graph_rewrite_specs=(GraphRewriteSpec(
(tag_constants.SERVING,), ()),),
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
produce a separate MetaGraphDef within the exported SavedModel, tagged
and rewritten as specified. Defaults to a single entry using the
default serving tag ("serve") and no rewriting.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
# pylint: enable=line-too-long
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# We'll write the SavedModel to a temporary directory and then atomically
# rename it at the end. This helps to avoid corrupt / incomplete outputs,
# which could otherwise occur if the job is preempted or otherwise fails
# in the middle of SavedModel creation.
temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Build the base graph
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
init_op = control_flow_ops.group(variables.local_variables_initializer(),
resources.initialize_resources(
resources.shared_resources()),
lookup_ops.tables_initializer())
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Export the first MetaGraphDef with variables, assets etc.
with tf_session.Session('') as session:
# pylint: disable=protected-access
saveables = variables._all_saveable_objects()
# pylint: enable=protected-access
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
elif saveables:
saver_for_restore = saver.Saver(saveables, sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# Perform the export
if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
raise ValueError('The first element of graph_rewrite_specs '
'must specify no transforms.')
untransformed_tags = graph_rewrite_specs[0].tags
# TODO(soergel): switch to main_op or otherwise update when dust settles
builder.add_meta_graph_and_variables(
session,
untransformed_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op,
strip_default_attrs=strip_default_attrs)
# pylint: disable=protected-access
base_meta_graph_def = builder._saved_model.meta_graphs[0]
# pylint: enable=protected-access
if graph_rewrite_specs[1:]:
# Prepare the input_names and output_names needed for the
# meta_graph_transform call below.
input_names = [
tensor.name
for input_dict in input_alternatives.values()
for tensor in input_dict.values()
]
output_names = [
tensor.name
for output_alternative in output_alternatives.values()
for tensor in output_alternative[1].values()
]
# Write the additional MetaGraphDefs
for graph_rewrite_spec in graph_rewrite_specs[1:]:
# TODO(soergel) consider moving most of this to saved_model.builder_impl
# as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
base_meta_graph_def, input_names, output_names,
graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
# pylint: disable=protected-access
meta_graph_def = builder._saved_model.meta_graphs.add()
# pylint: enable=protected-access
meta_graph_def.CopyFrom(transformed_meta_graph_def)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(
compat.as_bytes(temp_export_dir), compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(
compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
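# Hedged sketch (assumed usage, not original code): serving exports typically
# pair this method with a parsing serving_input_fn; `feature_spec` (a dict of
# FixedLenFeature/VarLenFeature) and the helper's module path are assumptions.
#
#   from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
#   serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
#   export_dir = est.export_savedmodel('/tmp/exports', serving_input_fn)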
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please switch to the Estimator interface.')
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(
input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x,
None,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate([output[key] for output in results], axis=0)
for key in results[0]
}
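# Hedged sketch (assumed usage, not original code): SKCompat restores the older
# numpy-in / numpy-out interface around an Estimator; `my_model_fn`, `x_train`,
# `y_train` and `x_test` are placeholders.
#
#   sk_est = SKCompat(Estimator(model_fn=my_model_fn))
#   sk_est.fit(x_train, y_train, batch_size=64, steps=1000)
#   scores = sk_est.score(x_test, y_test)
#   preds = sk_est.predict(x_test)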
| apache-2.0 |
MatthewRueben/multiple-explorers | classes/environment.py | 1 | 13566 | #!/usr/bin/env python
from geography import Bounds2D, Location, POI
from rovers import Rover
from roverSettingsStruct import RoverSettings
import random
import itertools
from matplotlib import pyplot
import copy
class World():
def __init__(self, world_bounds, N_poi, poi_bounds, rover_settings, rover_start, rovHeadings):
""" Inputs "world_bounds" and "poi_bounds" are of class "2DBounds". """
# Rover settings attributes:
# .rewardType
# .moveRandomly
# .numAgents
# .sensorRange
# .sensorFov
# .sensorNoiseInt
N_rovers = rover_settings.numAgents
self.world_bounds = world_bounds
self.poi_bounds = poi_bounds
self.rover_start = rover_start
# Init POIs
self.POIs = []
total_val = N_poi * 10
halfVal = total_val / 2.0
leftover_val = halfVal
# hack to make one large valued, and the others no more than half its value
# poiValueList = [float(random.randint(0, halfVal)) for x in range(N_poi-1)]
# for poiValue in poiValueList:
# self.POIs.append(POI(poiValue, d_min=5.0))
# self.POIs.append(POI(total_val, d_min=5.0))
# print poiValueList
bigPOI = POI(halfVal, d_min=5.0)
self.POIs.append(bigPOI)
for poi_index in range(N_poi-1):
# V_choice = random.uniform(V_bounds[0], V_bounds[1])
#poi_value = random.randint(0, leftover_val)
poi_value = 450.0 # <---- HACK!
leftover_val -= poi_value
poi = POI(poi_value, d_min=5.0) # assign POI value & minimum observation distance
# poi = POI(45.0, d_min=5.0)
self.POIs.append(poi)
# Init rovers
self.rovers = []
for rover_index, heading in itertools.izip(range(N_rovers), rovHeadings):
rover = Rover(name='Fred',
x=self.rover_start.x,
y=self.rover_start.y,
heading=heading,
num_sensors=rover_settings.sensorFov,
observation_range=10,
sensor_range=rover_settings.sensorRange,
sensor_noise=rover_settings.sensorNoiseInt,
num_POI=100)
self.rovers.append(rover)
def resetWithClusters(self, headings):
''' Resets with the POI clusters placed along one wall. (The randomized wall choice is commented out below; currently the clusters are always placed at x = 50.) '''
clusterLocations = self.buildClusterLocations(self.poi_bounds, len(self.POIs))
for poi, clusterLoc in itertools.izip(self.POIs, clusterLocations):
# poi.place_randomly(self.poi_bounds) # assign POI location
poi.place_location(clusterLoc)
for rover, heading in itertools.izip(self.rovers, headings):
# reset agents to be center of world
rover.reset(self.rover_start.x,
self.rover_start.y,
heading)
def buildClusterLocations(self, bounds, numPois):
quad = random.random()
clusterList = [float(random.randint(0, 60)) for x in range(numPois)]
clusterLocations = []
# if quad < .25:
# # up wall
# y = 100.0
# for cluster in clusterList:
# clusterLocations.append(copy.deepcopy(Location(cluster, y)))
# elif quad < .5:
# # bottom wall
# y = 0
# for cluster in clusterList:
# clusterLocations.append(copy.deepcopy(Location(cluster, y)))
# elif quad < .75:
# # left wall
# x = 0
# for cluster in clusterList:
# clusterLocations.append(copy.deepcopy(Location(x, cluster)))
# else:
# right wall
x = 50
for cluster in clusterList:
clusterLocations.append(copy.deepcopy(Location(x, cluster)))
return clusterLocations
def reset(self, headings):
# for poi in self.POIs:
# poi.place_randomly(self.poi_bounds)
for rover, heading in itertools.izip(self.rovers, headings):
# reset agents to be center of world
rover.reset(self.rover_start.x,
self.rover_start.y,
heading)
def initPOILocs(self):
for poi in self.POIs:
poi.place_randomly(self.poi_bounds)
def get_rewards(self):
rewards = {'POI': [],
'GLOBAL': 0,
'LOCAL': [0]*len(self.rovers),
'DIFFERENCE': [0]*len(self.rovers),
'DIFFERENCE_PO': [0]*len(self.rovers)}
rover_closest_list = []
# Calculate GLOBAL reward
for poi_index in range(len(self.POIs)): # for each POI, figure out which rover is closest and get the appropriate reward
delta_min, rover_closest = self.find_closest(poi_index)
# print 'Closest rover: ', rover_closest
rover_closest_list.append(rover_closest)
poi_reward = self.POIs[poi_index].V / delta_min # the entire reward for this POI
# print ' Poi reward: ', poi_reward
# print
rewards['POI'].append(poi_reward) # keep track of the reward for each POI
rewards['GLOBAL'] += poi_reward
# Calculate LOCAL reward
for rover_index, rover in enumerate(self.rovers):
rewards['LOCAL'][rover_index] = 0
for poi_index, poi in enumerate(self.POIs): # for each POI...
delta_min = 100.0 # start arbitrarily high
for step_index, location in enumerate(rover.location_history): # check each of the rover's steps
delta = (location - poi) # observation distance
if delta < delta_min: # the closest distance counts, even if it's closer than the minimum observation distance
delta_min = delta
delta_min = max(delta_min ** 2, poi.d_min ** 2) # delta is actually the SQUARED Euclidean distance
poi_reward = poi.V / delta_min # the entire reward for this POI (for this rover only)
rewards['LOCAL'][rover_index] += poi_reward
# Calculate DIFFERENCE reward (with counterfactual c = 0)
for my_rover_index, rover in enumerate(self.rovers):
G_without = rewards['GLOBAL'] # Set G(Z_-i) = G(Z)
closest_to = [poi_index for poi_index, (rover_index, step_index) in enumerate(rover_closest_list) if rover_index == my_rover_index] # find which POIs this rover was closest to
for poi_index in closest_to: # for each of those POIs...
G_without -= rewards['POI'][poi_index] # Subtract its old reward
delta_min_new, rover_closest_new = self.find_closest(poi_index, [my_rover_index]) # Find the next-closest rover to it
#print (rover_closest_list[poi_index], rover_closest_new)
poi_reward_new = self.POIs[poi_index].V / delta_min_new # Calculate its new reward
G_without += poi_reward_new # Add it back in (G_without should be getting smaller)
rewards['DIFFERENCE'][my_rover_index] = rewards['GLOBAL'] - G_without # Calculate D = G(Z) - G(Z_-i)
# print rewards['DIFFERENCE']
# print 'Any DIFFERENCE rewards less than zero?', any([el < 0 for el in rewards['DIFFERENCE']])
# Calculate DIFFERENCE reward with PARTIAL OBSERVABILITY (and c = 0)
"""
# for each rover
# find which rovers this rover can see
# it can see itself! very important
# start with the full-observability POI rewards
# for each POI
# Partial Observability
G_PO -= rewards['POI'][poi_index] # Subtract its old reward
delta_min_new, rover_closest_new = self.find_closest(poi_index, [my_rover_index]) # Find the next-closest rover to it
poi_reward_new = self.POIs[poi_index].V / delta_min_new # Calculate its new reward
G_PO += poi_reward_new # Add it back in (G_without should be getting smaller)
# Without this agent
G_PO_without
delta_min_new, rover_closest_new = self.find_closest(poi_index, [my_rover_index]) # Find the next-closest rover to it
poi_reward_new = self.POIs[poi_index].V / delta_min_new # Calculate its new reward
G_PO_without
rewards['DIFFERENCE_PO'][my_rover_index] = G_PO - G_PO_without # Calculate D_PO
"""
return rewards, rover_closest_list
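# Illustrative note (assumption based on the code above, not original text):
# the difference reward for rover i is D_i = G(Z) - G(Z_-i), the global reward
# minus the global reward recomputed with rover i's observations removed, so a
# rover that was never the closest observer of any POI receives D_i = 0.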
def find_closest(self, poi_index, not_these_rovers=[]):
""" Finds closest rover to the specified POI.
Returns that rover's index as well as the distance metric. """
poi = self.POIs[poi_index]
delta_min = 100.0 # start arbitrarily high
rover_closest = None
step_closest = None
for rover_index, rover in enumerate(self.rovers):
# Check observation distances for the rover locations we aren't skipping
if rover_index not in not_these_rovers:
for step_index, location in enumerate(self.rovers[rover_index].location_history):
delta = (location - poi) # observation distance
if delta < delta_min: # the closest rover counts, even if it's closer than the minimum observation distance
delta_min = delta
rover_closest = (rover_index, step_index)
delta_min = max(delta_min ** 2, poi.d_min ** 2) # delta is actually the SQUARED Euclidean distance
return delta_min, rover_closest
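# Illustrative note (assumption: Location.__sub__ returns the Euclidean
# distance): a POI with V = 450 and d_min = 5 contributes at most
# 450 / 5**2 = 18 to the global reward, since the squared observation distance
# is capped below by d_min**2.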
def test_plot(self, rover_closest_list=[]):
import time
pyplot.ion()
# Plot each rover's trajectory, one by one
for this_rover_index, rover in enumerate(self.rovers):
pyplot.cla() # clear axis
pyplot.title('Rover #' + str(this_rover_index + 1))
# Plot the world, with POIs
for poi in self.POIs:
pyplot.plot(poi.location.x, poi.location.y, 'k*')
pyplot.axis([self.world_bounds.x_lower, self.world_bounds.x_upper,
self.world_bounds.y_lower, self.world_bounds.y_upper])
trajectory_x = [step.x for step in rover.location_history]
trajectory_y = [step.y for step in rover.location_history]
pyplot.plot(trajectory_x, trajectory_y, 'ro-')
# Draw lines to indicate whenever the rover became the closest observer of a POI
if rover_closest_list:
closest_to = [(poi_index, step_index) for poi_index, (rover_index, step_index) in enumerate(rover_closest_list) if rover_index == this_rover_index] # find which POIs this rover was closest to
for (poi_index, step_index) in closest_to: # for each of those POIs...
pyplot.plot([trajectory_x[step_index], self.POIs[poi_index].location.x],
[trajectory_y[step_index], self.POIs[poi_index].location.y])
pyplot.draw()
time.sleep(1.0)
def plot_all(self, rover_closest_list=[]):
pyplot.ion()
# Which step are we at?
step = str(len(self.rovers[0].location_history))
if int(step) < 10:
step = '0' + step
# Get the rewards thus far.
rewards, rover_closest_list = self.get_rewards()
# Plot each rover's trajectory, one by one
pyplot.cla() # clear axis
pyplot.title('Step #' + str(step) + ', System Reward = ' + str(rewards['GLOBAL']))
for this_rover_index, rover in enumerate(self.rovers):
# Plot the world
fig = pyplot.gcf()
pyplot.axis([self.world_bounds.x_lower, self.world_bounds.x_upper,
self.world_bounds.y_lower, self.world_bounds.y_upper])
# Plot rovers
trajectory_x = [point.x for point in rover.location_history]
trajectory_y = [point.y for point in rover.location_history]
pyplot.plot(trajectory_x, trajectory_y, 'r.-')
pyplot.plot(trajectory_x[-1], trajectory_y[-1], 'ro')
for poi_index, poi in enumerate(self.POIs):
#pyplot.plot(poi.location.x, poi.location.y, 'k*')
# Check if a rover has been within the minimum observation distance of this POI
delta_min, rover_closest = self.find_closest(poi_index)
if delta_min < 1.05 * (poi.d_min ** 2): # if within 5% of min. obs. distance (since an == relation might fail due to float math)
color_choice = 'g'
else:
color_choice = '0.5' # lightish gray
# Draw inside circle of POI -- bigger is better
radius = poi.V / 450.0 * 4
circle1 = pyplot.Circle((poi.location.x, poi.location.y), radius, color=color_choice, fill=True)
fig.gca().add_artist(circle1)
# Draw outside circle of POI at minimum observation distance
circle2 = pyplot.Circle((poi.location.x, poi.location.y), 5, color=color_choice, fill=False)
fig.gca().add_artist(circle2)
pyplot.draw()
fig.savefig('Learned01Step' + str(step) + '.png')
| mit |
slundberg/shap | shap/explainers/_tree.py | 1 | 92486 | import numpy as np
import scipy.special
import multiprocessing
import sys
import json
import os
import struct
import itertools
from distutils.version import LooseVersion
from ._explainer import Explainer
from ..utils import assert_import, record_import_error, safe_isinstance
from ..utils._legacy import DenseData
from .._explanation import Explanation
from .. import maskers
import warnings
import pandas as pd
warnings.formatwarning = lambda msg, *args, **kwargs: str(msg) + '\n' # ignore everything except the message
# pylint: disable=unsubscriptable-object
try:
from .. import _cext
except ImportError as e:
record_import_error("cext", "C extension was not built during install!", e)
try:
import pyspark
except ImportError as e:
record_import_error("pyspark", "PySpark could not be imported!", e)
output_transform_codes = {
"identity": 0,
"logistic": 1,
"logistic_nlogloss": 2,
"squared_loss": 3
}
feature_perturbation_codes = {
"interventional": 0,
"tree_path_dependent": 1,
"global_path_dependent": 2
}
class Tree(Explainer):
""" Uses Tree SHAP algorithms to explain the output of ensemble tree models.
Tree SHAP is a fast and exact method to estimate SHAP values for tree models and ensembles of trees,
under several different possible assumptions about feature dependence. It depends on fast C++
implementations either inside an external model package or in the local compiled C extension.
"""
def __init__(self, model, data = None, model_output="raw", feature_perturbation="interventional", feature_names=None, **deprecated_options):
""" Build a new Tree explainer for the passed model.
Parameters
----------
model : model object
The tree based machine learning model that we want to explain. XGBoost, LightGBM, CatBoost, Pyspark
and most tree-based scikit-learn models are supported.
data : numpy.array or pandas.DataFrame
The background dataset to use for integrating out features. This argument is optional when
feature_perturbation="tree_path_dependent", since in that case we can use the number of training
samples that went down each tree path as our background dataset (this is recorded in the model object).
feature_perturbation : "interventional" (default) or "tree_path_dependent" (default when data=None)
Since SHAP values rely on conditional expectations we need to decide how to handle correlated
(or otherwise dependent) input features. The "interventional" approach breaks the dependencies between
features according to the rules dictated by causal inference (Janzing et al. 2019). Note that the
"interventional" option requires a background dataset and its runtime scales linearly with the size
of the background dataset you use. Anywhere from 100 to 1000 random background samples are good
sizes to use. The "tree_path_dependent" approach is to just follow the trees and use the number
of training examples that went down each leaf to represent the background distribution. This approach
does not require a background dataset and so is used by default when no background dataset is provided.
model_output : "raw", "probability", "log_loss", or model method name
What output of the model should be explained. If "raw" then we explain the raw output of the
trees, which varies by model. For regression models "raw" is the standard output, for binary
classification in XGBoost this is the log odds ratio. If model_output is the name of a supported
prediction method on the model object then we explain the output of that model method name.
For example model_output="predict_proba" explains the result of calling model.predict_proba.
If "probability" then we explain the output of the model transformed into probability space
(note that this means the SHAP values now sum to the probability output of the model). If "logloss"
then we explain the log base e of the model loss function, so that the SHAP values sum up to the
log loss of the model for each sample. This is helpful for breaking down model performance by feature.
Currently the probability and log_loss options are only supported when feature_perturbation="interventional".
Examples
--------
See `Tree explainer examples <https://shap.readthedocs.io/en/latest/api_examples/explainers/Tree.html>`_
"""
if feature_names is not None:
self.data_feature_names=feature_names
elif safe_isinstance(data, "pandas.core.frame.DataFrame"):
self.data_feature_names = list(data.columns)
masker = data
super(Tree, self).__init__(model, masker, feature_names=feature_names)
if type(self.masker) is maskers.Independent:
data = self.masker.data
elif masker is not None:
raise Exception("Unsupported masker type: %s!" % str(type(self.masker)))
if getattr(self.masker, "clustering", None) is not None:
raise Exception("TreeExplainer does not support clustered data inputs! Please use shap.Explainer or pass an unclustered masker!")
# check for deprecated options
if model_output == "margin":
warnings.warn("model_output = \"margin\" has been renamed to model_output = \"raw\"")
model_output = "raw"
if model_output == "logloss":
warnings.warn("model_output = \"logloss\" has been renamed to model_output = \"log_loss\"")
model_output = "log_loss"
if "feature_dependence" in deprecated_options:
dep_val = deprecated_options["feature_dependence"]
if dep_val == "independent" and feature_perturbation == "interventional":
warnings.warn("feature_dependence = \"independent\" has been renamed to feature_perturbation" \
" = \"interventional\"! See GitHub issue #882.")
elif feature_perturbation != "interventional":
warnings.warn("feature_dependence = \"independent\" has been renamed to feature_perturbation" \
" = \"interventional\", you can't supply both options! See GitHub issue #882.")
if dep_val == "tree_path_dependent" and feature_perturbation == "interventional":
raise Exception("The feature_dependence option has been renamed to feature_perturbation! " \
"Please update the option name before calling TreeExplainer. See GitHub issue #882.")
if feature_perturbation == "independent":
raise Exception("feature_perturbation = \"independent\" is not a valid option value, please use " \
"feature_perturbation = \"interventional\" instead. See GitHub issue #882.")
if safe_isinstance(data, "pandas.core.frame.DataFrame"):
self.data = data.values
elif isinstance(data, DenseData):
self.data = data.data
else:
self.data = data
if self.data is None:
feature_perturbation = "tree_path_dependent"
#warnings.warn("Setting feature_perturbation = \"tree_path_dependent\" because no background data was given.")
elif feature_perturbation == "interventional" and self.data.shape[0] > 1000:
warnings.warn("Passing "+str(self.data.shape[0]) + " background samples may lead to slow runtimes. Consider "
"using shap.sample(data, 100) to create a smaller background data set.")
self.data_missing = None if self.data is None else pd.isna(self.data)
self.feature_perturbation = feature_perturbation
self.expected_value = None
self.model = TreeEnsemble(model, self.data, self.data_missing, model_output)
self.model_output = model_output
#self.model_output = self.model.model_output # this allows the TreeEnsemble to translate model outputs types by how it loads the model
if feature_perturbation not in feature_perturbation_codes:
raise ValueError("Invalid feature_perturbation option!")
# check for unsupported combinations of feature_perturbation and model_outputs
if feature_perturbation == "tree_path_dependent":
if self.model.model_output != "raw":
raise ValueError("Only model_output=\"raw\" is supported for feature_perturbation=\"tree_path_dependent\"")
elif data is None:
raise ValueError("A background dataset must be provided unless you are using feature_perturbation=\"tree_path_dependent\"!")
if self.model.model_output != "raw":
if self.model.objective is None and self.model.tree_output is None:
raise Exception("Model does not have a known objective or output type! When model_output is " \
"not \"raw\" then we need to know the model's objective or link function.")
# A bug in XGBoost fixed in v0.81 makes XGBClassifier fail to give margin outputs
if safe_isinstance(model, "xgboost.sklearn.XGBClassifier") and self.model.model_output != "raw":
import xgboost
if LooseVersion(xgboost.__version__) < LooseVersion('0.81'):
raise RuntimeError("A bug in XGBoost fixed in v0.81 makes XGBClassifier fail to give margin outputs! Please upgrade to XGBoost >= v0.81!")
# compute the expected value if we have a parsed tree for the cext
if self.model.model_output == "log_loss":
self.expected_value = self.__dynamic_expected_value
elif data is not None:
try:
self.expected_value = self.model.predict(self.data).mean(0)
except ValueError:
raise Exception("Currently TreeExplainer can only handle models with categorical splits when " \
"feature_perturbation=\"tree_path_dependent\" and no background data is passed. Please try again using " \
"shap.TreeExplainer(model, feature_perturbation=\"tree_path_dependent\").")
if hasattr(self.expected_value, '__len__') and len(self.expected_value) == 1:
self.expected_value = self.expected_value[0]
elif hasattr(self.model, "node_sample_weight"):
self.expected_value = self.model.values[:,0].sum(0)
if self.expected_value.size == 1:
self.expected_value = self.expected_value[0]
self.expected_value += self.model.base_offset
if self.model.model_output != "raw":
self.expected_value = None # we don't handle transforms in this case right now...
# if our output format requires binary classification to be represented as two outputs then we do that here
if self.model.model_output == "probability_doubled" and self.expected_value is not None:
self.expected_value = [1-self.expected_value, self.expected_value]
def __dynamic_expected_value(self, y):
""" This computes the expected value conditioned on the given label value.
"""
return self.model.predict(self.data, np.ones(self.data.shape[0]) * y).mean(0)
def __call__(self, X, y=None, interactions=False, check_additivity=True):
if safe_isinstance(X, "pandas.core.frame.DataFrame"):
feature_names = list(X.columns)
X = X.values
else:
feature_names = getattr(self, "data_feature_names", None)
if not interactions:
v = self.shap_values(X, y=y, from_call=True, check_additivity=check_additivity)
output_shape = tuple()
if type(v) is list:
output_shape = (len(v),)
v = np.stack(v, axis=-1) # put outputs at the end
# the explanation object expects an expected value for each row
if hasattr(self.expected_value, "__len__"):
ev_tiled = np.tile(self.expected_value, (v.shape[0],1))
else:
ev_tiled = np.tile(self.expected_value, v.shape[0])
e = Explanation(v, base_values=ev_tiled, data=X, feature_names=feature_names)
else:
v = self.shap_interaction_values(X)
e = Explanation(v, base_values=self.expected_value, data=X, feature_names=feature_names, interaction_order=2)
return e
def _validate_inputs(self, X, y, tree_limit, check_additivity):
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.model.tree_limit is None else self.model.tree_limit
if tree_limit < 0 or tree_limit > self.model.values.shape[0]:
tree_limit = self.model.values.shape[0]
# convert dataframes
if safe_isinstance(X, "pandas.core.series.Series"):
X = X.values
elif safe_isinstance(X, "pandas.core.frame.DataFrame"):
X = X.values
flat_output = False
if len(X.shape) == 1:
flat_output = True
X = X.reshape(1, X.shape[0])
if X.dtype != self.model.input_dtype:
X = X.astype(self.model.input_dtype)
X_missing = np.isnan(X, dtype=np.bool)
assert isinstance(X, np.ndarray), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"
if self.model.model_output == "log_loss":
assert y is not None, "Both samples and labels must be provided when model_output = " \
"\"log_loss\" (i.e. `explainer.shap_values(X, y)`)!"
assert X.shape[0] == len(
y), "The number of labels (%d) does not match the number of samples to explain (" \
"%d)!" % (
len(y), X.shape[0])
if self.feature_perturbation == "tree_path_dependent":
assert self.model.fully_defined_weighting, "The background dataset you provided does " \
"not cover all the leaves in the model, " \
"so TreeExplainer cannot run with the " \
"feature_perturbation=\"tree_path_dependent\" option! " \
"Try providing a larger background " \
"dataset, or using " \
"feature_perturbation=\"interventional\"."
if check_additivity and self.model.model_type == "pyspark":
warnings.warn(
"check_additivity requires us to run predictions which is not supported with "
"spark, "
"ignoring."
" Set check_additivity=False to remove this warning")
check_additivity = False
return X, y, X_missing, flat_output, tree_limit, check_additivity
def shap_values(self, X, y=None, tree_limit=None, approximate=False, check_additivity=True, from_call=False):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
A matrix of samples (# samples x # features) on which to explain the model's output.
y : numpy.array
An array of label values for each sample. Used when explaining loss functions.
tree_limit : None (default) or int
            Limit the number of trees used by the model. By default None means use the tree limit
            stored in the original model, and -1 means no limit.
approximate : bool
Run fast, but only roughly approximate the Tree SHAP values. This runs a method
previously proposed by Saabas which only considers a single feature ordering. Take care
since this does not have the consistency guarantees of Shapley values and places too
much weight on lower splits in the tree.
check_additivity : bool
Run a validation check that the sum of the SHAP values equals the output of the model. This
check takes only a small amount of time, and will catch potential unforeseen errors.
Note that this check only runs right now when explaining the margin of the model.
Returns
-------
array or list
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored in the expected_value
attribute of the explainer when it is constant). For models with vector outputs this returns
a list of such matrices, one for each output.
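
        Examples
        --------
        A minimal usage sketch; ``model`` and ``X`` are placeholder names for a fitted tree
        model and a matrix of samples::

            explainer = TreeExplainer(model)
            shap_values = explainer.shap_values(X)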
"""
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.model.tree_limit is None else self.model.tree_limit
# shortcut using the C++ version of Tree SHAP in XGBoost, LightGBM, and CatBoost
if self.feature_perturbation == "tree_path_dependent" and self.model.model_type != "internal" and self.data is None:
model_output_vals = None
phi = None
if self.model.model_type == "xgboost":
import xgboost
if not isinstance(X, xgboost.core.DMatrix):
X = xgboost.DMatrix(X)
if tree_limit == -1:
tree_limit = 0
try:
phi = self.model.original_model.predict(
X, ntree_limit=tree_limit, pred_contribs=True,
approx_contribs=approximate, validate_features=False
)
except ValueError as e:
raise ValueError("This reshape error is often caused by passing a bad data matrix to SHAP. " \
"See https://github.com/slundberg/shap/issues/580") from e
if check_additivity and self.model.model_output == "raw":
model_output_vals = self.model.original_model.predict(
X, ntree_limit=tree_limit, output_margin=True,
validate_features=False
)
elif self.model.model_type == "lightgbm":
assert not approximate, "approximate=True is not supported for LightGBM models!"
phi = self.model.original_model.predict(X, num_iteration=tree_limit, pred_contrib=True)
# Note: the data must be joined on the last axis
if self.model.original_model.params['objective'] == 'binary':
if not from_call:
warnings.warn('LightGBM binary classifier with TreeExplainer shap values output has changed to a list of ndarray')
phi = np.concatenate((0-phi, phi), axis=-1)
if phi.shape[1] != X.shape[1] + 1:
try:
phi = phi.reshape(X.shape[0], phi.shape[1]//(X.shape[1]+1), X.shape[1]+1)
except ValueError as e:
raise Exception("This reshape error is often caused by passing a bad data matrix to SHAP. " \
"See https://github.com/slundberg/shap/issues/580") from e
elif self.model.model_type == "catboost": # thanks to the CatBoost team for implementing this...
assert not approximate, "approximate=True is not supported for CatBoost models!"
assert tree_limit == -1, "tree_limit is not yet supported for CatBoost models!"
import catboost
if type(X) != catboost.Pool:
X = catboost.Pool(X, cat_features=self.model.cat_feature_indices)
phi = self.model.original_model.get_feature_importance(data=X, fstr_type='ShapValues')
# note we pull off the last column and keep it as our expected_value
if phi is not None:
if len(phi.shape) == 3:
self.expected_value = [phi[0, i, -1] for i in range(phi.shape[1])]
out = [phi[:, i, :-1] for i in range(phi.shape[1])]
else:
self.expected_value = phi[0, -1]
out = phi[:, :-1]
if check_additivity and model_output_vals is not None:
self.assert_additivity(out, model_output_vals)
return out
X, y, X_missing, flat_output, tree_limit, check_additivity = self._validate_inputs(X, y,
tree_limit,
check_additivity)
transform = self.model.get_transform()
# run the core algorithm using the C extension
assert_import("cext")
phi = np.zeros((X.shape[0], X.shape[1]+1, self.model.num_outputs))
if not approximate:
_cext.dense_tree_shap(
self.model.children_left, self.model.children_right, self.model.children_default,
self.model.features, self.model.thresholds, self.model.values, self.model.node_sample_weight,
self.model.max_depth, X, X_missing, y, self.data, self.data_missing, tree_limit,
self.model.base_offset, phi, feature_perturbation_codes[self.feature_perturbation],
output_transform_codes[transform], False
)
else:
_cext.dense_tree_saabas(
self.model.children_left, self.model.children_right, self.model.children_default,
self.model.features, self.model.thresholds, self.model.values,
self.model.max_depth, tree_limit, self.model.base_offset, output_transform_codes[transform],
X, X_missing, y, phi
)
out = self._get_shap_output(phi, flat_output)
if check_additivity and self.model.model_output == "raw":
self.assert_additivity(out, self.model.predict(X))
return out
# we pull off the last column and keep it as our expected_value
def _get_shap_output(self, phi, flat_output):
if self.model.num_outputs == 1:
if self.expected_value is None and self.model.model_output != "log_loss":
self.expected_value = phi[0, -1, 0]
if flat_output:
out = phi[0, :-1, 0]
else:
out = phi[:, :-1, 0]
else:
if self.expected_value is None and self.model.model_output != "log_loss":
self.expected_value = [phi[0, -1, i] for i in range(phi.shape[2])]
if flat_output:
out = [phi[0, :-1, i] for i in range(self.model.num_outputs)]
else:
out = [phi[:, :-1, i] for i in range(self.model.num_outputs)]
        # if our output format requires binary classification to be represented as two outputs then we do that here
if self.model.model_output == "probability_doubled":
out = [-out, out]
return out
def shap_interaction_values(self, X, y=None, tree_limit=None):
""" Estimate the SHAP interaction values for a set of samples.
Parameters
----------
X : numpy.array, pandas.DataFrame or catboost.Pool (for catboost)
A matrix of samples (# samples x # features) on which to explain the model's output.
y : numpy.array
An array of label values for each sample. Used when explaining loss functions (not yet supported).
tree_limit : None (default) or int
            Limit the number of trees used by the model. By default None means use the tree limit
            stored in the original model, and -1 means no limit.
Returns
-------
array or list
For models with a single output this returns a tensor of SHAP values
(# samples x # features x # features). The matrix (# features x # features) for each sample sums
to the difference between the model output for that sample and the expected value of the model output
(which is stored in the expected_value attribute of the explainer). Each row of this matrix sums to the
SHAP value for that feature for that sample. The diagonal entries of the matrix represent the
"main effect" of that feature on the prediction and the symmetric off-diagonal entries represent the
interaction effects between all pairs of features for that sample. For models with vector outputs
this returns a list of tensors, one for each output.
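
        Examples
        --------
        A minimal usage sketch; ``model`` and ``X`` are placeholder names for a fitted tree
        model and a matrix of samples::

            explainer = TreeExplainer(model)
            interaction_values = explainer.shap_interaction_values(X)
            # each row of the (features x features) matrix sums to the per-feature SHAP value (see Returns above)
            approx_shap_values = interaction_values.sum(axis=-1)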
"""
assert self.model.model_output == "raw", "Only model_output = \"raw\" is supported for SHAP interaction values right now!"
#assert self.feature_perturbation == "tree_path_dependent", "Only feature_perturbation = \"tree_path_dependent\" is supported for SHAP interaction values right now!"
transform = "identity"
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.model.tree_limit is None else self.model.tree_limit
# shortcut using the C++ version of Tree SHAP in XGBoost
if self.model.model_type == "xgboost" and self.feature_perturbation == "tree_path_dependent":
import xgboost
if not isinstance(X, xgboost.core.DMatrix):
X = xgboost.DMatrix(X)
if tree_limit == -1:
tree_limit = 0
phi = self.model.original_model.predict(X, ntree_limit=tree_limit, pred_interactions=True, validate_features=False)
# note we pull off the last column and keep it as our expected_value
if len(phi.shape) == 4:
self.expected_value = [phi[0, i, -1, -1] for i in range(phi.shape[1])]
return [phi[:, i, :-1, :-1] for i in range(phi.shape[1])]
else:
self.expected_value = phi[0, -1, -1]
return phi[:, :-1, :-1]
X, y, X_missing, flat_output, tree_limit, _ = self._validate_inputs(X, y, tree_limit, False)
# run the core algorithm using the C extension
assert_import("cext")
phi = np.zeros((X.shape[0], X.shape[1]+1, X.shape[1]+1, self.model.num_outputs))
_cext.dense_tree_shap(
self.model.children_left, self.model.children_right, self.model.children_default,
self.model.features, self.model.thresholds, self.model.values, self.model.node_sample_weight,
self.model.max_depth, X, X_missing, y, self.data, self.data_missing, tree_limit,
self.model.base_offset, phi, feature_perturbation_codes[self.feature_perturbation],
output_transform_codes[transform], True
)
return self._get_shap_interactions_output(phi,flat_output)
# we pull off the last column and keep it as our expected_value
def _get_shap_interactions_output(self, phi, flat_output):
if self.model.num_outputs == 1:
self.expected_value = phi[0, -1, -1, 0]
if flat_output:
out = phi[0, :-1, :-1, 0]
else:
out = phi[:, :-1, :-1, 0]
else:
self.expected_value = [phi[0, -1, -1, i] for i in range(phi.shape[3])]
if flat_output:
out = [phi[0, :-1, :-1, i] for i in range(self.model.num_outputs)]
else:
out = [phi[:, :-1, :-1, i] for i in range(self.model.num_outputs)]
return out
def assert_additivity(self, phi, model_output):
def check_sum(sum_val, model_output):
diff = np.abs(sum_val - model_output)
if np.max(diff / (np.abs(sum_val) + 1e-2)) > 1e-2:
ind = np.argmax(diff)
err_msg = "Additivity check failed in TreeExplainer! Please ensure the data matrix you passed to the " \
"explainer is the same shape that the model was trained on. If your data shape is correct " \
"then please report this on GitHub."
if self.feature_perturbation != "interventional":
err_msg += " Consider retrying with the feature_perturbation='interventional' option."
err_msg += " This check failed because for one of the samples the sum of the SHAP values" \
" was %f, while the model output was %f. If this difference is acceptable" \
" you can set check_additivity=False to disable this check." % (sum_val[ind], model_output[ind])
raise Exception(err_msg)
if type(phi) is list:
for i in range(len(phi)):
check_sum(self.expected_value[i] + phi[i].sum(-1), model_output[:,i])
else:
check_sum(self.expected_value + phi.sum(-1), model_output)
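    # The identity verified above, written out for a single sample x (illustrative only):
    #
    #   model_output(x) ~= explainer.expected_value + shap_values(x).sum()
    #
    # with the relative tolerance of roughly 1e-2 used in check_sum.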
@staticmethod
def supports_model_with_masker(model, masker):
""" Determines if this explainer can handle the given model.
This is an abstract static method meant to be implemented by each subclass.
"""
if not isinstance(masker, (maskers.Independent)) and masker is not None:
return False
try:
TreeEnsemble(model)
except:
return False
return True
class TreeEnsemble:
""" An ensemble of decision trees.
This object provides a common interface to many different types of models.
"""
def __init__(self, model, data=None, data_missing=None, model_output=None):
self.model_type = "internal"
self.trees = None
self.base_offset = 0
self.model_output = model_output
self.objective = None # what we explain when explaining the loss of the model
self.tree_output = None # what are the units of the values in the leaves of the trees
self.internal_dtype = np.float64
self.input_dtype = np.float64 # for sklearn we need to use np.float32 to always get exact matches to their predictions
self.data = data
self.data_missing = data_missing
self.fully_defined_weighting = True # does the background dataset land in every leaf (making it valid for the tree_path_dependent method)
self.tree_limit = None # used for limiting the number of trees we use by default (like from early stopping)
self.num_stacked_models = 1 # If this is greater than 1 it means we have multiple stacked models with the same number of trees in each model (XGBoost multi-output style)
self.cat_feature_indices = None # If this is set it tells us which features are treated categorically
# we use names like keras
objective_name_map = {
"mse": "squared_error",
"variance": "squared_error",
"friedman_mse": "squared_error",
"reg:linear": "squared_error",
"reg:squarederror": "squared_error",
"regression": "squared_error",
"regression_l2": "squared_error",
"mae": "absolute_error",
"gini": "binary_crossentropy",
"entropy": "binary_crossentropy",
"reg:logistic": "binary_crossentropy",
"binary:logistic": "binary_crossentropy",
"binary_logloss": "binary_crossentropy",
"binary": "binary_crossentropy"
}
tree_output_name_map = {
"regression": "raw_value",
"regression_l2": "squared_error",
"reg:linear": "raw_value",
"reg:squarederror": "raw_value",
"reg:logistic": "log_odds",
"binary:logistic": "log_odds",
"binary_logloss": "log_odds",
"binary": "log_odds"
}
if type(model) is dict and "trees" in model:
            # This allows a dictionary to be passed that represents the model.
            # The dictionary has several numeric parameters and also a list of trees,
            # where each tree is a dictionary describing that tree.
if "internal_dtype" in model:
self.internal_dtype = model["internal_dtype"]
if "input_dtype" in model:
self.input_dtype = model["input_dtype"]
if "objective" in model:
self.objective = model["objective"]
if "tree_output" in model:
self.tree_output = model["tree_output"]
if "base_offset" in model:
self.base_offset = model["base_offset"]
self.trees = [SingleTree(t, data=data, data_missing=data_missing) for t in model["trees"]]
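            # Illustrative sketch of the dictionary format handled above (values are hypothetical;
            # each per-tree dict uses the plural-key layout parsed by SingleTree):
            #
            #   model = {
            #       "base_offset": 0.0, "objective": "binary_crossentropy", "tree_output": "log_odds",
            #       "input_dtype": np.float32, "internal_dtype": np.float64,
            #       "trees": [{"children_left": ..., "children_right": ..., "children_default": ...,
            #                  "features": ..., "thresholds": ..., "values": ...,
            #                  "node_sample_weight": ...}],
            #   }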
elif type(model) is list and type(model[0]) == SingleTree: # old-style direct-load format
self.trees = model
elif safe_isinstance(model, ["sklearn.ensemble.RandomForestRegressor", "sklearn.ensemble.forest.RandomForestRegressor", "econml.grf._base_grf.BaseGRF"]):
assert hasattr(model, "estimators_"), "Model has no `estimators_`! Have you called `model.fit`?"
self.internal_dtype = model.estimators_[0].tree_.value.dtype.type
self.input_dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [SingleTree(e.tree_, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif safe_isinstance(model, ["sklearn.ensemble.IsolationForest", "sklearn.ensemble._iforest.IsolationForest"]):
self.dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [IsoTree(e.tree_, f, scaling=scaling, data=data, data_missing=data_missing) for e, f in zip(model.estimators_, model.estimators_features_)]
self.tree_output = "raw_value"
elif safe_isinstance(model, ["pyod.models.iforest.IForest"]):
self.dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [IsoTree(e.tree_, f, scaling=scaling, data=data, data_missing=data_missing) for e, f in zip(model.detector_.estimators_, model.detector_.estimators_features_)]
self.tree_output = "raw_value"
elif safe_isinstance(model, "skopt.learning.forest.RandomForestRegressor"):
assert hasattr(model, "estimators_"), "Model has no `estimators_`! Have you called `model.fit`?"
self.internal_dtype = model.estimators_[0].tree_.value.dtype.type
self.input_dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [SingleTree(e.tree_, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif safe_isinstance(model, ["sklearn.ensemble.ExtraTreesRegressor", "sklearn.ensemble.forest.ExtraTreesRegressor"]):
assert hasattr(model, "estimators_"), "Model has no `estimators_`! Have you called `model.fit`?"
self.internal_dtype = model.estimators_[0].tree_.value.dtype.type
self.input_dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [SingleTree(e.tree_, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif safe_isinstance(model, "skopt.learning.forest.ExtraTreesRegressor"):
assert hasattr(model, "estimators_"), "Model has no `estimators_`! Have you called `model.fit`?"
self.internal_dtype = model.estimators_[0].tree_.value.dtype.type
self.input_dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [SingleTree(e.tree_, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif safe_isinstance(model, ["sklearn.tree.DecisionTreeRegressor", "sklearn.tree.tree.DecisionTreeRegressor", "econml.grf._base_grftree.GRFTree"]):
self.internal_dtype = model.tree_.value.dtype.type
self.input_dtype = np.float32
self.trees = [SingleTree(model.tree_, data=data, data_missing=data_missing)]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif safe_isinstance(model, ["sklearn.tree.DecisionTreeClassifier", "sklearn.tree.tree.DecisionTreeClassifier"]):
self.internal_dtype = model.tree_.value.dtype.type
self.input_dtype = np.float32
self.trees = [SingleTree(model.tree_, normalize=True, data=data, data_missing=data_missing)]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "probability"
elif safe_isinstance(model, ["sklearn.ensemble.RandomForestClassifier", "sklearn.ensemble.forest.RandomForestClassifier"]):
assert hasattr(model, "estimators_"), "Model has no `estimators_`! Have you called `model.fit`?"
self.internal_dtype = model.estimators_[0].tree_.value.dtype.type
self.input_dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [SingleTree(e.tree_, normalize=True, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "probability"
elif safe_isinstance(model, ["sklearn.ensemble.ExtraTreesClassifier", "sklearn.ensemble.forest.ExtraTreesClassifier"]): # TODO: add unit test for this case
assert hasattr(model, "estimators_"), "Model has no `estimators_`! Have you called `model.fit`?"
self.internal_dtype = model.estimators_[0].tree_.value.dtype.type
self.input_dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [SingleTree(e.tree_, normalize=True, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "probability"
elif safe_isinstance(model, ["sklearn.ensemble.GradientBoostingRegressor", "sklearn.ensemble.gradient_boosting.GradientBoostingRegressor"]):
self.input_dtype = np.float32
# currently we only support the mean and quantile estimators
if safe_isinstance(model.init_, ["sklearn.ensemble.MeanEstimator", "sklearn.ensemble.gradient_boosting.MeanEstimator"]):
self.base_offset = model.init_.mean
elif safe_isinstance(model.init_, ["sklearn.ensemble.QuantileEstimator", "sklearn.ensemble.gradient_boosting.QuantileEstimator"]):
self.base_offset = model.init_.quantile
elif safe_isinstance(model.init_, "sklearn.dummy.DummyRegressor"):
self.base_offset = model.init_.constant_[0]
else:
assert False, "Unsupported init model type: " + str(type(model.init_))
self.trees = [SingleTree(e.tree_, scaling=model.learning_rate, data=data, data_missing=data_missing) for e in model.estimators_[:,0]]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "raw_value"
elif safe_isinstance(model, ["sklearn.ensemble.HistGradientBoostingRegressor"]):
import sklearn
if self.model_output == "predict":
self.model_output = "raw"
self.input_dtype = sklearn.ensemble._hist_gradient_boosting.common.X_DTYPE
self.base_offset = model._baseline_prediction
self.trees = []
for p in model._predictors:
nodes = p[0].nodes
# each node has values: ('value', 'count', 'feature_idx', 'threshold', 'missing_go_to_left', 'left', 'right', 'gain', 'depth', 'is_leaf', 'bin_threshold')
tree = {
"children_left": np.array([-1 if n[9] else n[5] for n in nodes]),
"children_right": np.array([-1 if n[9] else n[6] for n in nodes]),
"children_default": np.array([-1 if n[9] else (n[5] if n[4] else n[6]) for n in nodes]),
"features": np.array([-2 if n[9] else n[2] for n in nodes]),
"thresholds": np.array([n[3] for n in nodes], dtype=np.float64),
"values": np.array([[n[0]] for n in nodes], dtype=np.float64),
"node_sample_weight": np.array([n[1] for n in nodes], dtype=np.float64),
}
self.trees.append(SingleTree(tree, data=data, data_missing=data_missing))
self.objective = objective_name_map.get(model.loss, None)
self.tree_output = "raw_value"
elif safe_isinstance(model, ["sklearn.ensemble.HistGradientBoostingClassifier"]):
import sklearn
self.base_offset = model._baseline_prediction
if hasattr(self.base_offset, "__len__") and self.model_output != "raw":
raise Exception("Multi-output HistGradientBoostingClassifier models are not yet supported unless model_output=\"raw\". See GitHub issue #1028")
self.input_dtype = sklearn.ensemble._hist_gradient_boosting.common.X_DTYPE
self.num_stacked_models = len(model._predictors[0])
if self.model_output == "predict_proba":
if self.num_stacked_models == 1:
self.model_output = "probability_doubled" # with predict_proba we need to double the outputs to match
else:
self.model_output = "probability"
self.trees = []
for p in model._predictors:
for i in range(self.num_stacked_models):
nodes = p[i].nodes
# each node has values: ('value', 'count', 'feature_idx', 'threshold', 'missing_go_to_left', 'left', 'right', 'gain', 'depth', 'is_leaf', 'bin_threshold')
tree = {
"children_left": np.array([-1 if n[9] else n[5] for n in nodes]),
"children_right": np.array([-1 if n[9] else n[6] for n in nodes]),
"children_default": np.array([-1 if n[9] else (n[5] if n[4] else n[6]) for n in nodes]),
"features": np.array([-2 if n[9] else n[2] for n in nodes]),
"thresholds": np.array([n[3] for n in nodes], dtype=np.float64),
"values": np.array([[n[0]] for n in nodes], dtype=np.float64),
"node_sample_weight": np.array([n[1] for n in nodes], dtype=np.float64),
}
self.trees.append(SingleTree(tree, data=data, data_missing=data_missing))
self.objective = objective_name_map.get(model.loss, None)
self.tree_output = "log_odds"
elif safe_isinstance(model, ["sklearn.ensemble.GradientBoostingClassifier","sklearn.ensemble._gb.GradientBoostingClassifier", "sklearn.ensemble.gradient_boosting.GradientBoostingClassifier"]):
self.input_dtype = np.float32
# TODO: deal with estimators for each class
if model.estimators_.shape[1] > 1:
assert False, "GradientBoostingClassifier is only supported for binary classification right now!"
            # currently we only support the log odds estimator
if safe_isinstance(model.init_, ["sklearn.ensemble.LogOddsEstimator", "sklearn.ensemble.gradient_boosting.LogOddsEstimator"]):
self.base_offset = model.init_.prior
self.tree_output = "log_odds"
elif safe_isinstance(model.init_, "sklearn.dummy.DummyClassifier"):
self.base_offset = scipy.special.logit(model.init_.class_prior_[1]) # with two classes the trees only model the second class. # pylint: disable=no-member
self.tree_output = "log_odds"
else:
assert False, "Unsupported init model type: " + str(type(model.init_))
self.trees = [SingleTree(e.tree_, scaling=model.learning_rate, data=data, data_missing=data_missing) for e in model.estimators_[:,0]]
self.objective = objective_name_map.get(model.criterion, None)
elif "pyspark.ml" in str(type(model)):
assert_import("pyspark")
self.model_type = "pyspark"
# model._java_obj.getImpurity() can be gini, entropy or variance.
self.objective = objective_name_map.get(model._java_obj.getImpurity(), None)
if "Classification" in str(type(model)):
normalize = True
self.tree_output = "probability"
else:
normalize = False
self.tree_output = "raw_value"
# Spark Random forest, create 1 weighted (avg) tree per sub-model
if safe_isinstance(model, "pyspark.ml.classification.RandomForestClassificationModel") \
or safe_isinstance(model, "pyspark.ml.regression.RandomForestRegressionModel"):
sum_weight = sum(model.treeWeights) # output is average of trees
self.trees = [SingleTree(tree, normalize=normalize, scaling=model.treeWeights[i]/sum_weight) for i, tree in enumerate(model.trees)]
# Spark GBT, create 1 weighted (learning rate) tree per sub-model
elif safe_isinstance(model, "pyspark.ml.classification.GBTClassificationModel") \
or safe_isinstance(model, "pyspark.ml.regression.GBTRegressionModel"):
self.objective = "squared_error" # GBT subtree use the variance
self.tree_output = "raw_value"
self.trees = [SingleTree(tree, normalize=False, scaling=model.treeWeights[i]) for i, tree in enumerate(model.trees)]
# Spark Basic model (single tree)
elif safe_isinstance(model, "pyspark.ml.classification.DecisionTreeClassificationModel") \
or safe_isinstance(model, "pyspark.ml.regression.DecisionTreeRegressionModel"):
self.trees = [SingleTree(model, normalize=normalize, scaling=1)]
else:
assert False, "Unsupported Spark model type: " + str(type(model))
elif safe_isinstance(model, "xgboost.core.Booster"):
import xgboost
self.original_model = model
self.model_type = "xgboost"
xgb_loader = XGBTreeModelLoader(self.original_model)
self.trees = xgb_loader.get_trees(data=data, data_missing=data_missing)
self.base_offset = xgb_loader.base_score
self.objective = objective_name_map.get(xgb_loader.name_obj, None)
self.tree_output = tree_output_name_map.get(xgb_loader.name_obj, None)
if xgb_loader.num_class > 0:
self.num_stacked_models = xgb_loader.num_class
elif safe_isinstance(model, "xgboost.sklearn.XGBClassifier"):
import xgboost
self.input_dtype = np.float32
self.model_type = "xgboost"
self.original_model = model.get_booster()
xgb_loader = XGBTreeModelLoader(self.original_model)
self.trees = xgb_loader.get_trees(data=data, data_missing=data_missing)
self.base_offset = xgb_loader.base_score
self.objective = objective_name_map.get(xgb_loader.name_obj, None)
self.tree_output = tree_output_name_map.get(xgb_loader.name_obj, None)
self.tree_limit = getattr(model, "best_ntree_limit", None)
if xgb_loader.num_class > 0:
self.num_stacked_models = xgb_loader.num_class
if self.model_output == "predict_proba":
if self.num_stacked_models == 1:
self.model_output = "probability_doubled" # with predict_proba we need to double the outputs to match
else:
self.model_output = "probability"
elif safe_isinstance(model, "xgboost.sklearn.XGBRegressor"):
import xgboost
self.original_model = model.get_booster()
self.model_type = "xgboost"
xgb_loader = XGBTreeModelLoader(self.original_model)
self.trees = xgb_loader.get_trees(data=data, data_missing=data_missing)
self.base_offset = xgb_loader.base_score
self.objective = objective_name_map.get(xgb_loader.name_obj, None)
self.tree_output = tree_output_name_map.get(xgb_loader.name_obj, None)
self.tree_limit = getattr(model, "best_ntree_limit", None)
if xgb_loader.num_class > 0:
self.num_stacked_models = xgb_loader.num_class
elif safe_isinstance(model, "xgboost.sklearn.XGBRanker"):
import xgboost
self.original_model = model.get_booster()
self.model_type = "xgboost"
xgb_loader = XGBTreeModelLoader(self.original_model)
self.trees = xgb_loader.get_trees(data=data, data_missing=data_missing)
self.base_offset = xgb_loader.base_score
# Note: for ranker, leaving tree_output and objective as None as they
# are not implemented in native code yet
self.tree_limit = getattr(model, "best_ntree_limit", None)
if xgb_loader.num_class > 0:
self.num_stacked_models = xgb_loader.num_class
elif safe_isinstance(model, "lightgbm.basic.Booster"):
assert_import("lightgbm")
self.model_type = "lightgbm"
self.original_model = model
tree_info = self.original_model.dump_model()["tree_info"]
try:
self.trees = [SingleTree(e, data=data, data_missing=data_missing) for e in tree_info]
except:
self.trees = None # we get here because the cext can't handle categorical splits yet
self.objective = objective_name_map.get(model.params.get("objective", "regression"), None)
self.tree_output = tree_output_name_map.get(model.params.get("objective", "regression"), None)
elif safe_isinstance(model, "gpboost.basic.Booster"):
assert_import("gpboost")
self.model_type = "gpboost"
self.original_model = model
tree_info = self.original_model.dump_model()["tree_info"]
try:
self.trees = [SingleTree(e, data=data, data_missing=data_missing) for e in tree_info]
except:
self.trees = None # we get here because the cext can't handle categorical splits yet
self.objective = objective_name_map.get(model.params.get("objective", "regression"), None)
self.tree_output = tree_output_name_map.get(model.params.get("objective", "regression"), None)
elif safe_isinstance(model, "lightgbm.sklearn.LGBMRegressor"):
assert_import("lightgbm")
self.model_type = "lightgbm"
self.original_model = model.booster_
tree_info = self.original_model.dump_model()["tree_info"]
try:
self.trees = [SingleTree(e, data=data, data_missing=data_missing) for e in tree_info]
except:
self.trees = None # we get here because the cext can't handle categorical splits yet
self.objective = objective_name_map.get(model.objective, None)
self.tree_output = tree_output_name_map.get(model.objective, None)
if model.objective is None:
self.objective = "squared_error"
self.tree_output = "raw_value"
elif safe_isinstance(model, "lightgbm.sklearn.LGBMRanker"):
assert_import("lightgbm")
self.model_type = "lightgbm"
self.original_model = model.booster_
tree_info = self.original_model.dump_model()["tree_info"]
try:
self.trees = [SingleTree(e, data=data, data_missing=data_missing) for e in tree_info]
except:
self.trees = None # we get here because the cext can't handle categorical splits yet
# Note: for ranker, leaving tree_output and objective as None as they
# are not implemented in native code yet
elif safe_isinstance(model, "lightgbm.sklearn.LGBMClassifier"):
assert_import("lightgbm")
self.model_type = "lightgbm"
if model.n_classes_ > 2:
self.num_stacked_models = model.n_classes_
self.original_model = model.booster_
tree_info = self.original_model.dump_model()["tree_info"]
try:
self.trees = [SingleTree(e, data=data, data_missing=data_missing) for e in tree_info]
except:
self.trees = None # we get here because the cext can't handle categorical splits yet
self.objective = objective_name_map.get(model.objective, None)
self.tree_output = tree_output_name_map.get(model.objective, None)
if model.objective is None:
self.objective = "binary_crossentropy"
self.tree_output = "log_odds"
elif safe_isinstance(model, "catboost.core.CatBoostRegressor"):
assert_import("catboost")
self.model_type = "catboost"
self.original_model = model
self.cat_feature_indices = model.get_cat_feature_indices()
elif safe_isinstance(model, "catboost.core.CatBoostClassifier"):
assert_import("catboost")
self.model_type = "catboost"
self.original_model = model
self.input_dtype = np.float32
try:
cb_loader = CatBoostTreeModelLoader(model)
self.trees = cb_loader.get_trees(data=data, data_missing=data_missing)
except:
self.trees = None # we get here because the cext can't handle categorical splits yet
self.tree_output = "log_odds"
self.objective = "binary_crossentropy"
self.cat_feature_indices = model.get_cat_feature_indices()
elif safe_isinstance(model, "catboost.core.CatBoost"):
assert_import("catboost")
self.model_type = "catboost"
self.original_model = model
self.cat_feature_indices = model.get_cat_feature_indices()
elif safe_isinstance(model, "imblearn.ensemble._forest.BalancedRandomForestClassifier"):
self.input_dtype = np.float32
scaling = 1.0 / len(model.estimators_) # output is average of trees
self.trees = [SingleTree(e.tree_, normalize=True, scaling=scaling, data=data, data_missing=data_missing) for e in model.estimators_]
self.objective = objective_name_map.get(model.criterion, None)
self.tree_output = "probability"
elif safe_isinstance(model, "ngboost.ngboost.NGBoost") or safe_isinstance(model, "ngboost.api.NGBRegressor") or safe_isinstance(model, "ngboost.api.NGBClassifier"):
assert model.base_models, "The NGBoost model has empty `base_models`! Have you called `model.fit`?"
if self.model_output == "raw":
param_idx = 0 # default to the first parameter of the output distribution
warnings.warn("Translating model_ouput=\"raw\" to model_output=0 for the 0-th parameter in the distribution. Use model_output=0 directly to avoid this warning.")
elif type(self.model_output) is int:
param_idx = self.model_output
self.model_output = "raw" # note that after loading we have a new model_output type
assert safe_isinstance(model.base_models[0][param_idx], ["sklearn.tree.DecisionTreeRegressor", "sklearn.tree.tree.DecisionTreeRegressor"]), "You must use default_tree_learner!"
shap_trees = [trees[param_idx] for trees in model.base_models]
self.internal_dtype = shap_trees[0].tree_.value.dtype.type
self.input_dtype = np.float32
scaling = - model.learning_rate * np.array(model.scalings) # output is weighted average of trees
self.trees = [SingleTree(e.tree_, scaling=s, data=data, data_missing=data_missing) for e,s in zip(shap_trees,scaling)]
self.objective = objective_name_map.get(shap_trees[0].criterion, None)
self.tree_output = "raw_value"
self.base_offset = model.init_params[param_idx]
else:
raise Exception("Model type not yet supported by TreeExplainer: " + str(type(model)))
# build a dense numpy version of all the tree objects
if self.trees is not None and self.trees:
max_nodes = np.max([len(t.values) for t in self.trees])
assert len(np.unique([t.values.shape[1] for t in self.trees])) == 1, "All trees in the ensemble must have the same output dimension!"
num_trees = len(self.trees)
if self.num_stacked_models > 1:
assert len(self.trees) % self.num_stacked_models == 0, "Only stacked models with equal numbers of trees are supported!"
assert self.trees[0].values.shape[1] == 1, "Only stacked models with single outputs per model are supported!"
self.num_outputs = self.num_stacked_models
else:
self.num_outputs = self.trees[0].values.shape[1]
# important to be -1 in unused sections!! This way we can tell which entries are valid.
self.children_left = -np.ones((num_trees, max_nodes), dtype=np.int32)
self.children_right = -np.ones((num_trees, max_nodes), dtype=np.int32)
self.children_default = -np.ones((num_trees, max_nodes), dtype=np.int32)
self.features = -np.ones((num_trees, max_nodes), dtype=np.int32)
self.thresholds = np.zeros((num_trees, max_nodes), dtype=self.internal_dtype)
self.values = np.zeros((num_trees, max_nodes, self.num_outputs), dtype=self.internal_dtype)
self.node_sample_weight = np.zeros((num_trees, max_nodes), dtype=self.internal_dtype)
for i in range(num_trees):
self.children_left[i,:len(self.trees[i].children_left)] = self.trees[i].children_left
self.children_right[i,:len(self.trees[i].children_right)] = self.trees[i].children_right
self.children_default[i,:len(self.trees[i].children_default)] = self.trees[i].children_default
self.features[i,:len(self.trees[i].features)] = self.trees[i].features
self.thresholds[i,:len(self.trees[i].thresholds)] = self.trees[i].thresholds
if self.num_stacked_models > 1:
# stack_pos = int(i // (num_trees / self.num_stacked_models))
stack_pos = i % self.num_stacked_models
self.values[i,:len(self.trees[i].values[:,0]),stack_pos] = self.trees[i].values[:,0]
else:
self.values[i,:len(self.trees[i].values)] = self.trees[i].values
self.node_sample_weight[i,:len(self.trees[i].node_sample_weight)] = self.trees[i].node_sample_weight
# ensure that the passed background dataset lands in every leaf
if np.min(self.trees[i].node_sample_weight) <= 0:
self.fully_defined_weighting = False
self.num_nodes = np.array([len(t.values) for t in self.trees], dtype=np.int32)
self.max_depth = np.max([t.max_depth for t in self.trees])
# make sure the base offset is a 1D array
if not hasattr(self.base_offset, "__len__") or len(self.base_offset) == 0:
self.base_offset = (np.ones(self.num_outputs) * self.base_offset).astype(self.internal_dtype)
self.base_offset = self.base_offset.flatten()
assert len(self.base_offset) == self.num_outputs
def get_transform(self):
""" A consistent interface to make predictions from this model.
"""
if self.model_output == "raw":
transform = "identity"
elif self.model_output == "probability" or self.model_output == "probability_doubled":
if self.tree_output == "log_odds":
transform = "logistic"
elif self.tree_output == "probability":
transform = "identity"
else:
raise Exception("model_output = \"probability\" is not yet supported when model.tree_output = \"" + self.tree_output + "\"!")
elif self.model_output == "log_loss":
if self.objective == "squared_error":
transform = "squared_loss"
elif self.objective == "binary_crossentropy":
transform = "logistic_nlogloss"
else:
raise Exception("model_output = \"log_loss\" is not yet supported when model.objective = \"" + self.objective + "\"!")
else:
raise Exception("Unrecognized model_output parameter value: %s! If model.%s is a valid function open a github issue to ask that this method be supported. If you want 'predict_proba' just use 'probability' for now." % (str(self.model_output), str(self.model_output)))
return transform
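    # Summary of the mapping implemented above, for reference:
    #   model_output="raw"                                        -> "identity"
    #   model_output="probability"/"probability_doubled"          -> "logistic" if tree_output == "log_odds" else "identity"
    #   model_output="log_loss", objective="squared_error"        -> "squared_loss"
    #   model_output="log_loss", objective="binary_crossentropy"  -> "logistic_nlogloss"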
def predict(self, X, y=None, output=None, tree_limit=None):
""" A consistent interface to make predictions from this model.
Parameters
----------
tree_limit : None (default) or int
            Limit the number of trees used by the model. By default None means use the tree limit
            stored in the original model, and -1 means no limit.
"""
if output is None:
output = self.model_output
if self.model_type == "pyspark":
#import pyspark
# TODO: support predict for pyspark
raise NotImplementedError("Predict with pyspark isn't implemented. Don't run 'interventional' as feature_perturbation.")
# see if we have a default tree_limit in place.
if tree_limit is None:
tree_limit = -1 if self.tree_limit is None else self.tree_limit
# convert dataframes
if safe_isinstance(X, "pandas.core.series.Series"):
X = X.values
elif safe_isinstance(X, "pandas.core.frame.DataFrame"):
X = X.values
flat_output = False
if len(X.shape) == 1:
flat_output = True
X = X.reshape(1, X.shape[0])
if X.dtype.type != self.input_dtype:
X = X.astype(self.input_dtype)
        X_missing = np.isnan(X, dtype=bool)
assert isinstance(X, np.ndarray), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 2, "Passed input data matrix X must have 1 or 2 dimensions!"
if tree_limit < 0 or tree_limit > self.values.shape[0]:
tree_limit = self.values.shape[0]
if output == "logloss":
assert y is not None, "Both samples and labels must be provided when explaining the loss (i.e. `explainer.shap_values(X, y)`)!"
assert X.shape[0] == len(y), "The number of labels (%d) does not match the number of samples to explain (%d)!" % (len(y), X.shape[0])
transform = self.get_transform()
assert_import("cext")
output = np.zeros((X.shape[0], self.num_outputs))
_cext.dense_tree_predict(
self.children_left, self.children_right, self.children_default,
self.features, self.thresholds, self.values,
self.max_depth, tree_limit, self.base_offset, output_transform_codes[transform],
X, X_missing, y, output
)
# drop dimensions we don't need
if flat_output:
if self.num_outputs == 1:
return output.flatten()[0]
else:
return output.reshape(-1, self.num_outputs)
else:
if self.num_outputs == 1:
return output.flatten()
else:
return output
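    # Internal usage note: TreeExplainer wraps the user's model in a TreeEnsemble (self.model)
    # and calls this predict method, e.g. when verifying additivity of the computed SHAP values.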
class SingleTree:
""" A single decision tree.
The primary point of this object is to parse many different tree types into a common format.
"""
def __init__(self, tree, normalize=False, scaling=1.0, data=None, data_missing=None):
assert_import("cext")
if safe_isinstance(tree, ["sklearn.tree._tree.Tree", "econml.tree._tree.Tree"]):
self.children_left = tree.children_left.astype(np.int32)
self.children_right = tree.children_right.astype(np.int32)
self.children_default = self.children_left # missing values not supported in sklearn
self.features = tree.feature.astype(np.int32)
self.thresholds = tree.threshold.astype(np.float64)
self.values = tree.value.reshape(tree.value.shape[0], tree.value.shape[1] * tree.value.shape[2])
if normalize:
self.values = (self.values.T / self.values.sum(1)).T
self.values = self.values * scaling
self.node_sample_weight = tree.weighted_n_node_samples.astype(np.float64)
elif type(tree) is dict and 'features' in tree:
self.children_left = tree["children_left"].astype(np.int32)
self.children_right = tree["children_right"].astype(np.int32)
self.children_default = tree["children_default"].astype(np.int32)
self.features = tree["features"].astype(np.int32)
self.thresholds = tree["thresholds"]
self.values = tree["values"] * scaling
self.node_sample_weight = tree["node_sample_weight"]
        # deprecated dictionary support (with sklearn singular style "feature" and "value" names)
elif type(tree) is dict and 'children_left' in tree:
self.children_left = tree["children_left"].astype(np.int32)
self.children_right = tree["children_right"].astype(np.int32)
self.children_default = tree["children_default"].astype(np.int32)
self.features = tree["feature"].astype(np.int32)
self.thresholds = tree["threshold"]
self.values = tree["value"] * scaling
self.node_sample_weight = tree["node_sample_weight"]
elif safe_isinstance(tree, "pyspark.ml.classification.DecisionTreeClassificationModel") \
or safe_isinstance(tree, "pyspark.ml.regression.DecisionTreeRegressionModel"):
#model._java_obj.numNodes() doesn't give leaves, need to recompute the size
def getNumNodes(node, size):
size = size + 1
if node.subtreeDepth() == 0:
return size
else:
size = getNumNodes(node.leftChild(), size)
return getNumNodes(node.rightChild(), size)
num_nodes = getNumNodes(tree._java_obj.rootNode(), 0)
self.children_left = np.full(num_nodes, -2, dtype=np.int32)
self.children_right = np.full(num_nodes, -2, dtype=np.int32)
self.children_default = np.full(num_nodes, -2, dtype=np.int32)
self.features = np.full(num_nodes, -2, dtype=np.int32)
self.thresholds = np.full(num_nodes, -2, dtype=np.float64)
self.values = [-2]*num_nodes
self.node_sample_weight = np.full(num_nodes, -2, dtype=np.float64)
def buildTree(index, node):
index = index + 1
if tree._java_obj.getImpurity() == 'variance':
self.values[index] = [node.prediction()] #prediction for the node
else:
                    self.values[index] = [e for e in node.impurityStats().stats()] # for gini: ndarray(num_labels): number of items of each label that went through this node
                self.node_sample_weight[index] = node.impurityStats().count() # weighted count of elements that went through this node
if node.subtreeDepth() == 0:
return index
else:
self.features[index] = node.split().featureIndex() #index of the feature we split on, not available for leaf, int
if str(node.split().getClass()).endswith('tree.CategoricalSplit'):
#Categorical split isn't implemented, TODO: could fake it by creating a fake node to split on the exact value?
raise NotImplementedError('CategoricalSplit are not yet implemented')
self.thresholds[index] = node.split().threshold() #threshold for the feature, not available for leaf, float
self.children_left[index] = index + 1
idx = buildTree(index, node.leftChild())
self.children_right[index] = idx + 1
idx = buildTree(idx, node.rightChild())
return idx
buildTree(-1, tree._java_obj.rootNode())
            # default children for missing values are not supported with MLlib? (TODO)
self.children_default = self.children_left
self.values = np.asarray(self.values)
if normalize:
self.values = (self.values.T / self.values.sum(1)).T
self.values = self.values * scaling
elif type(tree) == dict and 'tree_structure' in tree: # LightGBM model dump
start = tree['tree_structure']
num_parents = tree['num_leaves']-1
self.children_left = np.empty((2*num_parents+1), dtype=np.int32)
self.children_right = np.empty((2*num_parents+1), dtype=np.int32)
self.children_default = np.empty((2*num_parents+1), dtype=np.int32)
self.features = np.empty((2*num_parents+1), dtype=np.int32)
self.thresholds = np.empty((2*num_parents+1), dtype=np.float64)
self.values = [-2]*(2*num_parents+1)
self.node_sample_weight = np.empty((2*num_parents+1), dtype=np.float64)
visited, queue = [], [start]
while queue:
vertex = queue.pop(0)
if 'split_index' in vertex.keys():
if vertex['split_index'] not in visited:
if 'split_index' in vertex['left_child'].keys():
self.children_left[vertex['split_index']] = vertex['left_child']['split_index']
else:
self.children_left[vertex['split_index']] = vertex['left_child']['leaf_index']+num_parents
if 'split_index' in vertex['right_child'].keys():
self.children_right[vertex['split_index']] = vertex['right_child']['split_index']
else:
self.children_right[vertex['split_index']] = vertex['right_child']['leaf_index']+num_parents
if vertex['default_left']:
self.children_default[vertex['split_index']] = self.children_left[vertex['split_index']]
else:
self.children_default[vertex['split_index']] = self.children_right[vertex['split_index']]
self.features[vertex['split_index']] = vertex['split_feature']
self.thresholds[vertex['split_index']] = vertex['threshold']
self.values[vertex['split_index']] = [vertex['internal_value']]
self.node_sample_weight[vertex['split_index']] = vertex['internal_count']
visited.append(vertex['split_index'])
queue.append(vertex['left_child'])
queue.append(vertex['right_child'])
else:
self.children_left[vertex['leaf_index']+num_parents] = -1
self.children_right[vertex['leaf_index']+num_parents] = -1
self.children_default[vertex['leaf_index']+num_parents] = -1
self.features[vertex['leaf_index']+num_parents] = -1
self.thresholds[vertex['leaf_index']+num_parents] = -1
self.values[vertex['leaf_index']+num_parents] = [vertex['leaf_value']]
self.node_sample_weight[vertex['leaf_index']+num_parents] = vertex['leaf_count']
self.values = np.asarray(self.values)
self.values = np.multiply(self.values, scaling)
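            # Minimal sketch of the LightGBM dump parsed above (one entry of
            # Booster.dump_model()["tree_info"]; values are illustrative):
            #
            #   {"num_leaves": 2,
            #    "tree_structure": {"split_index": 0, "split_feature": 3, "threshold": 1.5,
            #                       "default_left": True, "internal_value": 0.0, "internal_count": 100,
            #                       "left_child": {"leaf_index": 0, "leaf_value": -0.1, "leaf_count": 60},
            #                       "right_child": {"leaf_index": 1, "leaf_value": 0.2, "leaf_count": 40}}}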
elif type(tree) == dict and 'nodeid' in tree:
""" Directly create tree given the JSON dump (with stats) of a XGBoost model.
"""
def max_id(node):
if "children" in node:
return max(node["nodeid"], *[max_id(n) for n in node["children"]])
else:
return node["nodeid"]
m = max_id(tree) + 1
self.children_left = -np.ones(m, dtype=np.int32)
self.children_right = -np.ones(m, dtype=np.int32)
self.children_default = -np.ones(m, dtype=np.int32)
self.features = -np.ones(m, dtype=np.int32)
self.thresholds = np.zeros(m, dtype=np.float64)
self.values = np.zeros((m, 1), dtype=np.float64)
self.node_sample_weight = np.empty(m, dtype=np.float64)
def extract_data(node, tree):
i = node["nodeid"]
tree.node_sample_weight[i] = node["cover"]
if "children" in node:
tree.children_left[i] = node["yes"]
tree.children_right[i] = node["no"]
tree.children_default[i] = node["missing"]
tree.features[i] = node["split"]
tree.thresholds[i] = node["split_condition"]
for n in node["children"]:
extract_data(n, tree)
elif "leaf" in node:
tree.values[i] = node["leaf"] * scaling
extract_data(tree, self)
elif type(tree) == str:
""" Build a tree from a text dump (with stats) of xgboost.
"""
nodes = [t.lstrip() for t in tree[:-1].split("\n")]
nodes_dict = {}
for n in nodes: nodes_dict[int(n.split(":")[0])] = n.split(":")[1]
m = max(nodes_dict.keys())+1
children_left = -1*np.ones(m,dtype="int32")
children_right = -1*np.ones(m,dtype="int32")
children_default = -1*np.ones(m,dtype="int32")
features = -2*np.ones(m,dtype="int32")
thresholds = -1*np.ones(m,dtype="float64")
values = 1*np.ones(m,dtype="float64")
node_sample_weight = np.zeros(m,dtype="float64")
values_lst = list(nodes_dict.values())
keys_lst = list(nodes_dict.keys())
for i in range(0,len(keys_lst)):
value = values_lst[i]
key = keys_lst[i]
if ("leaf" in value):
# Extract values
val = float(value.split("leaf=")[1].split(",")[0])
node_sample_weight_val = float(value.split("cover=")[1])
# Append to lists
values[key] = val
node_sample_weight[key] = node_sample_weight_val
else:
c_left = int(value.split("yes=")[1].split(",")[0])
c_right = int(value.split("no=")[1].split(",")[0])
c_default = int(value.split("missing=")[1].split(",")[0])
feat_thres = value.split(" ")[0]
if ("<" in feat_thres):
feature = int(feat_thres.split("<")[0][2:])
threshold = float(feat_thres.split("<")[1][:-1])
if ("=" in feat_thres):
feature = int(feat_thres.split("=")[0][2:])
threshold = float(feat_thres.split("=")[1][:-1])
node_sample_weight_val = float(value.split("cover=")[1].split(",")[0])
children_left[key] = c_left
children_right[key] = c_right
children_default[key] = c_default
features[key] = feature
thresholds[key] = threshold
node_sample_weight[key] = node_sample_weight_val
self.children_left = children_left
self.children_right = children_right
self.children_default = children_default
self.features = features
self.thresholds = thresholds
self.values = values[:,np.newaxis] * scaling
self.node_sample_weight = node_sample_weight
else:
raise Exception("Unknown input to SingleTree constructor: " + str(tree))
# Re-compute the number of samples that pass through each node if we are given data
if data is not None and data_missing is not None:
self.node_sample_weight[:] = 0.0
_cext.dense_tree_update_weights(
self.children_left, self.children_right, self.children_default, self.features,
self.thresholds, self.values, 1, self.node_sample_weight, data, data_missing
)
# we compute the expectations to make sure they follow the SHAP logic
self.max_depth = _cext.compute_expectations(
self.children_left, self.children_right, self.node_sample_weight,
self.values
)
class IsoTree(SingleTree):
"""
    In sklearn the node values of an Isolation Forest tree are not calculated in the form we need,
    so this class recomputes them from the node depths and average path lengths.
"""
def __init__(self, tree, tree_features, normalize=False, scaling=1.0, data=None, data_missing=None):
super(IsoTree, self).__init__(tree, normalize, scaling, data, data_missing)
if safe_isinstance(tree, "sklearn.tree._tree.Tree"):
from sklearn.ensemble._iforest import _average_path_length # pylint: disable=no-name-in-module
def _recalculate_value(tree, i , level):
if tree.children_left[i] == -1 and tree.children_right[i] == -1:
value = level + _average_path_length(np.array([tree.n_node_samples[i]]))[0]
self.values[i, 0] = value
return value * tree.n_node_samples[i]
else:
value_left = _recalculate_value(tree, tree.children_left[i] , level + 1)
value_right = _recalculate_value(tree, tree.children_right[i] , level + 1)
self.values[i, 0] = (value_left + value_right) / tree.n_node_samples[i]
return value_left + value_right
_recalculate_value(tree, 0, 0)
if normalize:
self.values = (self.values.T / self.values.sum(1)).T
self.values = self.values * scaling
# re-number the features if each tree gets a different set of features
self.features = np.where(self.features >= 0, tree_features[self.features], self.features)
def get_xgboost_json(model):
""" This gets a JSON dump of an XGBoost model while ensuring the features names are their indexes.
"""
fnames = model.feature_names
model.feature_names = None
json_trees = model.get_dump(with_stats=True, dump_format="json")
model.feature_names = fnames
# this fixes a bug where XGBoost can return invalid JSON
json_trees = [t.replace(": inf,", ": 1000000000000.0,") for t in json_trees]
json_trees = [t.replace(": -inf,", ": -1000000000000.0,") for t in json_trees]
return json_trees
class XGBTreeModelLoader(object):
""" This loads an XGBoost model directly from a raw memory dump.
    We can't use the JSON dump because, due to numerical precision issues, those
    trees can actually be wrong when feature values land almost exactly on a threshold.
"""
def __init__(self, xgb_model):
# new in XGBoost 1.1, 'binf' is appended to the buffer
self.buf = xgb_model.save_raw().lstrip(b'binf')
self.pos = 0
# load the model parameters
self.base_score = self.read('f')
self.num_feature = self.read('I')
self.num_class = self.read('i')
self.contain_extra_attrs = self.read('i')
self.contain_eval_metrics = self.read('i')
self.read_arr('i', 29) # reserved
self.name_obj_len = self.read('Q')
self.name_obj = self.read_str(self.name_obj_len)
self.name_gbm_len = self.read('Q')
self.name_gbm = self.read_str(self.name_gbm_len)
# new in XGBoost 1.0 is that the base_score is saved untransformed (https://github.com/dmlc/xgboost/pull/5101)
# so we have to transform it depending on the objective
import xgboost
if LooseVersion(xgboost.__version__).version[0] >= 1:
if self.name_obj in ["binary:logistic", "reg:logistic"]:
self.base_score = scipy.special.logit(self.base_score) # pylint: disable=no-member
assert self.name_gbm == "gbtree", "Only the 'gbtree' model type is supported, not '%s'!" % self.name_gbm
# load the gbtree specific parameters
self.num_trees = self.read('i')
self.num_roots = self.read('i')
self.num_feature = self.read('i')
self.pad_32bit = self.read('i')
self.num_pbuffer_deprecated = self.read('Q')
self.num_output_group = self.read('i')
self.size_leaf_vector = self.read('i')
self.read_arr('i', 32) # reserved
# load each tree
self.num_roots = np.zeros(self.num_trees, dtype=np.int32)
self.num_nodes = np.zeros(self.num_trees, dtype=np.int32)
self.num_deleted = np.zeros(self.num_trees, dtype=np.int32)
self.max_depth = np.zeros(self.num_trees, dtype=np.int32)
self.num_feature = np.zeros(self.num_trees, dtype=np.int32)
self.size_leaf_vector = np.zeros(self.num_trees, dtype=np.int32)
self.node_parents = []
self.node_cleft = []
self.node_cright = []
self.node_sindex = []
self.node_info = []
self.loss_chg = []
self.sum_hess = []
self.base_weight = []
self.leaf_child_cnt = []
for i in range(self.num_trees):
# load the per-tree params
self.num_roots[i] = self.read('i')
self.num_nodes[i] = self.read('i')
self.num_deleted[i] = self.read('i')
self.max_depth[i] = self.read('i')
self.num_feature[i] = self.read('i')
self.size_leaf_vector[i] = self.read('i')
# load the nodes
self.read_arr('i', 31) # reserved
self.node_parents.append(np.zeros(self.num_nodes[i], dtype=np.int32))
self.node_cleft.append(np.zeros(self.num_nodes[i], dtype=np.int32))
self.node_cright.append(np.zeros(self.num_nodes[i], dtype=np.int32))
self.node_sindex.append(np.zeros(self.num_nodes[i], dtype=np.uint32))
self.node_info.append(np.zeros(self.num_nodes[i], dtype=np.float32))
for j in range(self.num_nodes[i]):
self.node_parents[-1][j] = self.read('i')
self.node_cleft[-1][j] = self.read('i')
self.node_cright[-1][j] = self.read('i')
self.node_sindex[-1][j] = self.read('I')
self.node_info[-1][j] = self.read('f')
# load the stat nodes
self.loss_chg.append(np.zeros(self.num_nodes[i], dtype=np.float32))
self.sum_hess.append(np.zeros(self.num_nodes[i], dtype=np.float32))
self.base_weight.append(np.zeros(self.num_nodes[i], dtype=np.float32))
            self.leaf_child_cnt.append(np.zeros(self.num_nodes[i], dtype=int))
for j in range(self.num_nodes[i]):
self.loss_chg[-1][j] = self.read('f')
self.sum_hess[-1][j] = self.read('f')
self.base_weight[-1][j] = self.read('f')
self.leaf_child_cnt[-1][j] = self.read('i')
def get_trees(self, data=None, data_missing=None):
shape = (self.num_trees, self.num_nodes.max())
        self.children_default = np.zeros(shape, dtype=int)
        self.features = np.zeros(shape, dtype=int)
self.thresholds = np.zeros(shape, dtype=np.float32)
self.values = np.zeros((shape[0], shape[1], 1), dtype=np.float32)
trees = []
for i in range(self.num_trees):
for j in range(self.num_nodes[i]):
if np.right_shift(self.node_sindex[i][j], np.uint32(31)) != 0:
self.children_default[i,j] = self.node_cleft[i][j]
else:
self.children_default[i,j] = self.node_cright[i][j]
self.features[i,j] = self.node_sindex[i][j] & ((np.uint32(1) << np.uint32(31)) - np.uint32(1))
if self.node_cleft[i][j] >= 0:
# Xgboost uses < for thresholds where shap uses <=
# Move the threshold down by the smallest possible increment
self.thresholds[i, j] = np.nextafter(self.node_info[i][j], - np.float32(np.inf))
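                    # e.g. a float32 threshold of 0.5 becomes np.nextafter(np.float32(0.5), -np.inf) == 0.49999997,
                    # so XGBoost's strict "x < 0.5" and the "x <= threshold" convention used here agree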
else:
self.values[i,j] = self.node_info[i][j]
l = len(self.node_cleft[i])
trees.append(SingleTree({
"children_left": self.node_cleft[i],
"children_right": self.node_cright[i],
"children_default": self.children_default[i,:l],
"feature": self.features[i,:l],
"threshold": self.thresholds[i,:l],
"value": self.values[i,:l],
"node_sample_weight": self.sum_hess[i]
}, data=data, data_missing=data_missing))
return trees
def read(self, dtype):
size = struct.calcsize(dtype)
val = struct.unpack(dtype, self.buf[self.pos:self.pos+size])[0]
self.pos += size
return val
def read_arr(self, dtype, n_items):
format = "%d%s" % (n_items, dtype)
size = struct.calcsize(format)
val = struct.unpack(format, self.buf[self.pos:self.pos+size])[0]
self.pos += size
return val
def read_str(self, size):
val = self.buf[self.pos:self.pos+size].decode('utf-8')
self.pos += size
return val
def print_info(self):
print("--- global parmeters ---")
print("base_score =", self.base_score)
print("num_feature =", self.num_feature)
print("num_class =", self.num_class)
print("contain_extra_attrs =", self.contain_extra_attrs)
print("contain_eval_metrics =", self.contain_eval_metrics)
print("name_obj_len =", self.name_obj_len)
print("name_obj =", self.name_obj)
print("name_gbm_len =", self.name_gbm_len)
print("name_gbm =", self.name_gbm)
print()
print("--- gbtree specific parameters ---")
print("num_trees =", self.num_trees)
print("num_roots =", self.num_roots)
print("num_feature =", self.num_feature)
print("pad_32bit =", self.pad_32bit)
print("num_pbuffer_deprecated =", self.num_pbuffer_deprecated)
print("num_output_group =", self.num_output_group)
print("size_leaf_vector =", self.size_leaf_vector)
class CatBoostTreeModelLoader:
def __init__(self, cb_model):
# cb_model.save_model("cb_model.json", format="json")
# self.loaded_cb_model = json.load(open("cb_model.json", "r"))
import tempfile
tmp_file = tempfile.NamedTemporaryFile()
cb_model.save_model(tmp_file.name, format="json")
self.loaded_cb_model = json.load(open(tmp_file.name, "r"))
tmp_file.close()
# load the CatBoost oblivious trees specific parameters
self.num_trees = len(self.loaded_cb_model['oblivious_trees'])
self.max_depth = self.loaded_cb_model['model_info']['params']['tree_learner_options']['depth']
def get_trees(self, data=None, data_missing=None):
# load each tree
trees = []
for tree_index in range(self.num_trees):
# load the per-tree params
#depth = len(self.loaded_cb_model['oblivious_trees'][tree_index]['splits'])
# load the nodes
# Re-compute the number of samples that pass through each node if we are given data
leaf_weights = self.loaded_cb_model['oblivious_trees'][tree_index]['leaf_weights']
leaf_weights_unraveled = [0] * (len(leaf_weights) - 1) + leaf_weights
leaf_weights_unraveled[0] = sum(leaf_weights)
for index in range(len(leaf_weights) - 2, 0, -1):
leaf_weights_unraveled[index] = leaf_weights_unraveled[2 * index + 1] + leaf_weights_unraveled[2 * index + 2]
leaf_values = self.loaded_cb_model['oblivious_trees'][tree_index]['leaf_values']
leaf_values_unraveled = [0] * (len(leaf_values) - 1) + leaf_values
children_left = [i * 2 + 1 for i in range(len(leaf_values) - 1)]
children_left += [-1] * len(leaf_values)
children_right = [i * 2 for i in range(1, len(leaf_values))]
children_right += [-1] * len(leaf_values)
children_default = [i * 2 + 1 for i in range(len(leaf_values) - 1)]
children_default += [-1] * len(leaf_values)
# load the split features and borders
# split features and borders go from leafs to the root
split_features_index = []
borders = []
# split features and borders go from leafs to the root
for elem in self.loaded_cb_model['oblivious_trees'][tree_index]['splits']:
split_type = elem.get('split_type')
if split_type == 'FloatFeature':
split_feature_index = elem.get('float_feature_index')
borders.append(elem['border'])
elif split_type == 'OneHotFeature':
split_feature_index = elem.get('cat_feature_index')
borders.append(elem['value'])
else:
split_feature_index = elem.get('ctr_target_border_idx')
borders.append(elem['border'])
split_features_index.append(split_feature_index)
split_features_index_unraveled = []
for counter, feature_index in enumerate(split_features_index[::-1]):
split_features_index_unraveled += [feature_index] * (2 ** counter)
split_features_index_unraveled += [0] * len(leaf_values)
borders_unraveled = []
for counter, border in enumerate(borders[::-1]):
borders_unraveled += [border] * (2 ** counter)
borders_unraveled += [0] * len(leaf_values)
trees.append(SingleTree({"children_left": np.array(children_left),
"children_right": np.array(children_right),
"children_default": np.array(children_default),
"feature": np.array(split_features_index_unraveled),
"threshold": np.array(borders_unraveled),
"value": np.array(leaf_values_unraveled).reshape((-1,1)),
"node_sample_weight": np.array(leaf_weights_unraveled),
}, data=data, data_missing=data_missing))
return trees
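# Hedged, standalone sketch (illustrative only; the helper name below is not part of
# the original module): CatBoost oblivious trees are symmetric, so get_trees lays the
# internal nodes and the 2**depth leaves out as an implicit binary heap in which node
# i has children 2*i + 1 and 2*i + 2, with leaves appended after the internal nodes.
# This re-derives the children arrays built above for a given leaf count, purely so
# the layout can be inspected in isolation.
def _oblivious_heap_children_sketch(n_leaves):
    """Return (children_left, children_right) in the heap layout used by get_trees."""
    children_left = [i * 2 + 1 for i in range(n_leaves - 1)] + [-1] * n_leaves
    children_right = [i * 2 for i in range(1, n_leaves)] + [-1] * n_leaves
    return children_left, children_right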
| mit |
0asa/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
    list_n_samples = np.linspace(100, 10000, 5).astype(int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
jamesafoster/CompSkillsF16 | summarize_Pandas_readtable.py | 2 | 2096 | #!/usr/bin/env python
# demonstration of data exploration code for Comp Bio course, Fall 2016
# James A. Foster
# WARNING: not completely error checked
'''
Usage:
summarize_Pandas.py inputFile
where inputfile is a tab delimited summary of a Hiseq dataset, as in Homework 5
Questions to answer:
- how many times does THIS gene appear in THAT country?
- what is average GC content for THIS gene in THAT country?
'''
import sys
import pandas as pd
from pandas import Series, DataFrame
# read data from input file
try:
allDataFrame = pd.read_table( sys.argv[1], comment='#', usecols=[ 'Gene','Country','gcContent' ] )
except OSError as err:
    print( "**ERROR** Cannot open %s, error: %s" % ( sys.argv[1], err ) )
    sys.exit(1)
except Exception:
    print( "**ERROR** unknown error with %s: %s" % ( sys.argv[1], sys.exc_info()[0] ) )
    sys.exit(1)
allDataFrame['GeneFamily'] = [x[:x.find('_')] for x in allDataFrame['Gene'] ]
allDataFrame['highGC'] = allDataFrame.gcContent > 0.4
allDataFrame['lowGC'] = allDataFrame.gcContent < 0.4
print( allDataFrame.describe(include='all') )
# summarize GC content by country
gcByCountry = DataFrame( index={x for x in allDataFrame.Country},
columns=['mean','std'] )
for nextCountry in sorted( gcByCountry.index ): # sorted( set( allDataFrame.Country ) ):
    #same as: nextMean, nextSTD = allDataFrame[ allDataFrame.Country == nextCountry ].describe().loc[['mean','std'],'gcContent']
    thisRow = allDataFrame[ allDataFrame.Country == nextCountry ]
    theseStats = thisRow.describe()
    nextMean, nextSTD = theseStats.loc[['mean','std'],'gcContent']
    gcByCountry.loc[ nextCountry ] = [nextMean, nextSTD ]
# summarize GC content by gene family
gcByGeneFamily = DataFrame( index={x for x in allDataFrame.GeneFamily},
columns=['mean','std'] )
for nextGF in sorted( gcByGeneFamily.index ):
    theseStats = allDataFrame[ allDataFrame.GeneFamily == nextGF ].describe()
    nextMean, nextSTD = theseStats.loc[['mean','std'],'gcContent']
    gcByGeneFamily.loc[ nextGF ] = [ nextMean, nextSTD ]
print( gcByCountry )
print( gcByGeneFamily )
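# Hedged sketch (illustrative; the function name is an assumption, not part of the
# original script): the two questions posed in the module docstring can also be
# answered with a single groupby instead of the explicit loops above. It is wrapped
# in a function so importing or re-running the script does not change its output.
def summarize_gene_by_country(frame):
    """Return count, mean and std of gcContent per (Gene, Country) pair."""
    grouped = frame.groupby(['Gene', 'Country'])['gcContent']
    return grouped.agg(['count', 'mean', 'std'])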
| gpl-3.0 |
evanbiederstedt/RRBSfun | trees/chrom_scripts/cll_chr18.py | 1 | 8247 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50) # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr18"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ['RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("cll_chr18.phy", header=None, index=None)
print(tott.shape)
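# Hedged toy helper (illustrative only, not used by the pipeline above): the last few
# steps turn a positions-by-samples 0/1/NaN matrix into one character string per
# sample (NaN written as '?') and prefix it with the sample name, i.e. a PHYLIP-like
# record. This restates that transformation for an arbitrary DataFrame whose columns
# are samples and whose rows are positions.
def binary_matrix_to_phylip_rows(matrix):
    """Return a Series of 'sample_name 0101?...' strings from a 0/1/NaN DataFrame."""
    encoded = matrix.applymap(lambda x: str(int(x)) if pd.notnull(x) else "?")
    joined = encoded.astype(str).apply(''.join)
    return pd.Series(joined.index.astype(str).str.cat(joined.astype(str), ' '))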
| mit |
peterfpeterson/mantid | qt/applications/workbench/workbench/widgets/plotselector/presenter.py | 3 | 15280 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
import os
import re
from .model import PlotSelectorModel
from .view import PlotSelectorView, Column
class PlotSelectorPresenter(object):
"""
Presenter for the plot selector widget. This class can be
responsible for the creation of the model and view, passing in
the GlobalFigureManager as an argument, or the presenter and view
can be passed as arguments (only intended for testing).
"""
def __init__(self, global_figure_manager, view=None, model=None):
"""
Initialise the presenter, creating the view and model, and
setting the initial plot list
:param global_figure_manager: The GlobalFigureManager class
:param view: Optional - a view to use instead of letting the
class create one (intended for testing)
:param model: Optional - a model to use instead of letting
the class create one (intended for testing)
"""
# Create model and view, or accept mocked versions
if view is None:
self.view = PlotSelectorView(self)
else:
self.view = view
if model is None:
self.model = PlotSelectorModel(self, global_figure_manager)
else:
self.model = model
# Make sure the plot list is up to date
self.update_plot_list()
def get_plot_name_from_number(self, plot_number):
return self.model.get_plot_name_from_number(plot_number)
# ------------------------ Plot Updates ------------------------
def update_plot_list(self):
"""
Updates the plot list in the model and the view. Filter text
is applied to the updated selection if required.
"""
plot_list = self.model.get_plot_list()
self.view.set_plot_list(plot_list)
def append_to_plot_list(self, plot_number):
"""
Appends the plot name to the end of the plot list
:param plot_number: The unique number in GlobalFigureManager
"""
self.view.append_to_plot_list(plot_number)
self.view.set_visibility_icon(plot_number, self.model.is_visible(plot_number))
def remove_from_plot_list(self, plot_number):
"""
Removes the plot name from the plot list
:param plot_number: The unique number in GlobalFigureManager
"""
self.view.remove_from_plot_list(plot_number)
def rename_in_plot_list(self, plot_number, new_name):
"""
Replaces a name in the plot list
:param plot_number: The unique number in GlobalFigureManager
:param new_name: The new name for the plot
"""
self.view.rename_in_plot_list(plot_number, new_name)
# ----------------------- Plot Filtering ------------------------
def filter_text_changed(self):
"""
Called by the view when the filter text is changed (e.g. by
typing or clearing the text)
"""
if self.view.get_filter_text():
self.view.filter_plot_list()
else:
self.view.unhide_all_plots()
def is_shown_by_filter(self, plot_number):
"""
:param plot_number: The unique number in GlobalFigureManager
:return: True if shown, or False if filtered out
"""
filter_text = self.view.get_filter_text()
plot_name = self.get_plot_name_from_number(plot_number)
return filter_text.lower() in plot_name.lower()
# ------------------------ Plot Showing ------------------------
def show_single_selected(self):
"""
When a list item is double clicked the view calls this method
to bring the selected plot to the front
"""
plot_number = self.view.get_currently_selected_plot_number()
self._make_plot_active(plot_number)
def show_multiple_selected(self):
"""
Shows multiple selected plots, e.g. from pressing the 'Show'
button with multiple selected plots
"""
selected_plots = self.view.get_all_selected_plot_numbers()
for plot_number in selected_plots:
self._make_plot_active(plot_number)
def _make_plot_active(self, plot_number):
"""
        Make the plot with the given number active - bring it to the
front and make it the choice for overplotting
:param plot_number: The unique number in GlobalFigureManager
"""
try:
self.model.show_plot(plot_number)
except ValueError as e:
print(e)
def set_active_font(self, plot_number):
"""
Set the icon for the active plot to be colored
:param plot_number: The unique number in GlobalFigureManager
"""
active_plot_number = self.view.active_plot_number
if active_plot_number > 0:
try:
self.view.set_active_font(active_plot_number, False)
except ValueError:
# The last active plot could have been closed
# already, so there is nothing to do
pass
self.view.set_active_font(plot_number, True)
self.view.active_plot_number = plot_number
# ------------------------ Plot Hiding -------------------------
def hide_selected_plots(self):
"""
Hide all plots that are selected in the view
"""
selected_plots = self.view.get_all_selected_plot_numbers()
for plot_number in selected_plots:
self._hide_plot(plot_number)
def _hide_plot(self, plot_number):
"""
Hides a single plot
"""
try:
self.model.hide_plot(plot_number)
except ValueError as e:
print(e)
def toggle_plot_visibility(self, plot_number):
"""
Toggles a plot between hidden and shown
:param plot_number: The unique number in GlobalFigureManager
"""
if self.model.is_visible(plot_number):
self._hide_plot(plot_number)
else:
self._make_plot_active(plot_number)
self.update_visibility_icon(plot_number)
def update_visibility_icon(self, plot_number):
"""
Updates the icon to indicate a plot as hidden or visible
:param plot_number: The unique number in GlobalFigureManager
"""
try:
is_visible = self.model.is_visible(plot_number)
self.view.set_visibility_icon(plot_number, is_visible)
except ValueError:
# There is a chance the plot was closed, which calls an
# update to this method. If we can not get the visibility
# status it is safe to assume the plot has been closed.
pass
# ------------------------ Plot Renaming ------------------------
def rename_figure(self, plot_number, new_name):
"""
Replaces a name in the plot list
:param plot_number: The unique number in GlobalFigureManager
:param new_name: The new plot name
"""
try:
self.model.rename_figure(plot_number, new_name)
except ValueError as e:
# We need to undo the rename in the view
self.view.rename_in_plot_list(plot_number, new_name)
print(e)
# ------------------------ Plot Closing -------------------------
def close_action_called(self):
"""
This is called by the view when closing plots is requested
(e.g. pressing close or delete).
"""
selected_plots = self.view.get_all_selected_plot_numbers()
self._close_plots(selected_plots)
def close_single_plot(self, plot_number):
"""
This is used to close plots when a close action is called
that does not refer to the selected plot(s)
:param plot_number: The unique number in GlobalFigureManager
"""
self._close_plots([plot_number])
def _close_plots(self, list_of_plot_numbers):
"""
        Accepts a list of plot numbers to close
        :param list_of_plot_numbers: A list of plot numbers to close
"""
for plot_number in list_of_plot_numbers:
try:
self.model.close_plot(plot_number)
except ValueError as e:
print(e)
# ----------------------- Plot Sorting --------------------------
def set_sort_order(self, is_ascending):
"""
Sets the sort order in the view
:param is_ascending: If true ascending order, else descending
"""
self.view.set_sort_order(is_ascending)
def set_sort_type(self, sort_type):
"""
Sets the sort order in the view
:param sort_type: A Column enum with the column to sort on
"""
self.view.set_sort_type(sort_type)
self.update_last_active_order()
def update_last_active_order(self):
"""
        Update the sort keys in the view. This is only required when
        changes to the last shown order occur in the model; when
        renaming, the key has already been set.
"""
if self.view.sort_type() == Column.LastActive:
self._set_last_active_order()
def _set_last_active_order(self):
"""
Set the last shown order in the view. This checks the sorting
currently set and then sets the sort keys to the appropriate
values
"""
last_active_values = self.model.last_active_values()
self.view.set_last_active_values(last_active_values)
def get_initial_last_active_value(self, plot_number):
"""
Gets the initial last active value for a plot just added, in
this case it is assumed to not have been shown
:param plot_number: The unique number in GlobalFigureManager
:return: A string with the last active value
"""
return '_' + self.model.get_plot_name_from_number(plot_number)
def get_renamed_last_active_value(self, plot_number, old_last_active_value):
"""
        Gets the last active value for a plot that was renamed. If the
        plot had a numeric value (i.e. it has already been shown) this
        is retained, otherwise the initial value is generated
:param plot_number: The unique number in GlobalFigureManager
:param old_last_active_value: The previous last active value
"""
if old_last_active_value.isdigit():
return old_last_active_value
else:
return self.get_initial_last_active_value(plot_number)
# ---------------------- Plot Exporting -------------------------
def export_plots_called(self, extension):
"""
Export plots called from the view, then a single or multiple
plots exported depending on the number currently selected
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
plot_numbers = self.view.get_all_selected_plot_numbers()
if len(plot_numbers) == 1:
self._export_single_plot(plot_numbers[0], extension)
elif len(plot_numbers) > 1:
self._export_multiple_plots(plot_numbers, extension)
def _export_single_plot(self, plot_number, extension):
"""
Called when a single plot is selected to export - prompts for
a filename then tries to save the plot
:param plot_number: The unique number in GlobalFigureManager
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
absolute_path = self.view.get_file_name_for_saving(extension)
        if not absolute_path.endswith(extension):
absolute_path += extension
try:
self.model.export_plot(plot_number, absolute_path)
except ValueError as e:
print(e)
def _export_multiple_plots(self, plot_numbers, extension):
"""
Export all selected plots in the plot_numbers list, first
prompting for a save directory then sanitising plot names to
unique, usable file names
:param plot_numbers: A list of plot numbers to export
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
dir_name = self.view.get_directory_name_for_saving()
# A temporary dictionary holding plot numbers as keys, plot
# names as values
plots = {}
for plot_number in plot_numbers:
plot_name = self.model.get_plot_name_from_number(plot_number)
plot_name = self._replace_special_characters(plot_name)
if plot_name in plots.values():
plot_name = self._make_unique_name(plot_name, plots)
plots[plot_number] = plot_name
self._export_plot(plot_number, plot_name, dir_name, extension)
def _replace_special_characters(self, string):
"""
Removes any characters that are not valid in file names
across all operating systems ('/' for Linux/Mac), more for
Windows
:param string: The string to replace characters in
:return: The string with special characters replace by '-'
"""
return re.sub(r'[<>:"/|\\?*]', r'-', string)
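        # Illustrative examples (comment only) of the substitution above:
        #   _replace_special_characters('my/plot:1?')    -> 'my-plot-1-'
        #   _replace_special_characters('T=10K |run*2|') -> 'T=10K -run-2-'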
def _make_unique_name(self, name, dictionary):
"""
Given a name and a dictionary, make a unique name that does
not already exist in the dictionary values by appending
' (1)', ' (2)', ' (3)' etc. to the end of the name
:param name: A string with the non-unique name
:param dictionary: A dictionary with string values
:return : The unique plot name
"""
i = 1
while True:
plot_name_attempt = name + ' ({})'.format(str(i))
if plot_name_attempt not in dictionary.values():
break
i += 1
return plot_name_attempt
def _export_plot(self, plot_number, plot_name, dir_name, extension):
"""
Given a plot number, plot name, directory and extension
construct the absolute path name and call the model to save
the figure
:param plot_number: The unique number in GlobalFigureManager
:param plot_name: The name to use for saving
:param dir_name: The directory to save to
:param extension: The file extension as a string including
a '.', for example '.png' (must be a type
supported by matplotlib)
"""
if dir_name:
filename = os.path.join(dir_name, plot_name + extension)
try:
self.model.export_plot(plot_number, filename)
except ValueError as e:
print(e)
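# Hedged wiring sketch (illustrative, not part of the original module): the __init__
# docstring notes that a view and model can be injected for testing. The stub objects
# below are assumptions made for illustration only; they show that the presenter
# refreshes the view from the model as soon as it is constructed.
if __name__ == '__main__':
    from unittest import mock
    stub_view = mock.Mock(get_filter_text=mock.Mock(return_value=''))
    stub_model = mock.Mock(get_plot_list=mock.Mock(return_value=[]))
    presenter = PlotSelectorPresenter(None, view=stub_view, model=stub_model)
    stub_view.set_plot_list.assert_called_once_with([])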
| gpl-3.0 |
semio/ddf_utils | ddf_utils/chef/procedure/extract_concepts.py | 1 | 4444 | # -*- coding: utf-8 -*-
"""extract_concepts procedure for recipes"""
import logging
import numpy as np
import pandas as pd
from typing import List
from .. helpers import debuggable
from .. model.ingredient import Ingredient, ConceptIngredient
from .. model.chef import Chef
logger = logging.getLogger('extract_concepts')
@debuggable
def extract_concepts(chef: Chef, ingredients: List[Ingredient], result,
join=None, overwrite=None, include_keys=False) -> ConceptIngredient:
"""extract concepts from other ingredients.
.. highlight:: yaml
Procedure format:
::
procedure: extract_concepts
ingredients: # list of ingredient id
- ingredient_id_1
- ingredient_id_2
result: str # new ingredient id
options:
join: # optional
base: str # base concept ingredient id
type: {'full_outer', 'ingredients_outer'} # default is full_outer
overwrite: # overwrite some concept types
country: entity_set
year: time
include_keys: true # if we should include the primaryKeys concepts
Parameters
----------
ingredients
        any number of ingredients to extract concepts from
Keyword Args
------------
join : dict, optional
the base ingredient to join
overwrite : dict, optional
overwrite concept types for some concepts
include_keys : bool, optional
        if we should include the primaryKeys of the ingredients, defaults to false
See Also
--------
:py:func:`ddf_utils.transformer.extract_concepts` : related function in transformer
module
Note
----
- all concepts in ingredients in the ``ingredients`` parameter will be extracted
to a new concept ingredient
- ``join`` option is optional; if present then the ``base`` will merge with concepts
from ``ingredients``
- ``full_outer`` join means get the union of concepts; ``ingredients_outer`` means
only keep concepts from ``ingredients``
"""
# ingredients = [chef.dag.get_node(x).evaluate() for x in ingredients]
logger.info("extract concepts: {}".format([x.id for x in ingredients]))
if join:
base = chef.dag.get_node(join['base']).evaluate()
try:
join_type = join['type']
except KeyError:
join_type = 'full_outer'
concepts = base.get_data()['concept'].set_index('concept')
else:
concepts = pd.DataFrame([], columns=['concept', 'concept_type']).set_index('concept')
join_type = 'full_outer'
new_concepts = set()
for i in ingredients:
data = i.get_data()
if i.dtype in ['concepts', 'entities']:
pks = [i.key]
else:
pks = i.key
for k, df in data.items():
if include_keys:
cols = df.columns
else:
cols = [x for x in df.columns if x not in pks]
cat_cols = df.select_dtypes(include=['category']).columns
for col in cols:
if col.startswith('is--'):
continue
new_concepts.add(col)
if col in concepts.index:
continue
# if df.dtypes[col] == 'category': # doesn't work
if col in cat_cols:
concepts.loc[col, 'concept_type'] = 'string'
else:
concepts.loc[col, 'concept_type'] = 'measure'
if join_type == 'ingredients_outer':
# ingredients_outer join: only keep concepts appears in ingredients
concepts = concepts.loc[new_concepts]
# add name column if there isn't one
if 'name' not in concepts.columns:
concepts['name'] = np.nan
if 'name' not in concepts.index.values:
concepts.loc['name', 'concept_type'] = 'string'
concepts.loc['name', 'name'] = 'Name'
concepts['name'] = concepts['name'].fillna(
concepts.index.to_series().map(lambda x: str(x).replace('_', ' ').title()))
# overwrite some of the types
if overwrite:
for k, v in overwrite.items():
concepts.loc[k, 'concept_type'] = v
if not result:
result = 'concepts_extracted'
return ConceptIngredient.from_procedure_result(result, 'concept',
data_computed={'concept': concepts.reset_index()})
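# Hedged illustration (the helper name is an assumption, not part of the chef API):
# the loop above maps pandas 'category' columns to concept_type 'string' and every
# other non-key column to 'measure'. This restates that rule on a plain DataFrame so
# the inference can be checked in isolation.
def _infer_concept_types_sketch(df, keys=()):
    """Return {column: concept_type} using the same rule as extract_concepts."""
    cat_cols = df.select_dtypes(include=['category']).columns
    out = {}
    for col in df.columns:
        if col in keys or col.startswith('is--'):
            continue
        out[col] = 'string' if col in cat_cols else 'measure'
    return out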
| mit |
eistre91/ThinkStats2 | code/chap13soln.py | 68 | 2961 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import thinkplot
import thinkstats2
import survival
def CleanData(resp):
"""Cleans respondent data.
resp: DataFrame
"""
resp.cmdivorcx.replace([9998, 9999], np.nan, inplace=True)
resp['notdivorced'] = resp.cmdivorcx.isnull().astype(int)
resp['duration'] = (resp.cmdivorcx - resp.cmmarrhx) / 12.0
resp['durationsofar'] = (resp.cmintvw - resp.cmmarrhx) / 12.0
month0 = pandas.to_datetime('1899-12-15')
dates = [month0 + pandas.DateOffset(months=cm)
for cm in resp.cmbirth]
resp['decade'] = (pandas.DatetimeIndex(dates).year - 1900) // 10
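# Hedged helper (illustrative, not used by the solutions below): NSFG century-month
# codes count months from December 1899, which is why month0 above is 1899-12-15.
# This converts a century-month code to a calendar year so the decade arithmetic in
# CleanData can be checked by hand, e.g. cm=1 -> 1900.
def century_month_to_year(cm):
    """Convert an NSFG century-month code to a calendar year."""
    month0 = pandas.to_datetime('1899-12-15')
    return (month0 + pandas.DateOffset(months=cm)).year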
def ResampleDivorceCurve(resps):
"""Plots divorce curves based on resampled data.
resps: list of respondent DataFrames
"""
for _ in range(41):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pandas.concat(samples, ignore_index=True)
PlotDivorceCurveByDecade(sample, color='#225EA8', alpha=0.1)
thinkplot.Show(xlabel='years',
axis=[0, 28, 0, 1])
def ResampleDivorceCurveByDecade(resps):
"""Plots divorce curves for each birth cohort.
resps: list of respondent DataFrames
"""
for i in range(41):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pandas.concat(samples, ignore_index=True)
groups = sample.groupby('decade')
if i == 0:
survival.AddLabelsByDecade(groups, alpha=0.7)
EstimateSurvivalByDecade(groups, alpha=0.1)
thinkplot.Save(root='survival7',
xlabel='years',
axis=[0, 28, 0, 1])
def EstimateSurvivalByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for name, group in groups:
print(name, len(group))
_, sf = EstimateSurvival(group)
thinkplot.Plot(sf, **options)
def EstimateSurvival(resp):
"""Estimates the survival curve.
resp: DataFrame of respondents
returns: pair of HazardFunction, SurvivalFunction
"""
complete = resp[resp.notdivorced == 0].duration
ongoing = resp[resp.notdivorced == 1].durationsofar
hf = survival.EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return hf, sf
def main():
resp6 = survival.ReadFemResp2002()
CleanData(resp6)
married6 = resp6[resp6.evrmarry==1]
resp7 = survival.ReadFemResp2010()
CleanData(resp7)
married7 = resp7[resp7.evrmarry==1]
ResampleDivorceCurveByDecade([married6, married7])
if __name__ == '__main__':
main()
| gpl-3.0 |
jimmylai/slideshare | python_demo/fabfile.py | 1 | 1463 | #!/usr/bin/env python
# -*- encoding: utf8 -*-
'''Program
'''
from fabric.api import local, prefix, sudo, hosts, run
__author__ = 'noahsark'
def search(word=None):
if word is None:
        print('Please specify `word` to search as argument.')
else:
local('find | xargs -i grep -H --color %s {}' % word)
def doc():
with prefix('cd doc'):
local('make html')
def test():
local('nosetests --with-doctest --with-xunit --traverse-namespace --with-coverage '
'--cover-package=python_demo')
def clean():
'''remove pyc files.'''
local('find | grep \.pyc$ | xargs -i rm -f {}')
def ci():
test()
doc()
local('clonedigger --ignore-dir=classifier .')
@hosts('localhost')
def setup():
# packages with system package support
packages = ['numpy', 'scipy', 'matplotlib', 'pandas', 'coverage', 'nose',
'sphinx', 'nltk', 'nose', 'xlwt', 'xlrd', 'jinja2', 'psutil']
sudo('apt-get install -y python-virtualenv')
sudo('apt-get install -y python-pip')
run('virtualenv --no-site-packages env')
for package in packages:
sudo('apt-get build-dep -y python-%s' % package)
with prefix('. env/bin/activate'):
run('pip install %s' % package)
# packages has no system packages
packages = ['ipython', 'scikit-learn', 'pep8']
for package in packages:
with prefix('. env/bin/activate'):
run('pip install %s' % package)
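# Illustrative usage notes (assumed Fabric 1.x command-line conventions, not part of
# the original file): the tasks above are run from the shell with `fab`, e.g.
#
#   fab search:word=pandas   # grep the tree for "pandas"
#   fab test doc             # run the test suite, then build the Sphinx docs
#   fab setup                # provision a virtualenv with the listed packages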
| apache-2.0 |
paulmueller/PyCorrFit | tests/test_simple.py | 2 | 1310 | import numpy as np
from pycorrfit.correlation import Correlation
from pycorrfit.fit import Fit
def create_corr():
corr = Correlation()
tau = np.exp(np.linspace(np.log(1e-3), np.log(1e6), 10))
data = corr.fit_model(corr.fit_parameters, tau)
noise = (np.random.random(data.shape[0])-.5)*.0005
data += noise
corr.correlation = np.dstack((tau, data))[0]
return corr
def test_simple_corr():
corr = create_corr()
oldparms = corr.fit_parameters.copy()
temp = corr.fit_parameters
temp[0] *= 2
temp[-1] *= .1
Fit(corr)
res = oldparms - corr.fit_parameters
assert np.allclose(res, np.zeros_like(res), atol=0.010)
if __name__ == "__main__":
import matplotlib.pylab as plt
corr = create_corr()
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.set_xscale("log")
ax2.set_xscale("log")
print(corr.fit_parameters)
temp = corr.fit_parameters
temp[0] *= 2
temp[-1] *= .1
ax1.plot(corr.correlation_fit[:, 0], corr.correlation_fit[:, 1])
ax1.plot(corr.modeled_fit[:, 0], corr.modeled_fit[:, 1])
print(corr.fit_parameters)
Fit(corr)
print(corr.fit_parameters)
ax2.plot(corr.correlation_fit[:, 0], corr.correlation_fit[:, 1])
ax2.plot(corr.modeled_fit[:, 0], corr.modeled_fit[:, 1])
plt.show()
| gpl-2.0 |
MJuddBooth/pandas | pandas/tests/arrays/categorical/test_missing.py | 3 | 3075 | # -*- coding: utf-8 -*-
import collections
import numpy as np
import pytest
from pandas.compat import lrange
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import Categorical, Index, isna
import pandas.util.testing as tm
class TestCategoricalMissing(object):
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
tm.assert_numpy_array_equal(isna(cat), labels == -1)
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0],
dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
dtype=np.int8))
def test_set_dtype_nans(self):
c = Categorical(['a', 'b', np.nan])
result = c._set_dtype(CategoricalDtype(['a', 'c']))
tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1],
dtype='int8'))
def test_set_item_nan(self):
cat = Categorical([1, 2, 3])
cat[1] = np.nan
exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize('fillna_kwargs, msg', [
(dict(value=1, method='ffill'),
"Cannot specify both 'value' and 'method'."),
(dict(),
"Must specify a fill 'value' or 'method'."),
(dict(method='bad'),
"Invalid fill method. Expecting .* bad"),
])
def test_fillna_raises(self, fillna_kwargs, msg):
# https://github.com/pandas-dev/pandas/issues/19682
cat = Categorical([1, 2, 3])
with pytest.raises(ValueError, match=msg):
cat.fillna(**fillna_kwargs)
@pytest.mark.parametrize("named", [True, False])
def test_fillna_iterable_category(self, named):
# https://github.com/pandas-dev/pandas/issues/21097
if named:
Point = collections.namedtuple("Point", "x y")
else:
Point = lambda *args: args # tuple
cat = Categorical([Point(0, 0), Point(0, 1), None])
result = cat.fillna(Point(0, 0))
expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
tm.assert_categorical_equal(result, expected)
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | sklearn/neighbors/approximate.py | 128 | 22351 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
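# Worked example (comment only): with a conceptual 8-bit hash and a prefix length of
# h=3 bits, a query hash 0b10110101 gives
#   query & left_mask  -> 0b10100000 (160)   # lower bits zeroed out
#   query | right_mask -> 0b10111111 (191)   # lower bits filled with ones
# so the two searchsorted calls bound exactly the slice of the sorted tree whose
# entries share the 3-bit prefix 0b101 with the query (the real masks are 32-bit).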
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
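        # Illustrative note (comment only): for MAX_HASH_SIZE = 32 and a prefix
        # length of 4 bits the packed masks come out as
        #   self._left_mask[4]  == 0xF0000000   # keeps the 4 most significant bits
        #   self._right_mask[4] == 0x0FFFFFFF   # fills the remaining 28 bits with ones
        # which is what _find_matching_indices uses to bound its searchsorted window.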
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
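# Usage sketch (illustrative only): ``partial_fit`` extends an already fitted
# index, so new batches can be appended without re-hashing existing data.  The
# array names below are made up for this example:
#
#     lshf = LSHForest(random_state=0).fit(X_initial)
#     lshf.partial_fit(X_new_batch)            # index now covers both arrays
#     dist, ind = lshf.kneighbors(queries, n_neighbors=5)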
| bsd-3-clause |
jaantollander/Pointwise-Convergence | src_legacy/fourier_series/fourier_dataframe.py | 4 | 2447 | # coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
from math import ceil
from src_legacy.fourier_series.series.base import SeriesExpansionBase
class FourierDataFrame:
"""
Generates Fourier series data into pandas DataFrame.
http://pandas.pydata.org/pandas-docs/stable/internals.html#subclassing-pandas-data-structures
"""
start_index = 1
def __init__(self, series_expansion, size):
self.size = size
self.shape = []
if isinstance(series_expansion, SeriesExpansionBase):
self.series_expansion = series_expansion
# if self.size > self.series_expansion.degree:
# self.size = self.series_expansion.degree
self.gen = self.series_expansion.generator()
self.inputs = self.series_expansion.inputs
self.shape.append(self.size)
self.shape.append(np.product(self.inputs.shape))
self.functions = self.series_expansion.functions
else:
raise ValueError()
self.count = 0
self.max_count = ceil(self.series_expansion.degree / self.size)
self.columns = pd.MultiIndex.from_product(iterables=self.inputs,
names=self.inputs.symbols)
self.index = np.arange(self.start_index, self.size + self.start_index,
dtype=np.int64)
self.index = pd.Index(data=self.index, name='degree')
def empty_dataframe(self):
data = {}
index = self.index + self.count * self.size
for name in self.functions:
array = np.zeros(shape=self.shape, dtype=np.float64)
data[name] = pd.DataFrame(data=array, index=index,
columns=self.columns)
return data
def generate(self):
data = self.empty_dataframe()
for i, (degree, values) in enumerate(self.gen):
print('deg:', degree)
for name, value in zip(self.functions, values):
arr = value.flatten()
data[name].iloc[i, :] = arr
if i == self.size-1:
break
self.count += 1
return data
def generator(self):
while self.count != self.max_count:
yield self.generate()
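# Usage sketch (illustrative only): given a concrete SeriesExpansionBase
# instance ``expansion`` (hypothetical name), the generator yields dicts of
# DataFrames in chunks of ``size`` degrees:
#
#     fdf = FourierDataFrame(expansion, size=100)
#     for chunk in fdf.generator():
#         for name, frame in chunk.items():
#             print(name, frame.shape)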
| mit |
TimoRoth/oggm | oggm/tests/test_benchmarks.py | 2 | 20750 | # Python imports
import unittest
import numpy as np
import os
import shutil
import xarray as xr
import pytest
import oggm
from scipy import optimize as optimization
salem = pytest.importorskip('salem')
gpd = pytest.importorskip('geopandas')
# Locals
import oggm.cfg as cfg
from oggm import tasks, utils, workflow
from oggm.workflow import execute_entity_task
from oggm.tests.funcs import get_test_dir, apply_test_ref_tstars
from oggm.utils import get_demo_file
from oggm.core import gis, centerlines
from oggm.core.massbalance import ConstantMassBalance
pytestmark = pytest.mark.test_env("benchmark")
do_plot = False
class TestSouthGlacier(unittest.TestCase):
# Test case obtained from ITMIX
# Data available at:
# oggm-sample-data/tree/master/benchmarks/south_glacier
#
# Citation:
#
# Flowers, G.E., N. Roux, S. Pimentel, and C.G. Schoof (2011). Present
# dynamics and future prognosis of a slowly surging glacier.
# The Cryosphere, 5, 299-313. DOI: 10.5194/tc-5-299-2011, 2011.
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
# Init
cfg.initialize()
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['working_dir'] = self.testdir
cfg.PATHS['dem_file'] = get_demo_file('dem_SouthGlacier.tif')
cfg.PARAMS['border'] = 10
apply_test_ref_tstars()
self.tf = get_demo_file('cru_ts4.01.1901.2016.SouthGlacier.tmp.dat.nc')
self.pf = get_demo_file('cru_ts4.01.1901.2016.SouthGlacier.pre.dat.nc')
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def get_ref_data(self, gdir):
# Reference data
df = salem.read_shapefile(get_demo_file('IceThick_SouthGlacier.shp'))
coords = np.array([p.xy for p in df.geometry]).squeeze()
df['lon'] = coords[:, 0]
df['lat'] = coords[:, 1]
df = df[['lon', 'lat', 'thick']]
ii, jj = gdir.grid.transform(df['lon'], df['lat'], crs=salem.wgs84,
nearest=True)
df['i'] = ii
df['j'] = jj
df['ij'] = ['{:04d}_{:04d}'.format(i, j) for i, j in zip(ii, jj)]
return df.groupby('ij').mean()
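# Note on get_ref_data (descriptive comment): each measured point is mapped
# onto the glacier grid with ``gdir.grid.transform`` and points falling into
# the same grid cell (identical 'ij' key) are averaged by the final
# ``groupby('ij').mean()``, so the reference thickness is one value per grid
# cell rather than one per raw measurement.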
def test_mb(self):
# This is a function to produce the MB function needed by Anna
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
mbref = salem.GeoTiff(get_demo_file('mb_SouthGlacier.tif'))
demref = salem.GeoTiff(get_demo_file('dem_SouthGlacier.tif'))
mbref = mbref.get_vardata()
mbref[mbref == -9999] = np.NaN
demref = demref.get_vardata()[np.isfinite(mbref)]
mbref = mbref[np.isfinite(mbref)] * 1000
# compute the bias to make it 0 SMB on the 2D DEM
rho = cfg.PARAMS['ice_density']
mbmod = ConstantMassBalance(gdirs[0], bias=0)
mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * rho
mbmod = ConstantMassBalance(gdirs[0], bias=np.average(mymb))
mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * rho
np.testing.assert_allclose(np.average(mymb), 0., atol=1e-3)
# Same for ref
mbref = mbref - np.average(mbref)
np.testing.assert_allclose(np.average(mbref), 0., atol=1e-3)
# Fit poly
p = np.polyfit(demref, mbref, deg=2)
poly = np.poly1d(p)
myfit = poly(demref)
np.testing.assert_allclose(np.average(myfit), 0., atol=1e-3)
if do_plot:
import matplotlib.pyplot as plt
plt.scatter(mbref, demref, s=5,
label='Obs (2007-2012), shifted to Avg(SMB) = 0')
plt.scatter(mymb, demref, s=5, label='OGGM MB at t*')
plt.scatter(myfit, demref, s=5, label='Polyfit', c='C3')
plt.xlabel('MB (mm w.e. yr-1)')
plt.ylabel('Altitude (m)')
plt.legend()
plt.show()
def test_inversion_attributes(self):
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Tested tasks
task_list = [
tasks.gridded_attributes,
tasks.gridded_mb_attributes,
]
for task in task_list:
execute_entity_task(task, gdirs)
# Check certain things
gdir = gdirs[0]
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
# The max catchment area should be area of glacier
assert (ds['catchment_area'].max() ==
ds['glacier_mask'].sum() * gdir.grid.dx**2)
assert (ds['catchment_area_on_catch'].max() ==
ds['glacier_mask'].sum() * gdir.grid.dx**2)
# In the lowest parts of the glaciers the data should be equivalent
ds_low = ds.isel(y=ds.y < 6741500)
np.testing.assert_allclose(ds_low['lin_mb_above_z'],
ds_low['lin_mb_above_z_on_catch'])
np.testing.assert_allclose(ds_low['oggm_mb_above_z'],
ds_low['oggm_mb_above_z_on_catch'])
# Build some loose tests based on correlation
df = self.get_ref_data(gdir)
vns = ['topo',
'slope',
'aspect',
'slope_factor',
'dis_from_border',
'catchment_area',
'catchment_area_on_catch',
'lin_mb_above_z',
'lin_mb_above_z_on_catch',
'oggm_mb_above_z',
'oggm_mb_above_z_on_catch',
]
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
for vn in vns:
df[vn] = ds[vn].isel(x=('z', df['i']), y=('z', df['j']))
# Loose tests based on correlations
cf = df.corr()
assert cf.loc['slope', 'slope_factor'] < -0.85
assert cf.loc['slope', 'thick'] < -0.4
assert cf.loc['dis_from_border', 'thick'] > 0.2
assert cf.loc['oggm_mb_above_z', 'thick'] > 0.5
assert cf.loc['lin_mb_above_z', 'thick'] > 0.5
assert cf.loc['lin_mb_above_z', 'oggm_mb_above_z'] > 0.95
def test_inversion(self):
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
# We use the default parameters for this run
execute_entity_task(tasks.mass_conservation_inversion, gdirs)
execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs,
varname_suffix='_alt')
execute_entity_task(tasks.distribute_thickness_interp, gdirs,
varname_suffix='_int')
# Reference data
gdir = gdirs[0]
df = self.get_ref_data(gdir)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
v = ds.distributed_thickness_alt
df['oggm_alt'] = v.isel(x=('z', df['i']), y=('z', df['j']))
v = ds.distributed_thickness_int
df['oggm_int'] = v.isel(x=('z', df['i']), y=('z', df['j']))
ds['ref'] = xr.zeros_like(ds.distributed_thickness_int) * np.NaN
ds['ref'].data[df['j'], df['i']] = df['thick']
rmsd_int = ((df.oggm_int - df.thick) ** 2).mean() ** .5
rmsd_alt = ((df.oggm_alt - df.thick) ** 2).mean() ** .5
assert rmsd_int < 85
assert rmsd_alt < 85
dfm = df.mean()
np.testing.assert_allclose(dfm.thick, dfm.oggm_int, 50)
np.testing.assert_allclose(dfm.thick, dfm.oggm_alt, 50)
if do_plot:
import matplotlib.pyplot as plt
df.plot(kind='scatter', x='oggm_int', y='thick')
plt.axis('equal')
df.plot(kind='scatter', x='oggm_alt', y='thick')
plt.axis('equal')
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 3))
ds.ref.plot(ax=ax1)
ds.distributed_thickness_int.plot(ax=ax2)
ds.distributed_thickness_alt.plot(ax=ax3)
plt.tight_layout()
plt.show()
@pytest.mark.slow
def test_optimize_inversion(self):
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Reference data
gdir = gdirs[0]
df = self.get_ref_data(gdir)
# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
glen_a = cfg.PARAMS['inversion_glen_a']
fs = cfg.PARAMS['inversion_fs']
def to_optimize(x):
tasks.mass_conservation_inversion(gdir,
glen_a=glen_a * x[0],
fs=fs * x[1])
tasks.distribute_thickness_per_altitude(gdir)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
thick = ds.distributed_thickness.isel(x=('z', df['i']),
y=('z', df['j']))
out = (np.abs(thick - df.thick)).mean()
return out
opti = optimization.minimize(to_optimize, [1., 1.],
bounds=((0.01, 10), (0.01, 10)),
tol=0.1)
# Check results and save.
execute_entity_task(tasks.mass_conservation_inversion, gdirs,
glen_a=glen_a*opti['x'][0],
fs=0)
execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
df['oggm'] = ds.distributed_thickness.isel(x=('z', df['i']),
y=('z', df['j']))
ds['ref'] = xr.zeros_like(ds.distributed_thickness) * np.NaN
ds['ref'].data[df['j'], df['i']] = df['thick']
rmsd = ((df.oggm - df.thick) ** 2).mean() ** .5
assert rmsd < 30
dfm = df.mean()
np.testing.assert_allclose(dfm.thick, dfm.oggm, 10)
if do_plot:
import matplotlib.pyplot as plt
df.plot(kind='scatter', x='oggm', y='thick')
plt.axis('equal')
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
ds.ref.plot(ax=ax1)
ds.distributed_thickness.plot(ax=ax2)
plt.tight_layout()
plt.show()
def test_workflow(self):
# This is a check that the inversion workflow works fine
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
# We use the default parameters for this run
execute_entity_task(tasks.mass_conservation_inversion, gdirs)
execute_entity_task(tasks.filter_inversion_output, gdirs)
df = utils.compile_glacier_statistics(gdirs)
df['inv_thickness_m'] = df['inv_volume_km3'] / df['rgi_area_km2'] * 1e3
assert df.inv_thickness_m[0] < 100
df = utils.compile_fixed_geometry_mass_balance(gdirs)
assert len(df) > 100
if do_plot:
import matplotlib.pyplot as plt
from oggm.graphics import plot_inversion
plot_inversion(gdirs)
plt.show()
@pytest.mark.slow
class TestCoxeGlacier(unittest.TestCase):
# Test case for a tidewater glacier
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
self.rgi_file = get_demo_file('rgi_RGI50-01.10299.shp')
# Init
cfg.initialize()
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['dem_file'] = get_demo_file('dem_RGI50-01.10299.tif')
cfg.PARAMS['border'] = 40
cfg.PATHS['working_dir'] = self.testdir
cfg.PARAMS['use_kcalving_for_inversion'] = True
cfg.PARAMS['use_kcalving_for_run'] = True
apply_test_ref_tstars()
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def test_set_width(self):
entity = gpd.read_file(self.rgi_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
# Test that area and area-altitude elev is fine
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
mask = nc.variables['glacier_mask'][:]
topo = nc.variables['topo_smoothed'][:]
rhgt = topo[np.where(mask)][:]
fls = gdir.read_pickle('inversion_flowlines')
hgt, widths = gdir.get_inversion_flowline_hw()
bs = 100
bins = np.arange(utils.nicenumber(np.min(hgt), bs, lower=True),
utils.nicenumber(np.max(hgt), bs) + 1,
bs)
h1, b = np.histogram(hgt, weights=widths, density=True, bins=bins)
h2, b = np.histogram(rhgt, density=True, bins=bins)
h1 = h1 / np.sum(h1)
h2 = h2 / np.sum(h2)
assert utils.rmsd(h1, h2) < 0.02 # less than 2% error
new_area = np.sum(widths * fls[-1].dx * gdir.grid.dx)
np.testing.assert_allclose(new_area, gdir.rgi_area_m2)
centerlines.terminus_width_correction(gdir, new_width=714)
fls = gdir.read_pickle('inversion_flowlines')
hgt, widths = gdir.get_inversion_flowline_hw()
# Check that the width is ok
np.testing.assert_allclose(fls[-1].widths[-1] * gdir.grid.dx, 714)
# Check for area distrib
bins = np.arange(utils.nicenumber(np.min(hgt), bs, lower=True),
utils.nicenumber(np.max(hgt), bs) + 1,
bs)
h1, b = np.histogram(hgt, weights=widths, density=True, bins=bins)
h2, b = np.histogram(rhgt, density=True, bins=bins)
h1 = h1 / np.sum(h1)
h2 = h2 / np.sum(h2)
assert utils.rmsd(h1, h2) < 0.02 # less than 2% error
new_area = np.sum(widths * fls[-1].dx * gdir.grid.dx)
np.testing.assert_allclose(new_area, gdir.rgi_area_m2)
def test_run(self):
entity = gpd.read_file(self.rgi_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
# Climate tasks -- only data IO and tstar interpolation!
tasks.process_dummy_cru_file(gdir, seed=0)
tasks.local_t_star(gdir)
tasks.mu_star_calibration(gdir)
# Inversion tasks
tasks.find_inversion_calving(gdir)
# Final preparation for the run
tasks.init_present_time_glacier(gdir)
# check that calving happens in the real context as well
tasks.run_constant_climate(gdir, bias=0, nyears=200,
temperature_bias=-0.5)
with xr.open_dataset(gdir.get_filepath('model_diagnostics')) as ds:
assert ds.calving_m3[-1] > 10
| bsd-3-clause |
fullfanta/mxnet | example/ssd/dataset/pycocotools/coco.py | 21 | 18778 | __author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
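# Minimal usage sketch (illustrative only; the import path assumes this file is
# importable as dataset.pycocotools.coco, and the annotation file name is made
# up for this example):
#
#     from dataset.pycocotools.coco import COCO
#     coco = COCO('annotations/instances_val2017.json')
#     catIds = coco.getCatIds(catNms=['person'])
#     imgIds = coco.getImgIds(catIds=catIds)
#     anns = coco.loadAnns(coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds))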
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
# from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
# rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
raise NotImplementedError("maskUtils disabled!")
else:
rle = [ann['segmentation']]
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
if type(resFile) == str or type(resFile) == unicode:
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
# ann['area'] = maskUtils.area(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
if not 'bbox' in ann:
# ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: segmentation encoded as RLE
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
# rles = maskUtils.frPyObjects(segm, h, w)
# rle = maskUtils.merge(rles)
raise NotImplementedError("maskUtils disabled!")
elif type(segm['counts']) == list:
# uncompressed RLE
# rle = maskUtils.frPyObjects(segm, h, w)
raise NotImplementedError("maskUtils disabled!")
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
return m
| apache-2.0 |
dreadsci/forget-me-not | jobs.py | 2 | 9060 | '''
BLINC Adaptive Prosthetics Toolkit
- Bionic Limbs for Improved Natural Control, blinclab.ca
[email protected]
A toolkit for running machine learning experiments on prosthetic limb data
This module file handles submitting experiments to Wesgrid
Usage: jobs.py --log_name LOG_DIR [OPTIONS]
Edit this file to set the experiment params. See class definition for job-submission parameters.
'''
from experiment import *
from local import base_dir, test_dir
import subprocess
# I think it is easier to edit this file than to use command-line args for the jobs
exp_params = {'base_dir': [base_dir],
'platform': ['calgary'],
'protocol': ['bib'],#, 'book1', 'book2', 'geo', 'news', 'obj1', 'obj2',
#'paper1', 'paper2', 'paper3', 'paper4', 'paper5', 'pic', 'progp', 'progl', 'progc'],
'model': ['FastCTW', 'PTW_FastCTW', 'FMN_FastCTW'],
'depth': [16, 32, 48, 64]
}
def pull_stats_from_file(filename):
with open('logs/{}'.format(filename), 'r') as f:
stats = f.readlines()[-7:]
if 'Starting' in stats[-1]:
return {'flaked': True}
elif 'killed' in stats[-1]:
return {'killed': stats[-1].split(':')[-1].strip()}
data = {}
for s in stats:
try:
parts = s.split(':')
data[parts[0].strip()] = parts[1].split()[0]
except IndexError:
pass
return data
def pull_params_from_name(filename):
data = {'filename': filename}
(data['protocol'], data['model'], depth, *parts) = filename.split('-')
data['depth'] = depth.split('.')[0]
return data
def get_all_logs():
data = {}
for f in os.listdir('logs'):
if '.log' in f:
name = f.split('.')[0]
params = pull_params_from_name(f)
params.update(pull_stats_from_file(f))
data[name] = Series(params)
return DataFrame(data)
def graph_results(data, protocol):
import matplotlib.pyplot as plt
subset = data[data.protocol == protocol][['model', 'depth', 'Size']].convert_objects(convert_numeric=True)
models = sorted(subset.model.unique())
depths = sorted(subset.depth.unique())
for m in models:
subset[subset.model==m].plot(x='depth', y='Size', label=m, xticks=depths)
plt.legend()
plt.title('Compressed size of {} as a function of depth for each model type'.format(protocol))
class JobSet(Structure):
"""
JobSet takes the dictionary of parameter sets defined above
and parses them into a sequence of experiment.py calls.
Usage: jobs.py --log_name DIRNAME [OPTIONS]
---log_name The directory to store logs of the submission and output
---debug Do not call qsub, just print out command
---run_now Run the experiment directly (still submitting or not according to other parameters)
---safe_mode Do not overwrite files
---num_minutes Number of minutes to request
---num_hours
"""
_fields = [Dir('log_name', required=True, keyword=False, default='logs'),
Boolean('debug', default=True, transient=True),
Boolean('run_now', default=False, transient=True),
Boolean('safe_mode', default=True, transient=True),
Integer('num_minutes', default=20, transient=True),
Integer('num_hours', default=0, transient=True),
]
def run(self, **kwargs):
# figure out which keys have multiple options
interesting_keys = [k for k in kwargs if len(kwargs[k]) > 1]
param_sets = unique_dict_sets(kwargs)
self.num_checked = 0
self.num_submitted = 0
for ps in param_sets:
self.num_checked += 1
infile = os.path.join(ps['base_dir'], ps['platform'], ps['protocol'])
# check if we need to cat the file
if not os.path.exists(infile):
paths = [os.path.join(ps['base_dir'], ps['platform'], f) for f in ps['protocol'].split('_')]
extra = "\n".join(["echo \"Checking the existence of the file {}\"".format(ps['protocol']),
"if [ ! -e {} ]".format(infile),
" then `cat {} > {}`".format(' '.join(paths), infile),
" echo \"...created\"",
"fi"
])
else:
extra = ""
#outfile = os.path.join(ps['base_dir'], ps['platform'], ps['protocol']+"_", ps['model'])
#os.makedirs(os.path.dirname(outfile), exist_ok=True)
more = True
i = 0
name = '-'.join([ps['protocol'], ps['model'], str(ps['depth'])])
#if self.safe_mode:
# print("Checking if log file for {} exists...".format(name))
#if os.path.exists(outfile):
# print("already there, turn off safe mode to overwrite.")
# continue
argstring = "compress -m {model} -d {depth} {infile} {outfile}".format(model=ps['model'],
infile=infile,
depth=ps['depth'],
outfile='/dev/null')
self.submit_job(name, argstring, extra)
self.num_submitted += 1
print("Submitted {} jobs out of {}".format(self.num_submitted,
self.num_checked))
def get_jobfilename(self, **kwargs):
filename = ''
bits = {}
for k in list(kwargs):
if '_string' in k:
filename = "_".join([filename, kwargs.pop(k)])
return "_".join([filename, clean_string(kwargs)])
def submit_job(self, filename, argstring, extra=None):
"""
Submit specific experiment to the pbs experiment queue
Save the submission file with the jobid
If debug is on, print job command rather than submitting it.
If run_now is on, run the experiment directly.
"""
sh = self.pbs_template(filename, argstring, extra)
tmpfile = os.path.join(self.log_dir, filename)
print("Scheduling {} ... ".format(filename), end=""); sys.stdout.flush()
with open(tmpfile, 'w') as shfile:
shfile.write(sh)
cmd = "qsub {}".format(tmpfile)
if self.debug:
print("\n"+cmd)
pname = 'DEBUG'
else:
try:
P = subprocess.check_output(cmd, shell=True)
pname = P.decode('ascii').strip()
print("Submitted {}".format(pname))
except Exception as e:
print("Problem calling {}\n{}".format(cmd, e))
pname = "FAILED"
if self.run_now:
print("Running experiment")
try:
import z
z.__main__(argstring.split())
except Exception as e:
print("Problem with experiment: {}".format(e))
jobscript = "{}.{}.sh".format(filename, pname)
print("Saving {} file".format(jobscript))
script_path = os.path.join(self.log_dir, jobscript)
os.rename(tmpfile, script_path)
return script_path
def pbs_template(self, filename, argstring, extra=""):
lines = ["#!/bin/sh",
"",
"#PBS -S /bin/sh",
"#PBS -j oe",
"#PBS -r n",
"#PBS -o {0}/{1}.$PBS_JOBID.log".format(self.log_dir,
filename),
"#PBS -l nodes=1:ppn=1," #no comma here on purpose
"walltime={}:{}:00,mem=4000mb".format(self.num_hours, self.num_minutes),
"",
extra,
"cd $PBS_O_WORKDIR",
"echo \"Current working directory is `pwd`\"",
"echo \"Starting run at: `date`\"",
"alias pypy=/home/akoop/pypy3-2.4-linux_x86_64-portable/bin/pypy3",
"pypy z.py {}".format(argstring),
"echo \"Completed run with exit code $? at: `date`\""]
return "\n".join(lines)
if __name__ == "__main__":
if len(sys.argv) > 1:
print("Creating...")
setup = JobSet.from_args(sys.argv[1:])
print("Running...")
setup.run(**exp_params)
print("Done")
else:
print("Supply at least one command-line argument to run for real")
setup = JobSet.from_args(['--run_now', '--debug', '--safe_mode', '--log_name', 'logs'])
#setup.get_parser().print_help()
print("Running in debug mode.")
setup.run(**exp_params)
| unlicense |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/linear_model/ransac.py | 16 | 17217 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
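# Worked example (illustrative only): with n_inliers=70, n_samples=100,
# min_samples=2 and probability=0.99, the inlier ratio is 0.7, so
# denom = 1 - 0.7**2 = 0.51 and the number of trials is
# ceil(log(0.01) / log(0.51)) = ceil(6.84) = 7:
#
#     _dynamic_max_trials(70, 100, 2, 0.99)   # -> 7.0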
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20
Use ``loss`` instead.
loss : string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the ``i``th value of the array corresponding to the loss
on `X[i]`.
If the loss on a sample is greater than the ``residual_threshold``, then
this sample is classified as an outlier.
random_state : integer or numpy.RandomState, optional
The generator used to select the random sub-samples. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
sample_weight : array-like, shape = [n_samples]
Individual weights for each sample
raises error if sample_weight is passed and base_estimator
fit method does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' was deprecated in version 0.18 and "
"will be removed in version 0.20. Use 'loss' instead.",
DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
mayblue9/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
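# order[k] is the index of the MiniBatchKMeans center closest to KMeans center
# k; e.g. (illustrative) order == array([1, 0, 2]) pairs MiniBatchKMeans center
# 1 with KMeans center 0, center 0 with center 1 and center 2 with center 2.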
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
yabata/ficus | doc/conf.py | 1 | 1826 | # -*- coding: utf-8 -*-
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
]
#templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'ficus'
copyright = u'2015, Dennis Atabay'
author = u'Dennis Atabay'
version = '0.1'
release = '0.1'
exclude_patterns = ['_build']
#pygments_style = 'sphinx'
# HTML output
htmlhelp_basename = 'ficusdoc'
# LaTeX output
latex_elements = {
'papersize': 'a4paper',
'pointsize': '11pt',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ficus.tex', u'ficus Documentation',
     u'Dennis Atabay', 'manual'),
]
# Manual page output
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ficus', u'ficus Documentation',
[author], 1)
]
# Texinfo output
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ficus', u'ficus Documentation',
author, 'ficus', 'A (mixed integer) linear optimisation model for local energy systems',
'Miscellaneous'),
]
# Epub output
# Bibliographic Dublin Core info.
epub_title = u'ficus'
epub_author = u'Dennis Atabay'
epub_publisher = u'Dennis Atabay'
epub_copyright = u'2015, Dennis Atabay'
epub_exclude_files = ['search.html']
# Intersphinx
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'matplotlib': ('http://matplotlib.org/', None)}
| gpl-3.0 |
Archman/beamline | beamline/datautils.py | 1 | 10884 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" This module is created for data processing framework,
to make rules for data saving, visualization issues, etc.
"""
try:
import sdds
SDDS_ = True
except:
SDDS_ = False
import h5py
import numpy as np
import subprocess
import os
class DataExtracter(object):
""" Extract required data from a SDDS formated file,
to put into hdf5 formated file or just dump into RAM
for post-processing.
:param sddsfile: filename of SDDS data file
:param kws: packed tuple/list options, usually sdds column names,
e.g. ``('s', 'Sx')``
:Example:
>>> # *sddsquery -col* shows it has 's', 'Sx' data columns
>>> sddsfile = 'output.sdds'
>>> param_list = ('s', 'Sx')
>>> dh = DataExtracter(sddsfile, *param_list)
>>> # *dh* is a newly created DataExtracter instance
.. Author: Tong Zhang
.. Date : 2016-03-10
"""
def __init__(self, sddsfile, *kws):
self.sddsfile = sddsfile
self.kwslist = kws
self.precision = '%.16e'
self.dcmdline = 'sddsprintout {fn} -notitle -nolabel'.format(fn=self.sddsfile)
self.h5data = ''
if SDDS_:
self.sddsobj = sdds.SDDS(1)
self.sddsobj.load(self.sddsfile)
def getAllCols(self, sddsfile=None):
""" get all available column names from sddsfile
        :param sddsfile: sdds file name; if not given, fall back to the one passed to ``__init__()``
:return: all sdds data column names
:rtype: list
:Example:
>>> dh = DataExtracter('test.out')
>>> print(dh.getAllCols())
['x', 'xp', 'y', 'yp', 't', 'p', 'particleID']
>>> print(dh.getAllCols('test.twi'))
['s', 'betax', 'alphax', 'psix', 'etax', 'etaxp', 'xAperture', 'betay', 'alphay', 'psiy', 'etay', 'etayp',
'yAperture', 'pCentral0', 'ElementName', 'ElementOccurence', 'ElementType']
"""
if SDDS_:
if sddsfile is not None:
sddsobj = sdds.SDDS(2)
sddsobj.load(sddsfile)
else:
sddsobj = self.sddsobj
return sddsobj.columnName
else:
if sddsfile is None:
sddsfile = self.sddsfile
return subprocess.check_output(['sddsquery', '-col', sddsfile]).split()
def getAllPars(self, sddsfile=None):
""" get all available parameter names from sddsfile
        :param sddsfile: sdds file name; if not given, fall back to the one passed to ``__init__()``
:return: all sdds data parameter names
:rtype: list
.. warning:: `sdds` needs to be installed as an extra dependency.
:Example:
>>> dh = DataExtracter('test.w1')
>>> print(dh.getAllPars())
['Step', 'pCentral', 'Charge', 'Particles', 'IDSlotsPerBunch', 'SVNVersion', 'Pass', 'PassLength',
'PassCentralTime', 'ElapsedCoreTime', 'MemoryUsage', 's', 'Description', 'PreviousElementName']
:seealso: :func:`getAllCols`
"""
if SDDS_:
if sddsfile is not None:
sddsobj = sdds.SDDS(2)
sddsobj.load(sddsfile)
else:
sddsobj = self.sddsobj
return sddsobj.parameterName
else:
if sddsfile is None:
sddsfile = self.sddsfile
return subprocess.check_output(['sddsquery', '-par', sddsfile]).split()
def extractData(self):
""" return `self` with extracted data as `numpy array`
Extract the data of the columns and parameters of `self.kws` and put
them in a :np:func:`array` with all columns as columns or parameters as
columns. If columns and parameters are requested at the same then each column
is one row and all parameters are in the last row. This
:np:func:`array` is saved in ``h5data``.
.. note::
If you mix types (e. g. float and str) then the minimal fitting type is
taken for all columns.
.. warning:: Non float types need `sdds` as an extra dependency
:return: instance of itself
:Example:
One column of the watch element
>>> dh = DataExtracter('test.w1')
>>> dh.kwslist = ['Step']
>>> print(dh.extractData().h5data)
array([[1]])
Two columns of the watch element
>>> dh = DataExtracter('test.w1')
>>> dh.kwslist = ['s', 'betax']
>>> print(dh.extractData().h5data)
array([[0, 1], [1, 2], [2, 1]])
Two columns of the watch element and one parameter.
The columns transform to rows and the parameter row is at the end.
Furthermore all elements are strings, because the type of
`PreviousElementName` is str and not float.
>>> dh = DataExtracter('test.w1')
>>> dh.kwslist = ['s', 'PreviousElementName', 'betax']
>>> print(dh.extractData().h5data)
array([['0', '1', '2'], ['1', '2', '1'], ['DR01']])
"""
if SDDS_:
columns = self.sddsobj.columnName
parameters = self.sddsobj.parameterName
data = [self.sddsobj.columnData[columns.index(col)][0]
for col in self.kwslist if col in columns]
data.append([self.sddsobj.parameterData[parameters.index(par)][0]
for par in self.kwslist if par in parameters])
            # list() keeps this working on Python 3, where filter() is lazy
            self.h5data = np.array(list(filter(None, data))).T
else:
for k in self.kwslist:
self.dcmdline += ' -col={kw},format={p}'.format(kw=k, p=self.precision)
cmdlist = ['bash', self.dscript, self.dpath, self.dcmdline]
retlist = []
proc = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
for line in proc.stdout:
retlist.append([float(i) for i in line.split()])
self.h5data = np.array(retlist)
return self
def getH5Data(self):
""" return extracted data as numpy array
:return: numpy array after executing ``extractData()``
"""
return self.h5data
def getKws(self):
""" return data column fields that defined in constructor, e.g. ``('s', 'Sx')``
:return: data columns keyword
:rtype: tuple
"""
return self.kwslist
def setDataScript(self, fullscriptpath='sddsprintdata.sh'):
""" configure script that should be utilized by DataExtracter,
to extract data colums from sddsfile.
:param fullscriptpath: full path of script that handles the data extraction of sddsfile,
default value is ``sddsprintdata.sh``, which is a script that distributed
with ``beamline`` package.
:return: None
"""
self.dscript = os.path.expanduser(fullscriptpath)
def setDataPath(self, path):
""" set full dir path of data files
:param path: data path, usually is the directory where numerical simulation was taken place
:return: None
"""
self.dpath = os.path.expanduser(path)
def setH5file(self, h5filepath):
""" set h5file full path name
:param h5filepath: path for hdf5 file
:return: None
"""
self.h5file = os.path.expanduser(h5filepath)
def setKws(self, *kws):
""" set keyword list, i.e. sdds field names, update ``kwslist`` property
:param kws: packed tuple of sdds datafile column names
        :return: None
"""
self.kwslist = kws
def dump(self):
""" dump extracted data into a single hdf5file,
:return: None
:Example:
        >>> # dump data into an hdf5 formatted file
>>> datafields = ['s', 'Sx', 'Sy', 'enx', 'eny']
>>> datascript = 'sddsprintdata.sh'
>>> datapath = './tests/tracking'
>>> hdf5file = './tests/tracking/test.h5'
>>> A = DataExtracter('test.sig', *datafields)
>>> A.setDataScript(datascript)
>>> A.setDataPath (datapath)
>>> A.setH5file (hdf5file)
>>> A.extractData().dump()
>>>
>>> # read dumped file
>>> fd = h5py.File(hdf5file, 'r')
>>> d_s = fd['s'][:]
>>> d_sx = fd['Sx'][:]
>>>
>>> # plot dumped data
>>> import matplotlib.pyplot as plt
>>> plt.figure(1)
>>> plt.plot(d_s, d_sx, 'r-')
>>> plt.xlabel('$s$')
>>> plt.ylabel('$\sigma_x$')
>>> plt.show()
Just like the following figure shows:
.. image:: ../../images/test_DataExtracter.png
:width: 400px
"""
f = h5py.File(self.h5file, 'w')
for i, k in enumerate(self.kwslist):
v = self.h5data[:, i]
dset = f.create_dataset(k, shape=v.shape, dtype=v.dtype)
dset[...] = v
f.close()
class DataVisualizer(object):
""" for data visualization purposes, to be implemented
.. Author: Tong Zhang
.. Date : 2016-03-14
"""
def __init__(self, data):
self.data = data
def inspectDataFile(self):
""" inspect hdf5 data file
"""
pass
def illustrate(self, xlabel, ylabel):
""" plot x, y w.r.t. xlabel and ylabel
        :param xlabel: label for the x axis
        :param ylabel: label for the y axis
"""
pass
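    # Illustrative sketch of how ``illustrate`` might eventually be filled in
    # (an assumption using matplotlib, not part of the current implementation):
    #
    #     import matplotlib.pyplot as plt
    #
    #     def illustrate(self, xlabel, ylabel):
    #         plt.plot(self.data[xlabel], self.data[ylabel], 'r-')
    #         plt.xlabel(xlabel)
    #         plt.ylabel(ylabel)
    #         plt.show()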
def saveArtwork(self, name='image', fmt='jpg'):
""" save figure by default name of image.jpg
:param name: image name, 'image' by default
:param fmt: image format, 'jpg' by default
"""
pass
class DataStorage(object):
""" for data storage management, to be implemented.
communicate with database like mongodb, mysql, sqlite, etc.
.. Author: Tong Zhang
.. Date : 2016-03-14
"""
def __init__(self, data):
self.data = data
def configDatabase(self):
""" configure database
"""
pass
def putData(self):
""" put data into database
"""
pass
def getData(self):
""" get data from database
"""
pass
def test():
# workflow
datafields = ['s', 'Sx', 'Sy', 'enx', 'eny']
datascript = '~/Programming/projects/beamline/scripts/sddsprintdata.sh'
datapath = '~/Programming/projects/beamline/tests/tracking'
hdf5file = os.path.join(os.path.expanduser(datapath), 'test.h5')
A = DataExtracter('test.sig', *datafields)
A.setDataScript(datascript)
A.setDataPath(datapath)
A.setH5file(hdf5file)
A.extractData().dump()
fd = h5py.File(hdf5file, 'r')
d_s = fd['s'][:]
d_sx = fd['Sx'][:]
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(d_s, d_sx, 'r-')
plt.xlabel('$s$')
plt.ylabel('$\sigma_x$')
plt.show()
if __name__ == '__main__':
test()
| mit |
TomAugspurger/pandas | pandas/tests/arrays/categorical/test_repr.py | 1 | 25894 | import numpy as np
from pandas import (
Categorical,
CategoricalIndex,
Series,
date_range,
option_context,
period_range,
timedelta_range,
)
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalReprWithFactor(TestCategorical):
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]", "Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
assert actual == expected
class TestCategoricalRepr:
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ["a", "b", "c"], fastpath=True)
expected = [
"[a, b, c, a, b, ..., b, c, a, b, c]",
"Length: 600",
"Categories (3, object): [a, b, c]",
]
expected = "\n".join(expected)
actual = repr(factor)
assert actual == expected
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = "[], Categories (3, object): [a, b, c]"
actual = repr(factor)
assert actual == expected
assert expected == actual
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = "[], Categories (3, object): [a < b < c]"
actual = repr(factor)
assert expected == actual
factor = Categorical([], [])
expected = "[], Categories (0, object): []"
assert expected == repr(factor)
def test_print_none_width(self):
# GH10087
a = Series(Categorical([1, 2, 3, 4]))
exp = (
"0 1\n1 2\n2 3\n3 4\n"
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]"
)
with option_context("display.width", None):
assert exp == repr(a)
def test_unicode_print(self):
c = Categorical(["aaaaa", "bb", "cccc"] * 20)
expected = """\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
assert repr(c) == expected
c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20)
expected = """\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
assert repr(c) == expected
        # unicode option should not affect Categorical, as it doesn't care
        # about the repr width
with option_context("display.unicode.east_asian_width", True):
c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20)
expected = """[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
assert repr(c) == expected
def test_categorical_repr(self):
c = Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
assert repr(c) == exp
c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
assert repr(c) == exp
c = Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
assert repr(c) == exp
c = Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
assert repr(c) == exp
def test_categorical_repr_ordered(self):
c = Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
assert repr(c) == exp
c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
assert repr(c) == exp
c = Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
assert repr(c) == exp
c = Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
assert repr(c) == exp
def test_categorical_repr_datetime(self):
idx = date_range("2011-01-01 09:00", freq="H", periods=5)
c = Categorical(idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]"
""
)
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]"
)
assert repr(c) == exp
idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern")
c = Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]"
)
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]"
)
assert repr(c) == exp
def test_categorical_repr_datetime_ordered(self):
idx = date_range("2011-01-01 09:00", freq="H", periods=5)
c = Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
assert repr(c) == exp
idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern")
c = Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
assert repr(c) == exp
def test_categorical_repr_int_with_nan(self):
c = Categorical([1, 2, np.nan])
c_exp = """[1, 2, NaN]\nCategories (2, int64): [1, 2]"""
assert repr(c) == c_exp
s = Series([1, 2, np.nan], dtype="object").astype("category")
s_exp = """0 1\n1 2\n2 NaN
dtype: category
Categories (2, int64): [1, 2]"""
assert repr(s) == s_exp
def test_categorical_repr_period(self):
idx = period_range("2011-01-01 09:00", freq="H", periods=5)
c = Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]""" # noqa
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]""" # noqa
assert repr(c) == exp
idx = period_range("2011-01", freq="M", periods=5)
c = Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa
assert repr(c) == exp
def test_categorical_repr_period_ordered(self):
idx = period_range("2011-01-01 09:00", freq="H", periods=5)
c = Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]""" # noqa
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]""" # noqa
assert repr(c) == exp
idx = period_range("2011-01", freq="M", periods=5)
c = Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa
assert repr(c) == exp
def test_categorical_repr_timedelta(self):
idx = timedelta_range("1 days", periods=5)
c = Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa
assert repr(c) == exp
idx = timedelta_range("1 hours", periods=20)
c = Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]""" # noqa
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]""" # noqa
assert repr(c) == exp
def test_categorical_repr_timedelta_ordered(self):
idx = timedelta_range("1 days", periods=5)
c = Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
assert repr(c) == exp
idx = timedelta_range("1 hours", periods=20)
c = Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]""" # noqa
assert repr(c) == exp
c = Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]""" # noqa
assert repr(c) == exp
def test_categorical_index_repr(self):
idx = CategoricalIndex(Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa
assert repr(idx) == exp
i = CategoricalIndex(Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_ordered(self):
i = CategoricalIndex(Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
i = CategoricalIndex(Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_datetime(self):
idx = date_range("2011-01-01 09:00", freq="H", periods=5)
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern")
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_datetime_ordered(self):
idx = date_range("2011-01-01 09:00", freq="H", periods=5)
i = CategoricalIndex(Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
idx = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern")
i = CategoricalIndex(Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
i = CategoricalIndex(Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_period(self):
# test all length
idx = period_range("2011-01-01 09:00", freq="H", periods=1)
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = period_range("2011-01-01 09:00", freq="H", periods=2)
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = period_range("2011-01-01 09:00", freq="H", periods=3)
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = period_range("2011-01-01 09:00", freq="H", periods=5)
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
i = CategoricalIndex(Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = period_range("2011-01", freq="M", periods=5)
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_period_ordered(self):
idx = period_range("2011-01-01 09:00", freq="H", periods=5)
i = CategoricalIndex(Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
idx = period_range("2011-01", freq="M", periods=5)
i = CategoricalIndex(Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_timedelta(self):
idx = timedelta_range("1 days", periods=5)
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
idx = timedelta_range("1 hours", periods=10)
i = CategoricalIndex(Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')""" # noqa
assert repr(i) == exp
def test_categorical_index_repr_timedelta_ordered(self):
idx = timedelta_range("1 days", periods=5)
i = CategoricalIndex(Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
idx = timedelta_range("1 hours", periods=10)
i = CategoricalIndex(Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')""" # noqa
assert repr(i) == exp
| bsd-3-clause |
wzbozon/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
lobnek/pyutil | test/test_timeseries/test_merge.py | 1 | 2040 | import pandas as pd
import pandas.testing as pt
import pytest
from pyutil.timeseries.merge import merge, last_index, first_index, to_datetime, to_date
from test.config import read_pd
@pytest.fixture()
def ts():
return read_pd("ts.csv", squeeze=True, header=None, parse_dates=True, index_col=0)
class TestMerge(object):
def test_last_index(self, ts):
t = last_index(ts)
t0 = pd.Timestamp("2015-04-22")
assert t == t0
assert not last_index(None)
assert last_index(None, default=t0) == t0
assert last_index(pd.Series({}, dtype=float), default=t0) == t0
def test_first_index(self, ts):
t = first_index(ts)
t0 = pd.Timestamp("2014-01-01")
assert t == t0
assert not first_index(None)
assert first_index(None, default=t0) == t0
assert first_index(pd.Series({}, dtype=float), default=t0) == t0
def test_merge(self, ts):
x = merge(new=ts)
pt.assert_series_equal(x, ts)
x = merge(new=ts, old=ts)
pt.assert_series_equal(x, ts)
x = merge(new=5*ts, old=ts)
pt.assert_series_equal(x, 5 * ts)
y = merge(None)
assert not y
y = merge(pd.Series({}, dtype=float), None)
pt.assert_series_equal(y, pd.Series({}, dtype=float))
y = merge(pd.Series({}, dtype=float), pd.Series({}, dtype=float))
pt.assert_series_equal(y, pd.Series({}, dtype=float))
def test_to_datetime(self):
assert not to_datetime(None)
t0 = pd.Timestamp("2015-04-22")
x = pd.Series(index=[t0], data=[2.0])
# should be safe to apply to_datetime
pt.assert_series_equal(x, to_datetime(x))
def test_to_date(self):
assert not to_date(None)
t0 = pd.Timestamp("2015-04-22")
x = pd.Series(index=[t0], data=[2.0])
pt.assert_series_equal(pd.Series(index=[t0.date()], data=[2.0]), to_date(x))
pt.assert_series_equal(to_date(ts=x, format="%Y%m%d"), pd.Series(index=["20150422"], data=[2.0]))
| mit |
vivekmishra1991/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 35 | 11709 | try:
# Python 2 compat
reload
except NameError:
# Regular Python 3+ import
from importlib import reload
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see if
# failure)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
clf.fit(X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
def import_lda_module():
import sklearn.lda
# ensure that we trigger DeprecationWarning even if the sklearn.lda
# was loaded previously by another test.
reload(sklearn.lda)
return sklearn.lda
lda = assert_warns(DeprecationWarning, import_lda_module)
assert lda.LDA is LinearDiscriminantAnalysis
def import_qda_module():
import sklearn.qda
# ensure that we trigger DeprecationWarning even if the sklearn.qda
# was loaded previously by another test.
reload(sklearn.qda)
return sklearn.qda
qda = assert_warns(DeprecationWarning, import_qda_module)
assert qda.QDA is QuadraticDiscriminantAnalysis
| bsd-3-clause |
xavierwu/scikit-learn | sklearn/grid_search.py | 61 | 37197 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
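# Editor's note -- a hedged sketch of the list-only case described in the
# docstring above: when every value is a plain list, the sampler draws without
# replacement from the equivalent ParameterGrid, so each setting is distinct.
#
#     sampler = ParameterSampler({'C': [1, 10, 100],
#                                 'kernel': ['linear', 'rbf']},
#                                n_iter=4, random_state=0)
#     settings = list(sampler)      # 4 distinct parameter dicts
#     len(settings) == len(set(tuple(sorted(d.items())) for d in settings))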
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
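# Editor's note -- a hedged, illustrative call of fit_grid_point on a single
# split. It assumes the old-style KFold API from sklearn.cross_validation and
# that ``check_scoring`` is importable from sklearn.metrics.scorer in this
# version of the library:
#
#     from sklearn.cross_validation import KFold
#     from sklearn.datasets import load_iris
#     from sklearn.metrics.scorer import check_scoring
#     from sklearn.svm import SVC
#
#     iris = load_iris()
#     train, test = next(iter(KFold(len(iris.target), n_folds=3)))
#     est = SVC()
#     scorer = check_scoring(est, scoring='accuracy')
#     score, params, n_test = fit_grid_point(iris.data, iris.target, est,
#                                            {'C': 1.0}, train, test, scorer,
#                                            verbose=0)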
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used.
        In all other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
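# Editor's note -- a hedged usage sketch of the attributes populated by
# ``GridSearchCV.fit`` (the values shown are only indicative):
#
#     from sklearn import datasets, grid_search, svm
#     iris = datasets.load_iris()
#     clf = grid_search.GridSearchCV(svm.SVC(),
#                                    {'kernel': ('linear', 'rbf'),
#                                     'C': [1, 10]})
#     clf.fit(iris.data, iris.target)
#     clf.best_params_                  # e.g. {'kernel': 'linear', 'C': 1}
#     for entry in clf.grid_scores_:
#         print(entry.parameters, entry.mean_validation_score)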
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass, :class:`StratifiedKFold` is used.
        In all other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
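# Editor's note -- a hedged usage sketch of the distribution-based search
# described above (sampling with replacement, bounded by n_iter):
#
#     from scipy.stats import expon
#     from sklearn import datasets, grid_search, svm
#
#     iris = datasets.load_iris()
#     search = grid_search.RandomizedSearchCV(
#         svm.SVC(), {'C': expon(scale=10), 'gamma': expon(scale=0.1)},
#         n_iter=8, random_state=0)
#     search.fit(iris.data, iris.target)
#     search.best_params_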
| bsd-3-clause |
btallman/incubator-airflow | docs/conf.py | 33 | 8957 | # -*- coding: utf-8 -*-
#
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import mock
MOCK_MODULES = [
'apiclient',
'apiclient.discovery',
'apiclient.http',
'mesos',
'mesos.interface',
'mesos.native',
'oauth2client.service_account',
'pandas.io.gbq',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# Hack to allow a piece of the code to behave differently while the docs are
# being built. The main objective was to alter the behavior of
# utils.apply_default, which was hiding function headers
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
from airflow import settings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxarg.ext',
]
viewcode_import = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Airflow'
#copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '1.0.0'
# The full version, including alpha/beta/rc tags.
#release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Airflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Airflow.tex', u'Airflow Documentation',
u'Maxime Beauchemin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'airflow', u'Airflow Documentation',
[u'Maxime Beauchemin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index', 'Airflow', u'Airflow Documentation',
u'Maxime Beauchemin', 'Airflow',
    'Airflow is a system to programmatically author, schedule and monitor data pipelines.',
'Miscellaneous'
),]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |
moonbury/pythonanywhere | github/MasteringPandas/2060_11_Code/run_svm_titanic.py | 3 | 3341 | #!/home/femibyte/local/anaconda/bin/python
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import metrics,svm
from patsy import dmatrix, dmatrices
import re
train_df = pd.read_csv('csv/train.csv', header=0)
test_df = pd.read_csv('csv/test.csv', header=0)
formula1 = 'C(Pclass) + C(Sex) + Fare'
formula2 = 'C(Pclass) + C(Sex)'
formula3 = 'C(Sex)'
formula4 = 'C(Pclass) + C(Sex) + Age + SibSp + Parch'
formula5 = 'C(Pclass) + C(Sex) + Age + SibSp + Parch + C(Embarked)'
formula6 = 'C(Pclass) + C(Sex) + C(Embarked)'
formula7 = 'C(Pclass) + C(Sex) + Age + Parch + C(Embarked)'
formula8 = 'C(Pclass) + C(Sex) + SibSp + Parch + C(Embarked)'
formula_map = {'PClass_Sex_Fare' : formula1,
'PClass_Sex' : formula2,
'Sex' : formula3,
'PClass_Sex_Age_Sibsp_Parch' : formula4,
'PClass_Sex_Age_Sibsp_Parch_Embarked' : formula5
}
#formula_map={'PClass_Sex_Embarked' : formula6}
formula_map = {'PClass_Sex_SibSp_Parch_Embarked' : formula8}
kernel_types=['linear','rbf','poly']
kernel_types=['poly']
#kernel_types=['rbf']
def main():
train_df_filled=fill_null_vals(train_df,'Fare')
train_df_filled=fill_null_vals(train_df_filled,'Age')
assert len(train_df_filled)==len(train_df)
test_df_filled=fill_null_vals(test_df,'Fare')
test_df_filled=fill_null_vals(test_df_filled,'Age')
assert len(test_df_filled)==len(test_df)
for formula_name, formula in formula_map.iteritems():
print "name=%s formula=%s" % (formula_name,formula)
y_train,X_train = dmatrices('Survived ~ ' + formula,
train_df_filled,return_type='dataframe')
print "Running SVM with formula : %s" % formula
print "X_train cols=%s " % X_train.columns
y_train = np.ravel(y_train)
for kernel in kernel_types:
#model = svm.SVC(kernel=kernel,gamma=3)
model = svm.SVC(kernel=kernel)
print "About to fit..."
svm_model = model.fit(X_train, y_train)
print "Kernel: %s" % kernel
print "Training score:%s" % svm_model.score(X_train,y_train)
X_test=dmatrix(formula,test_df_filled)
predicted=svm_model.predict(X_test)
print "predicted:%s\n" % predicted[:5]
assert len(predicted)==len(test_df)
pred_results=pd.Series(predicted,name='Survived')
svm_results=pd.concat([test_df['PassengerId'],pred_results],axis=1)
svm_results.Survived=svm_results.Survived.astype(int)
results_file='csv/svm_%s_%s.csv' % (kernel,formula_name)
#results_file = re.sub('[+ ()C]','',results_file)
svm_results.to_csv(results_file,index=False)
def fill_null_vals(df,col_name):
null_passengers=df[df[col_name].isnull()]
passenger_id_list=null_passengers['PassengerId'].tolist()
df_filled=df.copy()
for pass_id in passenger_id_list:
idx=df[df['PassengerId']==pass_id].index[0]
similar_passengers=df[(df['Sex']==null_passengers['Sex'][idx]) & (df['Pclass']==null_passengers['Pclass'][idx])]
mean_val=np.mean(similar_passengers[col_name].dropna())
df_filled.loc[idx,col_name]=mean_val
return df_filled
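# Editor's note -- rough, hedged illustration of fill_null_vals: a missing
# 'Age' is replaced by the mean age of passengers sharing the same 'Sex' and
# 'Pclass', e.g.
#
#   filled = fill_null_vals(train_df, 'Age')
#   filled['Age'].isnull().sum()   # expected to be 0 for the Titanic data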
if __name__ == '__main__':
main()
| gpl-3.0 |
Vishruit/DDP_models | miscellaneous/code/ae.py | 1 | 5430 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from keras.layers import Input, Dense,Reshape, Conv2D, MaxPooling2D, UpSampling3D, Conv3D, MaxPooling3D
from keras.models import Model
from keras.layers.core import Lambda
from keras.utils.io_utils import HDF5Matrix
from keras.utils.np_utils import normalize
import keras.backend as K
import h5py
from keras.models import load_model
import tensorflow as tf
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, LambdaCallback, CSVLogger
import sys
import numpy as np
import matplotlib.pyplot as plt
tf.device('/gpu:0')
from keras.datasets import mnist
import numpy as np
# with h5py.File('../data_small.h5', 'r') as hdf:
# # data = hdf['data_small'][:,:150,:,:]
# data = hdf['data_small'][:10]
# print ('Hi1')
# # data = data.reshape((len(data)*data.shape[1]),1,data.shape[2],data.shape[3])
# data = data.reshape(len(data),data.shape[1],data.shape[2],data.shape[3],1)
# print ('Hi2')
# print(data.shape)
#
#
# x_train = data[:int(6*len(data)/10)]
# x_test = data[int(6*len(data)/10):]
# print(x_train.shape, x_test.shape)
def data_preprocess(data):
# data = data.reshape((len(data)*data.shape[1]),data.shape[2],data.shape[3],1)
# print (data.shape)
# print (data.dtype, type(data))
maxVal = np.max(data)
data = data / (maxVal+0.00001)
# sys.exit()
# data = data.astype('float32')
# normalize(data, axis=1, order=2)
# data = data[:int(6*len(data)/10)]
# print(x_train.shape)
# x_test = data[int(6*len(data)/10):]
# maxVal= K.max(data)
# print("#####################################")
# data = data / maxVal
# print(type(data), data.dtype, K.is_keras_tensor(data))
# data = np.array
return data
with h5py.File('../data_small_100.h5', 'r') as hdf:
# data = hdf['data_small'][:20]
# print(len(hdf['data_small']))
data_slice_size = 202
x_train = HDF5Matrix('../data_small_100.h5', 'data_small', start=0, end=int(6*data_slice_size/10), normalizer=lambda x: data_preprocess(x))
x_test = HDF5Matrix('../data_small_100.h5', 'data_small', start=int(6*data_slice_size/10), end=data_slice_size, normalizer=lambda x: data_preprocess(x))
# print (data_hdf.shape, type(data_hdf))
print(x_train.shape)
print ('Hi3')
frames = x_train.shape[1]
height = x_train.shape[2]
width = x_train.shape[3]
input_img = Input(shape=(frames, height, width))
x = Reshape((x_train.shape[1],x_train.shape[2],x_train.shape[3], 1))(input_img)
x = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(x)
x = MaxPooling3D((2, 2, 2), padding='same')(x)
x = Conv3D(8, (3, 3, 3), activation='relu', padding='same')(x)
x = MaxPooling3D((2, 2, 2), padding='same')(x)
x = Conv3D(8, (3, 3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling3D((5, 2, 2), padding='same')(x)
# at this point the representation is (8, 4, 4) i.e. 128-dimensional
x = Conv3D(8, (3, 3, 3), activation='relu', padding='same')(encoded)
x = UpSampling3D((5, 2, 2))(x)
x = Conv3D(8, (3, 3, 3), activation='relu', padding='same')(x)
x = UpSampling3D((2, 2, 2))(x)
x = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(x)
x = UpSampling3D((2, 2, 2))(x)
decoded = Conv3D(1, (3, 3, 3), activation='sigmoid', padding='same')(x)
decoded = Reshape((x_train.shape[1],x_train.shape[2],x_train.shape[3]))(decoded)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.summary()
chkpt = ModelCheckpoint('./CheckPoint/weights_adam.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
# Plot the loss after every epoch.
plot_loss_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: plt.plot(np.arange(epoch),
logs['loss']))
csvLogger = CSVLogger('./CheckPoint/csv_log_file_50_5_adamReal_200.csv', separator=',', append=False)
# callback_list = [chkpt, reduce_lr, plot_loss_callback, csvLogger]
callback_list = [chkpt, reduce_lr, csvLogger]
autoencoder.fit(x_train, x_train,\
epochs=50,\
batch_size=2,\
shuffle='batch',\
validation_data=(x_test, x_test), \
callbacks=callback_list)
# decoded_imgs = autoencoder.predict(x_test[:n])
n = 10
# decoded_imgs = autoencoder.predict(x_test[0:n])
plt.figure(figsize=(20, 4))
for i in range(n):
decoded_imgs = autoencoder.predict(x_test[i].reshape(1, x_test.shape[1], x_test.shape[2], x_test.shape[3]))
# display original
ax = plt.subplot(2, n, i + 1)
# TODO remove hard links
print(x_test[i].shape)
plt.imshow(x_test[i].reshape(frames, 256, 320)[i,...])
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
#plt.savefig('original.jpg')
# display reconstruction
ax = plt.subplot(2, n, i + n + 1)
plt.imshow(decoded_imgs.reshape(frames, 256, 320)[i,...])
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.savefig('reconstruction1_50_5_adamReal_200.png')
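# Editor's note -- hedged follow-up: the checkpoints written by ModelCheckpoint
# above can be restored later with the load_model import at the top of this
# script (the filename below is hypothetical, matching the checkpoint pattern):
#
#   restored = load_model('./CheckPoint/weights_adam.49-0.10.hdf5')
#   preds = restored.predict(x_test[0].reshape((1,) + x_test.shape[1:]))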
| gpl-3.0 |
alpinedatalabs/ODST | notebooks/logreg.py | 4 | 1420 | # logreg supporting functions
# Nitin Borwankar
# Open Data Science Training
import matplotlib.pyplot as plt
import numpy as np
def nfl_outcomes():
scores = [3,11,12,13,20,22,21,25, 26,27,28,29,30,31,33,35,37,41,42,43]
outcomes = [0,0,0,0,0,0,0,1,1,0,1,1,1,1,1,1,1,1,1,1]
figsize = (8, 5)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
line = ax.plot(scores,outcomes,'o')[0]
#x = np.arange(5,50,5)
#y = (1.1/30.)*x-0.3
#line2 = ax.plot(x,y)
ax.set_title('Win/Loss Outcomes for an NFL team')
ax.set_xlabel('Score')
  ax.set_ylabel('Probability of a Win')
ax.set_ylim((-0.1,1.1))
ax.grid(True)
#line.set_marker('o')
#plt.savefig('oo.png',dpi=150)
plt.show()
return ax,fig
def nfl_outcomes_with_line():
scores = [3,11,12,13,20,22,21,25, 26,27,28,29,30,31,33,35,37,41,42,43]
outcomes = [0,0,0,0,0,0,0,1,1,0,1,1,1,1,1,1,1,1,1,1]
figsize = (8, 5)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
line = ax.plot(scores,outcomes,'o')[0]
x = np.arange(5,50,5)
y = (1.1/30.)*x-0.3
line2 = ax.plot(x,y)
ax.set_title('Win/Loss Outcomes for an NFL team')
ax.set_xlabel('Score')
  ax.set_ylabel('Probability of a Win')
ax.set_ylim((-0.1,1.1))
ax.grid(True)
#line.set_marker('o')
#plt.savefig('oo.png',dpi=150)
plt.show()
return ax,fig
def fz(fico,amt,coeff):
z = coeff[0]+coeff[1]*fico+coeff[2]*amt
  return 1 / (1 + np.exp(-z))
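# Editor's note -- hedged usage sketch of fz with purely illustrative
# coefficients (intercept, FICO weight, amount weight); real values would come
# from a fitted logistic regression:
#
#   coeff = [-10.0, 0.015, -0.0001]   # hypothetical values
#   p = fz(720, 10000, coeff)         # probability-like value in (0, 1)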
| bsd-2-clause |
seansu4you87/kupo | projects/MOOCs/udacity/drive/project-4-advanced-lane-finding/sobel/sobel.py | 1 | 2232 | import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
image = mpimg.imread("./signs_vehicles_xygrad.png")
def abs_sobel_thresh(img, orient="x", ksize=3, thresh=(0, 255)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
if orient == "x":
sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize)
else:
sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize)
abs_sobel = np.absolute(sobel)
scaled = np.uint8(255 * abs_sobel / np.max(abs_sobel))
binary = np.zeros_like(scaled)
binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
return binary
def mag_thresh(img, ksize=3, thresh=(0, 255)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize)
sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize)
mag = np.sqrt(sobel_x ** 2 + sobel_y ** 2)
scaled = np.uint8(255 * mag / np.max(mag))
binary = np.zeros_like(scaled)
binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
return binary
def dir_thresh(img, ksize=3, thresh=(0, np.pi / 2)):
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize)
sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize)
abs_x = np.absolute(sobel_x)
abs_y = np.absolute(sobel_y)
arc = np.arctan2(abs_y, abs_x)
binary = np.zeros_like(arc)
binary[(arc >= thresh[0]) & (arc <= thresh[1])] = 1
return binary
lanes_x = abs_sobel_thresh(image, orient="x", ksize=9, thresh=(30, 100))
lanes_y = abs_sobel_thresh(image, orient="y", ksize=9, thresh=(30, 100))
lanes_mag = mag_thresh(image, ksize=9, thresh=(100, 200))
lanes_dir = dir_thresh(image, ksize=9, thresh=(0.7, 1.3))
lanes = np.zeros_like(image)
lanes[
((lanes_x == 1) & (lanes_y == 1)) |
((lanes_mag == 1) & (lanes_dir == 1))
] = 1
# lanes = np.concatenate([lanes_x, lanes_y, lanes_mag, lanes_dir])
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image)
ax1.set_title("Original Image", fontsize=50)
ax2.imshow(lanes)
ax2.set_title("Lane Lines", fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.show(block=True)
| mit |
eg-zhang/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the top 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
Eigenstate/msmbuilder | msmbuilder/cluster/__init__.py | 8 | 3364 | # Author: Robert McGibbon <[email protected]>
# Contributors: Matthew Harrigan <[email protected]>, Brooke Husic <[email protected]>
# Copyright (c) 2016, Stanford University
# All rights reserved.
from __future__ import absolute_import, print_function, division
import warnings
from sklearn import cluster
try:
# sklearn >= 0.18
from sklearn.mixture import GaussianMixture as sklearn_GMM
except ImportError:
from sklearn.mixture import GMM as sklearn_GMM
from ..base import BaseEstimator
from .base import MultiSequenceClusterMixin
from .kcenters import KCenters
from .ndgrid import NDGrid
from .agglomerative import LandmarkAgglomerative
from .regularspatial import RegularSpatial
from .kmedoids import KMedoids
from .minibatchkmedoids import MiniBatchKMedoids
from .apm import APM
warnings.filterwarnings("once", '', DeprecationWarning, r'^sklearn\.')
__all__ = ['KMeans', 'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'GMM', 'SpectralClustering', 'KCenters', 'NDGrid',
'LandmarkAgglomerative', 'RegularSpatial', 'KMedoids',
'MiniBatchKMedoids', 'MultiSequenceClusterMixin', 'APM',
'AgglomerativeClustering']
def _replace_labels(doc):
"""Really hacky find-and-replace method that modifies one of the sklearn
docstrings to change the semantics of labels_ for the subclasses"""
lines = doc.splitlines()
labelstart, labelend = None, None
foundattributes = False
for i, line in enumerate(lines):
stripped = line.strip()
if stripped == 'Attributes':
foundattributes = True
if foundattributes and not labelstart and stripped.startswith('labels_'):
labelstart = len('\n'.join(lines[:i])) + 1
if labelstart and not labelend and stripped == '':
labelend = len('\n'.join(lines[:i + 1]))
if labelstart is None or labelend is None:
return doc
replace = '\n'.join([
' labels_ : list of arrays, each of shape [sequence_length, ]',
' The label of each point is an integer in [0, n_clusters).',
'',
])
return doc[:labelstart] + replace + doc[labelend:]
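# Editor's note -- a hedged sketch of what _replace_labels does: it rewrites
# the ``labels_`` entry of an inherited sklearn docstring so the attribute is
# documented as a list of per-sequence label arrays, e.g.
#
#     doc = _replace_labels(cluster.KMeans.__doc__)
#     'list of arrays, each of shape [sequence_length, ]' in doc   # True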
class KMeans(MultiSequenceClusterMixin, cluster.KMeans, BaseEstimator):
__doc__ = _replace_labels(cluster.KMeans.__doc__)
class MiniBatchKMeans(MultiSequenceClusterMixin, cluster.MiniBatchKMeans,
BaseEstimator):
__doc__ = _replace_labels(cluster.MiniBatchKMeans.__doc__)
class AffinityPropagation(MultiSequenceClusterMixin,
cluster.AffinityPropagation, BaseEstimator):
__doc__ = _replace_labels(cluster.AffinityPropagation.__doc__)
class MeanShift(MultiSequenceClusterMixin, cluster.MeanShift, BaseEstimator):
__doc__ = _replace_labels(cluster.MeanShift.__doc__)
class SpectralClustering(MultiSequenceClusterMixin, cluster.SpectralClustering,
BaseEstimator):
__doc__ = _replace_labels(cluster.SpectralClustering.__doc__)
class AgglomerativeClustering(MultiSequenceClusterMixin,
cluster.AgglomerativeClustering,
BaseEstimator):
__doc__ = _replace_labels(cluster.AgglomerativeClustering.__doc__)
class GMM(MultiSequenceClusterMixin, sklearn_GMM, BaseEstimator):
__doc__ = _replace_labels(sklearn_GMM.__doc__)
| lgpl-2.1 |
dsullivan7/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 8 | 50342 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
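# Sparse input updates the intercept with a damped step (SPARSE_INTERCEPT_DECAY)
# so the dense intercept update does not oscillate relative to the infrequent
# sparse feature updates.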
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
    # if average is not true, average_coef and average_intercept will be
    # unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
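# A minimal sketch, kept in comments, of the one-vs-all (OvA) target encoding
# performed by _prepare_fit_binary/fit_binary; the array values below are
# illustrative and not part of this module:
#
#     import numpy as np
#     y = np.array([0, 2, 1, 0])
#     classes = np.unique(y)                       # array([0, 1, 2])
#     y_i = np.where(y == classes[1], 1.0, -1.0)   # array([-1., -1., 1., -1.])
#
# fit_binary receives exactly this +1/-1 target for class index i.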
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
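            # Expose the averaged weights only once at least ``average``
            # samples have been processed (``t_`` counts seen samples);
            # until then, keep the plain SGD solution.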
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'auto':
raise ValueError("class_weight 'auto' is not supported for "
"partial_fit. In order to use 'auto' weights, "
"use compute_class_weight('auto', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.")
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
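    # A minimal out-of-core usage sketch, kept in comments and assuming the
    # public SGDClassifier subclass; the batch variable names are illustrative:
    #
    #     clf = SGDClassifier(loss="log")
    #     clf.partial_fit(X_batch0, y_batch0, classes=np.unique(y_all))
    #     clf.partial_fit(X_batch1, y_batch1)  # classes may be omitted afterwards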
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "auto" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
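# A hedged probability sketch, kept in comments, with X and Y as in the class
# docstring example above: probability estimates require a probabilistic loss.
#
#     clf = SGDClassifier(loss="log").fit(X, Y)
#     clf.predict_proba([[-0.8, -1]])                       # shape (1, 2), rows sum to 1
#     SGDClassifier(loss="hinge").fit(X, Y).predict_proba   # raises AttributeError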
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = y.astype(np.float64)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self.decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
    average_coef_ : array, shape (n_features,)
        Averaged weights assigned to the features.
    average_intercept_ : array, shape (1,)
        The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
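# A minimal preprocessing sketch, kept in comments: the default learning-rate
# schedules work best on roughly standardized features. StandardScaler is
# scikit-learn's public scaler; the variable names are illustrative.
#
#     from sklearn.preprocessing import StandardScaler
#     X_scaled = StandardScaler().fit_transform(X_train)
#     reg = SGDRegressor().fit(X_scaled, y_train)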
| bsd-3-clause |
tebeka/arrow | python/pyarrow/tests/test_array.py | 1 | 41758 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import datetime
import hypothesis as h
import hypothesis.strategies as st
import itertools
import pickle
import pytest
import struct
import sys
import numpy as np
import pandas as pd
import pandas.util.testing as tm
try:
import pickle5
except ImportError:
pickle5 = None
import pyarrow as pa
import pyarrow.tests.strategies as past
from pyarrow.pandas_compat import get_logical_type
def test_total_bytes_allocated():
assert pa.total_allocated_bytes() == 0
def test_getitem_NULL():
arr = pa.array([1, None, 2])
assert arr[1] is pa.NULL
def test_constructor_raises():
# This could happen by wrong capitalization.
# ARROW-2638: prevent calling extension class constructors directly
with pytest.raises(TypeError):
pa.Array([1, 2])
def test_list_format():
arr = pa.array([[1], None, [2, 3, None]])
result = arr.format()
expected = """\
[
[
1
],
null,
[
2,
3,
null
]
]"""
assert result == expected
def test_string_format():
arr = pa.array([u'', None, u'foo'])
result = arr.format()
expected = """\
[
"",
null,
"foo"
]"""
assert result == expected
def test_long_array_format():
arr = pa.array(range(100))
result = arr.format(window=2)
expected = """\
[
0,
1,
...
98,
99
]"""
assert result == expected
def test_to_numpy_zero_copy():
arr = pa.array(range(10))
old_refcount = sys.getrefcount(arr)
np_arr = arr.to_numpy()
np_arr[0] = 1
assert arr[0] == 1
assert sys.getrefcount(arr) == old_refcount
arr = None
import gc
gc.collect()
# Ensure base is still valid
assert np_arr.base is not None
expected = np.arange(10)
expected[0] = 1
np.testing.assert_array_equal(np_arr, expected)
def test_to_numpy_unsupported_types():
# ARROW-2871: Some primitive types are not yet supported in to_numpy
bool_arr = pa.array([True, False, True])
with pytest.raises(NotImplementedError):
bool_arr.to_numpy()
null_arr = pa.array([None, None, None])
with pytest.raises(NotImplementedError):
null_arr.to_numpy()
def test_to_pandas_zero_copy():
import gc
arr = pa.array(range(10))
for i in range(10):
np_arr = arr.to_pandas()
assert sys.getrefcount(np_arr) == 2
np_arr = None # noqa
assert sys.getrefcount(arr) == 2
for i in range(10):
arr = pa.array(range(10))
np_arr = arr.to_pandas()
arr = None
gc.collect()
# Ensure base is still valid
# Because of py.test's assert inspection magic, if you put getrefcount
# on the line being examined, it will be 1 higher than you expect
base_refcount = sys.getrefcount(np_arr.base)
assert base_refcount == 2
np_arr.sum()
def test_asarray():
arr = pa.array(range(4))
# The iterator interface gives back an array of Int64Value's
np_arr = np.asarray([_ for _ in arr])
assert np_arr.tolist() == [0, 1, 2, 3]
assert np_arr.dtype == np.dtype('O')
assert type(np_arr[0]) == pa.lib.Int64Value
# Calling with the arrow array gives back an array with 'int64' dtype
np_arr = np.asarray(arr)
assert np_arr.tolist() == [0, 1, 2, 3]
assert np_arr.dtype == np.dtype('int64')
# An optional type can be specified when calling np.asarray
np_arr = np.asarray(arr, dtype='str')
assert np_arr.tolist() == ['0', '1', '2', '3']
# If PyArrow array has null values, numpy type will be changed as needed
# to support nulls.
arr = pa.array([0, 1, 2, None])
assert arr.type == pa.int64()
np_arr = np.asarray(arr)
elements = np_arr.tolist()
assert elements[:3] == [0., 1., 2.]
assert np.isnan(elements[3])
assert np_arr.dtype == np.dtype('float64')
def test_array_getitem():
arr = pa.array(range(10, 15))
lst = arr.to_pylist()
for idx in range(-len(arr), len(arr)):
assert arr[idx].as_py() == lst[idx]
for idx in range(-2 * len(arr), -len(arr)):
with pytest.raises(IndexError):
arr[idx]
for idx in range(len(arr), 2 * len(arr)):
with pytest.raises(IndexError):
arr[idx]
def test_array_slice():
arr = pa.array(range(10))
sliced = arr.slice(2)
expected = pa.array(range(2, 10))
assert sliced.equals(expected)
sliced2 = arr.slice(2, 4)
expected2 = pa.array(range(2, 6))
assert sliced2.equals(expected2)
# 0 offset
assert arr.slice(0).equals(arr)
# Slice past end of array
assert len(arr.slice(len(arr))) == 0
with pytest.raises(IndexError):
arr.slice(-1)
# Test slice notation
assert arr[2:].equals(arr.slice(2))
assert arr[2:5].equals(arr.slice(2, 3))
assert arr[-5:].equals(arr.slice(len(arr) - 5))
with pytest.raises(IndexError):
arr[::-1]
with pytest.raises(IndexError):
arr[::2]
n = len(arr)
for start in range(-n * 2, n * 2):
for stop in range(-n * 2, n * 2):
assert arr[start:stop].to_pylist() == arr.to_pylist()[start:stop]
def test_array_iter():
arr = pa.array(range(10))
for i, j in zip(range(10), arr):
assert i == j
assert isinstance(arr, collections.Iterable)
def test_struct_array_slice():
# ARROW-2311: slicing nested arrays needs special care
ty = pa.struct([pa.field('a', pa.int8()),
pa.field('b', pa.float32())])
arr = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
assert arr[1:].to_pylist() == [{'a': 3, 'b': 4.5},
{'a': 5, 'b': 6.5}]
def test_array_factory_invalid_type():
arr = np.array([datetime.timedelta(1), datetime.timedelta(2)])
with pytest.raises(ValueError):
pa.array(arr)
def test_array_ref_to_ndarray_base():
arr = np.array([1, 2, 3])
refcount = sys.getrefcount(arr)
arr2 = pa.array(arr) # noqa
assert sys.getrefcount(arr) == (refcount + 1)
def test_array_eq_raises():
# ARROW-2150: we are raising when comparing arrays until we define the
# behavior to either be elementwise comparisons or data equality
arr1 = pa.array([1, 2, 3], type=pa.int32())
arr2 = pa.array([1, 2, 3], type=pa.int32())
with pytest.raises(NotImplementedError):
arr1 == arr2
def test_array_from_buffers():
values_buf = pa.py_buffer(np.int16([4, 5, 6, 7]))
nulls_buf = pa.py_buffer(np.uint8([0b00001101]))
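    # Validity bits are LSB-ordered: 0b00001101 marks entries 0, 2 and 3 as
    # valid, so entry 1 reads back as None.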
arr = pa.Array.from_buffers(pa.int16(), 4, [nulls_buf, values_buf])
assert arr.type == pa.int16()
assert arr.to_pylist() == [4, None, 6, 7]
arr = pa.Array.from_buffers(pa.int16(), 4, [None, values_buf])
assert arr.type == pa.int16()
assert arr.to_pylist() == [4, 5, 6, 7]
arr = pa.Array.from_buffers(pa.int16(), 3, [nulls_buf, values_buf],
offset=1)
assert arr.type == pa.int16()
assert arr.to_pylist() == [None, 6, 7]
with pytest.raises(TypeError):
pa.Array.from_buffers(pa.int16(), 3, [u'', u''], offset=1)
with pytest.raises(NotImplementedError):
pa.Array.from_buffers(pa.list_(pa.int16()), 4, [None, values_buf])
def test_dictionary_from_numpy():
indices = np.repeat([0, 1, 2], 2)
dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)
mask = np.array([False, False, True, False, False, False])
d1 = pa.DictionaryArray.from_arrays(indices, dictionary)
d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask)
assert d1.indices.to_pylist() == indices.tolist()
assert d1.indices.to_pylist() == indices.tolist()
assert d1.dictionary.to_pylist() == dictionary.tolist()
assert d2.dictionary.to_pylist() == dictionary.tolist()
for i in range(len(indices)):
assert d1[i].as_py() == dictionary[indices[i]]
if mask[i]:
assert d2[i] is pa.NULL
else:
assert d2[i].as_py() == dictionary[indices[i]]
def test_dictionary_from_boxed_arrays():
indices = np.repeat([0, 1, 2], 2)
dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)
iarr = pa.array(indices)
darr = pa.array(dictionary)
d1 = pa.DictionaryArray.from_arrays(iarr, darr)
assert d1.indices.to_pylist() == indices.tolist()
assert d1.dictionary.to_pylist() == dictionary.tolist()
for i in range(len(indices)):
assert d1[i].as_py() == dictionary[indices[i]]
def test_dictionary_from_arrays_boundscheck():
indices1 = pa.array([0, 1, 2, 0, 1, 2])
indices2 = pa.array([0, -1, 2])
indices3 = pa.array([0, 1, 2, 3])
dictionary = pa.array(['foo', 'bar', 'baz'])
# Works fine
pa.DictionaryArray.from_arrays(indices1, dictionary)
with pytest.raises(pa.ArrowException):
pa.DictionaryArray.from_arrays(indices2, dictionary)
with pytest.raises(pa.ArrowException):
pa.DictionaryArray.from_arrays(indices3, dictionary)
    # If we are confident that the indices are "safe" we can pass safe=False to
    # disable the bounds checking
pa.DictionaryArray.from_arrays(indices2, dictionary, safe=False)
def test_dictionary_with_pandas():
indices = np.repeat([0, 1, 2], 2)
dictionary = np.array(['foo', 'bar', 'baz'], dtype=object)
mask = np.array([False, False, True, False, False, False])
d1 = pa.DictionaryArray.from_arrays(indices, dictionary)
d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask)
pandas1 = d1.to_pandas()
ex_pandas1 = pd.Categorical.from_codes(indices, categories=dictionary)
tm.assert_series_equal(pd.Series(pandas1), pd.Series(ex_pandas1))
pandas2 = d2.to_pandas()
ex_pandas2 = pd.Categorical.from_codes(np.where(mask, -1, indices),
categories=dictionary)
tm.assert_series_equal(pd.Series(pandas2), pd.Series(ex_pandas2))
def test_list_from_arrays():
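    # offsets[i]:offsets[i + 1] delimits the values of the i-th list; a null
    # offset at position i marks list i itself as null.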
offsets_arr = np.array([0, 2, 5, 8], dtype='i4')
offsets = pa.array(offsets_arr, type='int32')
pyvalues = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h']
values = pa.array(pyvalues, type='binary')
result = pa.ListArray.from_arrays(offsets, values)
expected = pa.array([pyvalues[:2], pyvalues[2:5], pyvalues[5:8]])
assert result.equals(expected)
# With nulls
offsets = [0, None, 2, 6]
values = ['a', 'b', 'c', 'd', 'e', 'f']
result = pa.ListArray.from_arrays(offsets, values)
expected = pa.array([values[:2], None, values[2:]])
assert result.equals(expected)
# Another edge case
offsets2 = [0, 2, None, 6]
result = pa.ListArray.from_arrays(offsets2, values)
expected = pa.array([values[:2], values[2:], None])
assert result.equals(expected)
def test_union_from_dense():
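    # For a dense union, types[i] selects the child array and value_offsets[i]
    # is the index of the value inside that child.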
binary = pa.array([b'a', b'b', b'c', b'd'], type='binary')
int64 = pa.array([1, 2, 3], type='int64')
types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8')
value_offsets = pa.array([0, 0, 2, 1, 1, 2, 3], type='int32')
result = pa.UnionArray.from_dense(types, value_offsets, [binary, int64])
assert result.to_pylist() == [b'a', 1, b'c', b'b', 2, 3, b'd']
def test_union_from_sparse():
binary = pa.array([b'a', b' ', b'b', b'c', b' ', b' ', b'd'],
type='binary')
int64 = pa.array([0, 1, 0, 0, 2, 3, 0], type='int64')
types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8')
result = pa.UnionArray.from_sparse(types, [binary, int64])
assert result.to_pylist() == [b'a', 1, b'b', b'c', 2, 3, b'd']
def test_union_array_slice():
# ARROW-2314
arr = pa.UnionArray.from_sparse(pa.array([0, 0, 1, 1], type=pa.int8()),
[pa.array(["a", "b", "c", "d"]),
pa.array([1, 2, 3, 4])])
assert arr[1:].to_pylist() == ["b", 3, 4]
binary = pa.array([b'a', b'b', b'c', b'd'], type='binary')
int64 = pa.array([1, 2, 3], type='int64')
types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8')
value_offsets = pa.array([0, 0, 2, 1, 1, 2, 3], type='int32')
arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64])
lst = arr.to_pylist()
for i in range(len(arr)):
for j in range(i, len(arr)):
assert arr[i:j].to_pylist() == lst[i:j]
def test_string_from_buffers():
array = pa.array(["a", None, "b", "c"])
buffers = array.buffers()
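    # For a StringArray, buffers() is [validity bitmap, int32 offsets, character data].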
copied = pa.StringArray.from_buffers(
len(array), buffers[1], buffers[2], buffers[0], array.null_count,
array.offset)
assert copied.to_pylist() == ["a", None, "b", "c"]
copied = pa.StringArray.from_buffers(
len(array), buffers[1], buffers[2], buffers[0])
assert copied.to_pylist() == ["a", None, "b", "c"]
sliced = array[1:]
buffers = sliced.buffers()
copied = pa.StringArray.from_buffers(
len(sliced), buffers[1], buffers[2], buffers[0], -1, sliced.offset)
assert copied.to_pylist() == [None, "b", "c"]
assert copied.null_count == 1
# Slice but exclude all null entries so that we don't need to pass
# the null bitmap.
sliced = array[2:]
buffers = sliced.buffers()
copied = pa.StringArray.from_buffers(
len(sliced), buffers[1], buffers[2], None, -1, sliced.offset)
assert copied.to_pylist() == ["b", "c"]
assert copied.null_count == 0
def _check_cast_case(case, safe=True):
in_data, in_type, out_data, out_type = case
expected = pa.array(out_data, type=out_type)
# check casting an already created array
in_arr = pa.array(in_data, type=in_type)
casted = in_arr.cast(out_type, safe=safe)
assert casted.equals(expected)
    # constructing an array with the out_type, which optionally involves casting
# for more see ARROW-1949
in_arr = pa.array(in_data, type=out_type, safe=safe)
assert in_arr.equals(expected)
def test_cast_integers_safe():
safe_cases = [
(np.array([0, 1, 2, 3], dtype='i1'), 'int8',
np.array([0, 1, 2, 3], dtype='i4'), pa.int32()),
(np.array([0, 1, 2, 3], dtype='i1'), 'int8',
np.array([0, 1, 2, 3], dtype='u4'), pa.uint16()),
(np.array([0, 1, 2, 3], dtype='i1'), 'int8',
np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()),
(np.array([0, 1, 2, 3], dtype='i1'), 'int8',
np.array([0, 1, 2, 3], dtype='f8'), pa.float64())
]
for case in safe_cases:
_check_cast_case(case)
unsafe_cases = [
(np.array([50000], dtype='i4'), 'int32', 'int16'),
(np.array([70000], dtype='i4'), 'int32', 'uint16'),
(np.array([-1], dtype='i4'), 'int32', 'uint16'),
(np.array([50000], dtype='u2'), 'uint16', 'int16')
]
for in_data, in_type, out_type in unsafe_cases:
in_arr = pa.array(in_data, type=in_type)
with pytest.raises(pa.ArrowInvalid):
in_arr.cast(out_type)
def test_cast_none():
# ARROW-3735: Ensure that calling cast(None) doesn't segfault.
arr = pa.array([1, 2, 3])
col = pa.column('foo', [arr])
with pytest.raises(TypeError):
arr.cast(None)
with pytest.raises(TypeError):
col.cast(None)
def test_cast_column():
arrays = [pa.array([1, 2, 3]), pa.array([4, 5, 6])]
col = pa.column('foo', arrays)
target = pa.float64()
casted = col.cast(target)
expected = pa.column('foo', [x.cast(target) for x in arrays])
assert casted.equals(expected)
def test_cast_integers_unsafe():
# We let NumPy do the unsafe casting
unsafe_cases = [
(np.array([50000], dtype='i4'), 'int32',
np.array([50000], dtype='i2'), pa.int16()),
(np.array([70000], dtype='i4'), 'int32',
np.array([70000], dtype='u2'), pa.uint16()),
(np.array([-1], dtype='i4'), 'int32',
np.array([-1], dtype='u2'), pa.uint16()),
(np.array([50000], dtype='u2'), pa.uint16(),
np.array([50000], dtype='i2'), pa.int16())
]
for case in unsafe_cases:
_check_cast_case(case, safe=False)
def test_floating_point_truncate_safe():
safe_cases = [
(np.array([1.0, 2.0, 3.0], dtype='float32'), 'float32',
np.array([1, 2, 3], dtype='i4'), pa.int32()),
(np.array([1.0, 2.0, 3.0], dtype='float64'), 'float64',
np.array([1, 2, 3], dtype='i4'), pa.int32()),
(np.array([-10.0, 20.0, -30.0], dtype='float64'), 'float64',
np.array([-10, 20, -30], dtype='i4'), pa.int32()),
]
for case in safe_cases:
_check_cast_case(case, safe=True)
def test_floating_point_truncate_unsafe():
unsafe_cases = [
(np.array([1.1, 2.2, 3.3], dtype='float32'), 'float32',
np.array([1, 2, 3], dtype='i4'), pa.int32()),
(np.array([1.1, 2.2, 3.3], dtype='float64'), 'float64',
np.array([1, 2, 3], dtype='i4'), pa.int32()),
(np.array([-10.1, 20.2, -30.3], dtype='float64'), 'float64',
np.array([-10, 20, -30], dtype='i4'), pa.int32()),
]
for case in unsafe_cases:
# test safe casting raises
with pytest.raises(pa.ArrowInvalid,
match='Floating point value truncated'):
_check_cast_case(case, safe=True)
# test unsafe casting truncates
_check_cast_case(case, safe=False)
def test_safe_cast_nan_to_int_raises():
arr = pa.array([np.nan, 1.])
with pytest.raises(pa.ArrowInvalid,
match='Floating point value truncated'):
arr.cast(pa.int64(), safe=True)
def test_cast_timestamp_unit():
# ARROW-1680
val = datetime.datetime.now()
s = pd.Series([val])
s_nyc = s.dt.tz_localize('tzlocal()').dt.tz_convert('America/New_York')
us_with_tz = pa.timestamp('us', tz='America/New_York')
arr = pa.Array.from_pandas(s_nyc, type=us_with_tz)
# ARROW-1906
assert arr.type == us_with_tz
arr2 = pa.Array.from_pandas(s, type=pa.timestamp('us'))
assert arr[0].as_py() == s_nyc[0]
assert arr2[0].as_py() == s[0]
# Disallow truncation
arr = pa.array([123123], type='int64').cast(pa.timestamp('ms'))
expected = pa.array([123], type='int64').cast(pa.timestamp('s'))
target = pa.timestamp('s')
with pytest.raises(ValueError):
arr.cast(target)
result = arr.cast(target, safe=False)
assert result.equals(expected)
# ARROW-1949
series = pd.Series([pd.Timestamp(1), pd.Timestamp(10), pd.Timestamp(1000)])
expected = pa.array([0, 0, 1], type=pa.timestamp('us'))
with pytest.raises(ValueError):
pa.array(series, type=pa.timestamp('us'))
with pytest.raises(ValueError):
pa.Array.from_pandas(series, type=pa.timestamp('us'))
result = pa.Array.from_pandas(series, type=pa.timestamp('us'), safe=False)
assert result.equals(expected)
result = pa.array(series, type=pa.timestamp('us'), safe=False)
assert result.equals(expected)
def test_cast_signed_to_unsigned():
safe_cases = [
(np.array([0, 1, 2, 3], dtype='i1'), pa.uint8(),
np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()),
(np.array([0, 1, 2, 3], dtype='i2'), pa.uint16(),
np.array([0, 1, 2, 3], dtype='u2'), pa.uint16())
]
for case in safe_cases:
_check_cast_case(case)
def test_unique_simple():
cases = [
(pa.array([1, 2, 3, 1, 2, 3]), pa.array([1, 2, 3])),
(pa.array(['foo', None, 'bar', 'foo']),
pa.array(['foo', 'bar']))
]
for arr, expected in cases:
result = arr.unique()
assert result.equals(expected)
result = pa.column("column", arr).unique()
assert result.equals(expected)
result = pa.chunked_array([arr]).unique()
assert result.equals(expected)
def test_dictionary_encode_simple():
cases = [
(pa.array([1, 2, 3, None, 1, 2, 3]),
pa.DictionaryArray.from_arrays(
pa.array([0, 1, 2, None, 0, 1, 2], type='int32'),
[1, 2, 3])),
(pa.array(['foo', None, 'bar', 'foo']),
pa.DictionaryArray.from_arrays(
pa.array([0, None, 1, 0], type='int32'),
['foo', 'bar']))
]
for arr, expected in cases:
result = arr.dictionary_encode()
assert result.equals(expected)
result = pa.column("column", arr).dictionary_encode()
assert result.data.chunk(0).equals(expected)
result = pa.chunked_array([arr]).dictionary_encode()
assert result.chunk(0).equals(expected)
def test_cast_time32_to_int():
arr = pa.array(np.array([0, 1, 2], dtype='int32'),
type=pa.time32('s'))
expected = pa.array([0, 1, 2], type='i4')
result = arr.cast('i4')
assert result.equals(expected)
def test_cast_time64_to_int():
arr = pa.array(np.array([0, 1, 2], dtype='int64'),
type=pa.time64('us'))
expected = pa.array([0, 1, 2], type='i8')
result = arr.cast('i8')
assert result.equals(expected)
def test_cast_timestamp_to_int():
arr = pa.array(np.array([0, 1, 2], dtype='int64'),
type=pa.timestamp('us'))
expected = pa.array([0, 1, 2], type='i8')
result = arr.cast('i8')
assert result.equals(expected)
def test_cast_date32_to_int():
arr = pa.array([0, 1, 2], type='i4')
result1 = arr.cast('date32')
result2 = result1.cast('i4')
expected1 = pa.array([
datetime.date(1970, 1, 1),
datetime.date(1970, 1, 2),
datetime.date(1970, 1, 3)
]).cast('date32')
assert result1.equals(expected1)
assert result2.equals(arr)
def test_cast_binary_to_utf8():
binary_arr = pa.array([b'foo', b'bar', b'baz'], type=pa.binary())
utf8_arr = binary_arr.cast(pa.utf8())
expected = pa.array(['foo', 'bar', 'baz'], type=pa.utf8())
assert utf8_arr.equals(expected)
non_utf8_values = [(u'mañana').encode('utf-16-le')]
non_utf8_binary = pa.array(non_utf8_values)
assert non_utf8_binary.type == pa.binary()
with pytest.raises(ValueError):
non_utf8_binary.cast(pa.string())
non_utf8_all_null = pa.array(non_utf8_values, mask=np.array([True]),
type=pa.binary())
# No error
casted = non_utf8_all_null.cast(pa.string())
assert casted.null_count == 1
def test_cast_date64_to_int():
arr = pa.array(np.array([0, 1, 2], dtype='int64'),
type=pa.date64())
expected = pa.array([0, 1, 2], type='i8')
result = arr.cast('i8')
assert result.equals(expected)
@pytest.mark.parametrize(('ty', 'values'), [
('bool', [True, False, True]),
('uint8', range(0, 255)),
('int8', range(0, 128)),
('uint16', range(0, 10)),
('int16', range(0, 10)),
('uint32', range(0, 10)),
('int32', range(0, 10)),
('uint64', range(0, 10)),
('int64', range(0, 10)),
('float', [0.0, 0.1, 0.2]),
('double', [0.0, 0.1, 0.2]),
('string', ['a', 'b', 'c']),
('binary', [b'a', b'b', b'c']),
(pa.binary(3), [b'abc', b'bcd', b'cde'])
])
def test_cast_identities(ty, values):
arr = pa.array(values, type=ty)
assert arr.cast(ty).equals(arr)
pickle_test_parametrize = pytest.mark.parametrize(
('data', 'typ'),
[
([True, False, True, True], pa.bool_()),
([1, 2, 4, 6], pa.int64()),
([1.0, 2.5, None], pa.float64()),
(['a', None, 'b'], pa.string()),
([], None),
([[1, 2], [3]], pa.list_(pa.int64())),
([['a'], None, ['b', 'c']], pa.list_(pa.string())),
([(1, 'a'), (2, 'c'), None],
pa.struct([pa.field('a', pa.int64()), pa.field('b', pa.string())]))
]
)
@pickle_test_parametrize
def test_array_pickle(data, typ):
# Allocate here so that we don't have any Arrow data allocated.
# This is needed to ensure that allocator tests can be reliable.
array = pa.array(data, type=typ)
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
result = pickle.loads(pickle.dumps(array, proto))
assert array.equals(result)
@h.given(
past.arrays(
past.all_types,
size=st.integers(min_value=0, max_value=10)
)
)
def test_pickling(arr):
data = pickle.dumps(arr)
restored = pickle.loads(data)
assert arr.equals(restored)
@pickle_test_parametrize
def test_array_pickle5(data, typ):
# Test zero-copy pickling with protocol 5 (PEP 574)
picklemod = pickle5 or pickle
if pickle5 is None and picklemod.HIGHEST_PROTOCOL < 5:
pytest.skip("need pickle5 package or Python 3.8+")
array = pa.array(data, type=typ)
addresses = [buf.address if buf is not None else 0
for buf in array.buffers()]
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
pickled = picklemod.dumps(array, proto, buffer_callback=buffers.append)
result = picklemod.loads(pickled, buffers=buffers)
assert array.equals(result)
result_addresses = [buf.address if buf is not None else 0
for buf in result.buffers()]
assert result_addresses == addresses
@pytest.mark.parametrize(
'narr',
[
np.arange(10, dtype=np.int64),
np.arange(10, dtype=np.int32),
np.arange(10, dtype=np.int16),
np.arange(10, dtype=np.int8),
np.arange(10, dtype=np.uint64),
np.arange(10, dtype=np.uint32),
np.arange(10, dtype=np.uint16),
np.arange(10, dtype=np.uint8),
np.arange(10, dtype=np.float64),
np.arange(10, dtype=np.float32),
np.arange(10, dtype=np.float16),
]
)
def test_to_numpy_roundtrip(narr):
arr = pa.array(narr)
assert narr.dtype == arr.to_numpy().dtype
np.testing.assert_array_equal(narr, arr.to_numpy())
np.testing.assert_array_equal(narr[:6], arr[:6].to_numpy())
np.testing.assert_array_equal(narr[2:], arr[2:].to_numpy())
np.testing.assert_array_equal(narr[2:6], arr[2:6].to_numpy())
@pytest.mark.parametrize(
('type', 'expected'),
[
(pa.null(), 'empty'),
(pa.bool_(), 'bool'),
(pa.int8(), 'int8'),
(pa.int16(), 'int16'),
(pa.int32(), 'int32'),
(pa.int64(), 'int64'),
(pa.uint8(), 'uint8'),
(pa.uint16(), 'uint16'),
(pa.uint32(), 'uint32'),
(pa.uint64(), 'uint64'),
(pa.float16(), 'float16'),
(pa.float32(), 'float32'),
(pa.float64(), 'float64'),
(pa.date32(), 'date'),
(pa.date64(), 'date'),
(pa.binary(), 'bytes'),
(pa.binary(length=4), 'bytes'),
(pa.string(), 'unicode'),
(pa.list_(pa.list_(pa.int16())), 'list[list[int16]]'),
(pa.decimal128(18, 3), 'decimal'),
(pa.timestamp('ms'), 'datetime'),
(pa.timestamp('us', 'UTC'), 'datetimetz'),
(pa.time32('s'), 'time'),
(pa.time64('us'), 'time')
]
)
def test_logical_type(type, expected):
assert get_logical_type(type) == expected
def test_array_uint64_from_py_over_range():
arr = pa.array([2 ** 63], type=pa.uint64())
expected = pa.array(np.array([2 ** 63], dtype='u8'))
assert arr.equals(expected)
def test_array_conversions_no_sentinel_values():
arr = np.array([1, 2, 3, 4], dtype='int8')
refcount = sys.getrefcount(arr)
arr2 = pa.array(arr) # noqa
assert sys.getrefcount(arr) == (refcount + 1)
assert arr2.type == 'int8'
arr3 = pa.array(np.array([1, np.nan, 2, 3, np.nan, 4], dtype='float32'),
type='float32')
assert arr3.type == 'float32'
assert arr3.null_count == 0
def test_array_from_numpy_datetimeD():
arr = np.array([None, datetime.date(2017, 4, 4)], dtype='datetime64[D]')
result = pa.array(arr)
expected = pa.array([None, datetime.date(2017, 4, 4)], type=pa.date32())
assert result.equals(expected)
@pytest.mark.parametrize(('dtype', 'type'), [
('datetime64[s]', pa.timestamp('s')),
('datetime64[ms]', pa.timestamp('ms')),
('datetime64[us]', pa.timestamp('us')),
('datetime64[ns]', pa.timestamp('ns'))
])
def test_array_from_numpy_datetime(dtype, type):
data = [
None,
datetime.datetime(2017, 4, 4, 12, 11, 10),
datetime.datetime(2018, 1, 1, 0, 2, 0)
]
# from numpy array
arr = pa.array(np.array(data, dtype=dtype))
expected = pa.array(data, type=type)
assert arr.equals(expected)
# from list of numpy scalars
arr = pa.array(list(np.array(data, dtype=dtype)))
assert arr.equals(expected)
def test_array_from_different_numpy_datetime_units_raises():
data = [
None,
datetime.datetime(2017, 4, 4, 12, 11, 10),
datetime.datetime(2018, 1, 1, 0, 2, 0)
]
s = np.array(data, dtype='datetime64[s]')
ms = np.array(data, dtype='datetime64[ms]')
data = list(s[:2]) + list(ms[2:])
with pytest.raises(pa.ArrowNotImplementedError):
pa.array(data)
@pytest.mark.parametrize('unit', ['ns', 'us', 'ms', 's'])
def test_array_from_list_of_timestamps(unit):
n = np.datetime64('NaT', unit)
x = np.datetime64('2017-01-01 01:01:01.111111111', unit)
y = np.datetime64('2018-11-22 12:24:48.111111111', unit)
a1 = pa.array([n, x, y])
a2 = pa.array([n, x, y], type=pa.timestamp(unit))
assert a1.type == a2.type
assert a1.type.unit == unit
assert a1[0] == a2[0]
def test_array_from_timestamp_with_generic_unit():
n = np.datetime64('NaT')
x = np.datetime64('2017-01-01 01:01:01.111111111')
y = np.datetime64('2018-11-22 12:24:48.111111111')
with pytest.raises(pa.ArrowNotImplementedError,
match='Unbound or generic datetime64 time unit'):
pa.array([n, x, y])
def test_array_from_py_float32():
data = [[1.2, 3.4], [9.0, 42.0]]
t = pa.float32()
arr1 = pa.array(data[0], type=t)
arr2 = pa.array(data, type=pa.list_(t))
expected1 = np.array(data[0], dtype=np.float32)
expected2 = pd.Series([np.array(data[0], dtype=np.float32),
np.array(data[1], dtype=np.float32)])
assert arr1.type == t
assert arr1.equals(pa.array(expected1))
assert arr2.equals(pa.array(expected2))
def test_array_from_numpy_ascii():
arr = np.array(['abcde', 'abc', ''], dtype='|S5')
arrow_arr = pa.array(arr)
assert arrow_arr.type == 'binary'
expected = pa.array(['abcde', 'abc', ''], type='binary')
assert arrow_arr.equals(expected)
mask = np.array([False, True, False])
arrow_arr = pa.array(arr, mask=mask)
expected = pa.array(['abcde', None, ''], type='binary')
assert arrow_arr.equals(expected)
# Strided variant
arr = np.array(['abcde', 'abc', ''] * 5, dtype='|S5')[::2]
mask = np.array([False, True, False] * 5)[::2]
arrow_arr = pa.array(arr, mask=mask)
expected = pa.array(['abcde', '', None, 'abcde', '', None, 'abcde', ''],
type='binary')
assert arrow_arr.equals(expected)
# 0 itemsize
arr = np.array(['', '', ''], dtype='|S0')
arrow_arr = pa.array(arr)
expected = pa.array(['', '', ''], type='binary')
assert arrow_arr.equals(expected)
def test_array_from_numpy_unicode():
dtypes = ['<U5', '>U5']
for dtype in dtypes:
arr = np.array(['abcde', 'abc', ''], dtype=dtype)
arrow_arr = pa.array(arr)
assert arrow_arr.type == 'utf8'
expected = pa.array(['abcde', 'abc', ''], type='utf8')
assert arrow_arr.equals(expected)
mask = np.array([False, True, False])
arrow_arr = pa.array(arr, mask=mask)
expected = pa.array(['abcde', None, ''], type='utf8')
assert arrow_arr.equals(expected)
# Strided variant
arr = np.array(['abcde', 'abc', ''] * 5, dtype=dtype)[::2]
mask = np.array([False, True, False] * 5)[::2]
arrow_arr = pa.array(arr, mask=mask)
expected = pa.array(['abcde', '', None, 'abcde', '', None,
'abcde', ''], type='utf8')
assert arrow_arr.equals(expected)
# 0 itemsize
arr = np.array(['', '', ''], dtype='<U0')
arrow_arr = pa.array(arr)
expected = pa.array(['', '', ''], type='utf8')
assert arrow_arr.equals(expected)
def test_buffers_primitive():
a = pa.array([1, 2, None, 4], type=pa.int16())
buffers = a.buffers()
assert len(buffers) == 2
null_bitmap = buffers[0].to_pybytes()
assert 1 <= len(null_bitmap) <= 64 # XXX this is varying
assert bytearray(null_bitmap)[0] == 0b00001011
# Slicing does not affect the buffers but the offset
a_sliced = a[1:]
buffers = a_sliced.buffers()
    assert a_sliced.offset == 1
assert len(buffers) == 2
null_bitmap = buffers[0].to_pybytes()
assert 1 <= len(null_bitmap) <= 64 # XXX this is varying
assert bytearray(null_bitmap)[0] == 0b00001011
assert struct.unpack('hhxxh', buffers[1].to_pybytes()) == (1, 2, 4)
a = pa.array(np.int8([4, 5, 6]))
buffers = a.buffers()
assert len(buffers) == 2
# No null bitmap from Numpy int array
assert buffers[0] is None
assert struct.unpack('3b', buffers[1].to_pybytes()) == (4, 5, 6)
a = pa.array([b'foo!', None, b'bar!!'])
buffers = a.buffers()
assert len(buffers) == 3
null_bitmap = buffers[0].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000101
offsets = buffers[1].to_pybytes()
assert struct.unpack('4i', offsets) == (0, 4, 4, 9)
values = buffers[2].to_pybytes()
assert values == b'foo!bar!!'
def test_buffers_nested():
a = pa.array([[1, 2], None, [3, None, 4, 5]], type=pa.list_(pa.int64()))
buffers = a.buffers()
assert len(buffers) == 4
# The parent buffers
null_bitmap = buffers[0].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000101
offsets = buffers[1].to_pybytes()
assert struct.unpack('4i', offsets) == (0, 2, 2, 6)
# The child buffers
null_bitmap = buffers[2].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00110111
values = buffers[3].to_pybytes()
assert struct.unpack('qqq8xqq', values) == (1, 2, 3, 4, 5)
a = pa.array([(42, None), None, (None, 43)],
type=pa.struct([pa.field('a', pa.int8()),
pa.field('b', pa.int16())]))
buffers = a.buffers()
assert len(buffers) == 5
# The parent buffer
null_bitmap = buffers[0].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000101
# The child buffers: 'a'
null_bitmap = buffers[1].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000001
values = buffers[2].to_pybytes()
assert struct.unpack('bxx', values) == (42,)
# The child buffers: 'b'
null_bitmap = buffers[3].to_pybytes()
assert bytearray(null_bitmap)[0] == 0b00000100
values = buffers[4].to_pybytes()
assert struct.unpack('4xh', values) == (43,)
def test_invalid_tensor_constructor_repr():
# ARROW-2638: prevent calling extension class constructors directly
with pytest.raises(TypeError):
repr(pa.Tensor([1]))
def test_invalid_tensor_construction():
with pytest.raises(TypeError):
pa.Tensor()
def test_list_array_flatten():
typ2 = pa.list_(
pa.list_(
pa.int64()
)
)
arr2 = pa.array([
None,
[
[1, None, 2],
None,
[3, 4]
],
[],
[
[],
[5, 6],
None
],
[
[7, 8]
]
])
assert arr2.type.equals(typ2)
typ1 = pa.list_(pa.int64())
arr1 = pa.array([
[1, None, 2],
None,
[3, 4],
[],
[5, 6],
None,
[7, 8]
])
assert arr1.type.equals(typ1)
typ0 = pa.int64()
arr0 = pa.array([
1, None, 2,
3, 4,
5, 6,
7, 8
])
assert arr0.type.equals(typ0)
assert arr2.flatten().equals(arr1)
assert arr1.flatten().equals(arr0)
assert arr2.flatten().flatten().equals(arr0)
def test_struct_array_flatten():
ty = pa.struct([pa.field('x', pa.int16()),
pa.field('y', pa.float32())])
a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
xs, ys = a.flatten()
assert xs.type == pa.int16()
assert ys.type == pa.float32()
assert xs.to_pylist() == [1, 3, 5]
assert ys.to_pylist() == [2.5, 4.5, 6.5]
xs, ys = a[1:].flatten()
assert xs.to_pylist() == [3, 5]
assert ys.to_pylist() == [4.5, 6.5]
a = pa.array([(1, 2.5), None, (3, 4.5)], type=ty)
xs, ys = a.flatten()
assert xs.to_pylist() == [1, None, 3]
assert ys.to_pylist() == [2.5, None, 4.5]
xs, ys = a[1:].flatten()
assert xs.to_pylist() == [None, 3]
assert ys.to_pylist() == [None, 4.5]
a = pa.array([(1, None), (2, 3.5), (None, 4.5)], type=ty)
xs, ys = a.flatten()
assert xs.to_pylist() == [1, 2, None]
assert ys.to_pylist() == [None, 3.5, 4.5]
xs, ys = a[1:].flatten()
assert xs.to_pylist() == [2, None]
assert ys.to_pylist() == [3.5, 4.5]
a = pa.array([(1, None), None, (None, 2.5)], type=ty)
xs, ys = a.flatten()
assert xs.to_pylist() == [1, None, None]
assert ys.to_pylist() == [None, None, 2.5]
xs, ys = a[1:].flatten()
assert xs.to_pylist() == [None, None]
assert ys.to_pylist() == [None, 2.5]
def test_struct_array_field():
ty = pa.struct([pa.field('x', pa.int16()),
pa.field('y', pa.float32())])
a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty)
x0 = a.field(0)
y0 = a.field(1)
x1 = a.field(-2)
y1 = a.field(-1)
x2 = a.field('x')
y2 = a.field('y')
assert isinstance(x0, pa.lib.Int16Array)
assert isinstance(y1, pa.lib.FloatArray)
assert x0.equals(pa.array([1, 3, 5], type=pa.int16()))
assert y0.equals(pa.array([2.5, 4.5, 6.5], type=pa.float32()))
assert x0.equals(x1)
assert x0.equals(x2)
assert y0.equals(y1)
assert y0.equals(y2)
for invalid_index in [None, pa.int16()]:
with pytest.raises(TypeError):
a.field(invalid_index)
for invalid_index in [3, -3]:
with pytest.raises(IndexError):
a.field(invalid_index)
for invalid_name in ['z', '']:
with pytest.raises(KeyError):
a.field(invalid_name)
def test_empty_cast():
types = [
pa.null(),
pa.bool_(),
pa.int8(),
pa.int16(),
pa.int32(),
pa.int64(),
pa.uint8(),
pa.uint16(),
pa.uint32(),
pa.uint64(),
pa.float16(),
pa.float32(),
pa.float64(),
pa.date32(),
pa.date64(),
pa.binary(),
pa.binary(length=4),
pa.string(),
]
for (t1, t2) in itertools.product(types, types):
try:
            # ARROW-4766: Ensure that supported type conversions don't segfault
# on empty arrays of common types
pa.array([], type=t1).cast(t2)
except pa.lib.ArrowNotImplementedError:
continue
def test_nested_dictionary_array():
dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b'])
list_arr = pa.ListArray.from_arrays([0, 2, 3], dict_arr)
assert list_arr.to_pylist() == [['a', 'b'], ['a']]
dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b'])
dict_arr2 = pa.DictionaryArray.from_arrays([0, 1, 2, 1, 0], dict_arr)
assert dict_arr2.to_pylist() == ['a', 'b', 'a', 'b', 'a']
def test_array_from_numpy_str_utf8():
# ARROW-3890 -- in Python 3, NPY_UNICODE arrays are produced, but in Python
# 2 they are NPY_STRING (binary), so we must do UTF-8 validation
vec = np.array(["toto", "tata"])
vec2 = np.array(["toto", "tata"], dtype=object)
arr = pa.array(vec, pa.string())
arr2 = pa.array(vec2, pa.string())
expected = pa.array([u"toto", u"tata"])
assert arr.equals(expected)
assert arr2.equals(expected)
# with mask, separate code path
mask = np.array([False, False], dtype=bool)
arr = pa.array(vec, pa.string(), mask=mask)
assert arr.equals(expected)
# UTF8 validation failures
vec = np.array([(u'mañana').encode('utf-16-le')])
with pytest.raises(ValueError):
pa.array(vec, pa.string())
with pytest.raises(ValueError):
pa.array(vec, pa.string(), mask=np.array([False]))
@pytest.mark.large_memory
def test_numpy_string_overflow_to_chunked():
# ARROW-3762
# 2^31 + 1 bytes
values = [b'x']
    # Make 10 unique 1MB strings then repeat them 2048 times
unique_strings = {
i: b'x' * ((1 << 20) - 1) + str(i % 10).encode('utf8')
for i in range(10)
}
values += [unique_strings[i % 10] for i in range(1 << 11)]
arr = np.array(values)
arrow_arr = pa.array(arr)
assert isinstance(arrow_arr, pa.ChunkedArray)
# Split up into 16MB chunks. 128 * 16 = 2048, so 129
assert arrow_arr.num_chunks == 129
value_index = 0
for i in range(arrow_arr.num_chunks):
chunk = arrow_arr.chunk(i)
for val in chunk:
assert val.as_py() == values[value_index]
value_index += 1
| apache-2.0 |
gplepage/lsqfit | doc/source/eg-spline.py | 1 | 3601 | """
eg-spline.py --- fitting a spline to data in file spline.json
"""
# Created by G. Peter Lepage (Cornell University) on 2014-04-28.
# Copyright (c) 2020 G. Peter Lepage.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version (see <http://www.gnu.org/licenses/>).
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import print_function # makes this work for python2 and 3
import gvar as gv
import lsqfit
import numpy as np
def main():
param, data = collect_data('spline.p')
F, prior = make_fcn_prior(param)
fit = lsqfit.nonlinear_fit(data=data, prior=prior, fcn=F)
print(fit)
# create f(m)
f = gv.cspline.CSpline(fit.p['mknot'], fit.p['fknot'])
# create error budget
outputs = {'f(1)':f(1), 'f(5)':f(5), 'f(9)':f(9)}
inputs = {'data':data}
inputs.update(prior)
print(gv.fmt_values(outputs))
print(gv.fmt_errorbudget(outputs=outputs, inputs=inputs))
make_plot(param, data, fit)
def make_fcn_prior(param):
def F(p):
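        # Evaluate the spline at m = ainv*am for each data set and add the
        # polynomial terms c[i] * am**(2 + 2*i) on top of it.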
f = gv.cspline.CSpline(p['mknot'], p['fknot'])
ans = {}
for s in param:
ainv, am = param[s]
m = am * ainv
ans[s] = f(m)
for i,ci in enumerate(p['c']):
ans[s] += ci * am ** (2 + 2 * i)
return ans
prior = gv.gvar(dict(
mknot=['1.00(1)', '1.5(5)', '3(1)', '9.00(1)'],
fknot=['0(1)', '1(1)', '1(1)', '1(1)'],
# mknot=['1.00(1)', '1.5(5)', '3(1)', '6(2)', '9.00(1)'],
# fknot=['0(1)', '1(1)', '1(1)', '1(1)', '1(1)'],
c=['0(1)'] * 5,
))
return F, prior
def collect_data(datafile):
param = dict(
A=(10., np.array([0.1, 0.3, 0.5, 0.7, 0.9])),
B=(5., np.array([0.3, 0.5, 0.7, 0.9])),
C=(2.5, np.array([0.5, 0.7, 0.9])),
)
data = gv.load(datafile)
return param,data
def make_plot(param, data, fit):
import matplotlib.pyplot as plt
plt.cla()
f = gv.cspline.CSpline(
fit.p['mknot'], fit.p['fknot'],
)
coliter = iter(['r', 'b', 'g'])
m = np.arange(1, 9, 0.1)
fm = f(m)
fmavg = gv.mean(fm)
fmplus = fmavg + gv.sdev(fm)
fmminus = fmavg - gv.sdev(fm)
plt.fill_between(m, fmplus, fmminus, color='k', alpha=0.20)
plt.plot(m, fmavg, 'k:')
# true function
fm = 1. - .3 / m - .3 / m**2
plt.plot(m, fm, 'k--')
for s in data:
plt.plot()
ainv, am = param[s]
ms = ainv * am
d = gv.mean(data[s])
derr = gv.sdev(data[s])
col = next(coliter)
plt.errorbar(x=ms, y=d, yerr=derr, fmt=col + 'o')
plt.text(ms[-1] - 0.6, d[-1], s, color=col, fontsize='x-large')
fs = gv.mean(fm)
ams = m / ainv
idx = ams < am[-1]
ams = ams[idx]
fs = gv.mean(fm[idx])
for i, ci in enumerate(fit.p['c']):
fs += ci.mean * ams ** (2 * (i + 1))
plt.plot(m[idx], fs, col + ':')
plt.xlabel('m')
plt.ylabel('f')
plt.text(8, 0.65, 'f(m)', fontsize='x-large')
plt.savefig('eg-spline.png', bbox_inches='tight')
plt.show()
if __name__ == "__main__":
import tee
import sys
sys.stdout = tee.tee(sys.stdout, open('eg-spline.out', 'w'))
main() | gpl-3.0 |
JeanKossaifi/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
berkeley-stat159/project-epsilon | code/utils/scripts/multi_comparison_script.py | 1 | 12928 | """
Purpose:
-----------------------------------------------------------------------------------
We seek the activated voxel positions through multi-comparison of beta values across
subjects.
Steps
-----------------------------------------------------------------------------------
1. calculate the mean of each single beta value across subjects and plot them
2. calculate the variance of each single beta value across subjects and plot them
3. calculate the t-stat of each single beta value across subjects and plot them
4. calculate the p-value of each single beta value across subjects and plot them
"""
from __future__ import print_function, division
import sys, os
import pdb
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname('__file__'), "../functions/"))
sys.path.append(os.path.join(os.path.dirname('__file__'), "./"))
from glm_func import *
from matplotlib import colors
from smoothing import *
from plot_mosaic import *
from scipy.stats import t as t_dist
nice_cmap_values = np.loadtxt('actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
dirs = ['../../../txt_output/multi_beta','../../../fig/multi_beta']
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
project_path='../../../'
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
#Template to plot brain images
template = nib.load(project_path+\
'data/mni_icbm152_t1_tal_nlin_asym_09c_2mm.nii')
template_data_int = template.get_data()
template_data = template_data_int.astype(float)
task = dict()
gain = dict()
loss = dict()
print("\n================================================================================")
print("Starting multi comparison analysis for the selected subjects")
subject_list = [str(i) for i in range(1,17)]
#subject_list = ['1','5']
#load all of them
#for x in range(1,17):
for sub in subject_list:
task[sub] = np.loadtxt(dirs[0]+'/ds005_sub'+str(sub).zfill(3)+'_t1r1_beta_task.txt')
#for x in range(1,17):
gain[sub] = np.loadtxt(dirs[0]+'/ds005_sub'+str(sub).zfill(3)+'_t1r1_beta_gain.txt')
#for x in range(1,17):
loss[sub] = np.loadtxt(dirs[0]+'/ds005_sub'+str(sub).zfill(3)+'_t1r1_beta_loss.txt')
# for x in range(1,17):
# dist[x] = np.loadtxt(dirs[0]+'/ds005_sub'+str(x).zfill(3)+'_t1r1_beta_dist.txt')
##################################### MEAN plot #########################################
print("\n================================================================================")
print("Starting analysis for the mean voxels values accross subjects")
#calculate mean and plot (let's try for task)
#task_sum = task[1]
#for x in range(2,17):
# task_sum +=task[x]
#task_mean = task_sum/16
#task_mean = task_sum/len(subject_list)
#task_mean_reshape = task_mean.reshape(91,109,91)
#gain_sum = gain[1]
#for x in range(2,17):
# gain_sum +=gain[x]
#gain_mean = gain_sum/16
#gain_mean_reshape = gain_mean.reshape(91,109,91)
#loss_sum = gain[1]
#for x in range(2,17):
# loss_sum +=loss[x]
#loss_mean = loss_sum/16
#loss_mean_reshape = loss_mean.reshape(91,109,91)
for mydict in [task, gain, loss]:
    mydict['mean'] = sum([mydict[sub] for sub in subject_list])/(len(subject_list))
mydict['mean_reshape'] = mydict['mean'].reshape(91,109,91)
mydict['mean_mask'] = (mydict['mean_reshape'] - 0.0) < 0.01
mydict['mean_reshape_plot'] = mydict['mean_reshape']
mydict['mean_reshape_plot'][~mydict['mean_mask']] = np.nan
print("Creating plots for the voxel mean accross subjects analysis")
task_mean = task['mean']
gain_mean = gain['mean']
loss_mean = loss['mean']
in_brain_task = task['mean_mask']
in_brain_gain = gain['mean_mask']
in_brain_loss = loss['mean_mask']
plt.title('In brain activated voxels - \nmean across ' + str(len(subject_list)) +' subjects on TASK condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(task['mean_reshape_plot'], transpose=False), \
cmap='seismic', alpha=1, vmin=task['mean_reshape_plot'].min(), vmax= task['mean_reshape_plot'].max())
plt.colorbar()
plt.savefig(dirs[1]+'/mean_task.png')
#plt.show()
plt.clf()
print(" Plot for the TASK condition saved in " + dirs[1]+'/mean_task.png')
plt.title('In brain activated voxels - \nmean across ' + str(len(subject_list)) + ' subjects on GAIN condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(gain['mean_reshape_plot'], transpose=False), \
cmap='seismic', alpha=1, vmin=gain['mean_reshape_plot'].min(), vmax= gain['mean_reshape_plot'].max())
plt.colorbar()
plt.savefig(dirs[1]+'/mean_gain.png')
#plt.show()
plt.clf()
print(" Plot for the GAIN condition saved in " + dirs[1] + '/mean_gain.png')
plt.title('In brain activated voxels - \nmean across ' + str(len(subject_list)) + ' subjects on LOSS condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(loss['mean_reshape_plot'], transpose=False), \
cmap='seismic', alpha=1, vmin=loss['mean_reshape_plot'].min(), vmax= loss['mean_reshape_plot'].max())
plt.colorbar()
plt.savefig(dirs[1]+'/mean_loss.png')
#plt.show()
plt.clf()
print(" Plot for the LOSS condition saved in " + dirs[1] + '/mean_loss.png')
#################################### SD plot #########################################
print("\n================================================================================")
print("Starting analysis and plot for the mean variance accross subjects")
#calculate variance and plot
stdlst = []
for x in subject_list:
stdlst.append(task[x])
stdarray = np.array(stdlst)
task_std = stdarray.std(axis=0)
#task_std.shape -> (902629,0)
task_std_reshape = task_std.reshape(91,109,91)
task_std_reshape_plot = task_std_reshape
task_std_reshape_plot[~in_brain_task] = np.nan
stdlst = []
#for x in range(1,17):
for x in subject_list:
stdlst.append(gain[x])
stdarray = np.array(stdlst)
gain_std = stdarray.std(axis=0)
#task_std.shape -> (902629,0)
gain_std_reshape = gain_std.reshape(91,109,91)
gain_std_reshape_plot = gain_std_reshape
gain_std_reshape_plot[~in_brain_gain] = np.nan
stdlst = []
for x in subject_list:
stdlst.append(loss[x])
stdarray = np.array(stdlst)
loss_std = stdarray.std(axis=0)
#task_std.shape -> (902629,0)
loss_std_reshape = loss_std.reshape(91,109,91)
loss_std_reshape_plot = loss_std_reshape
loss_std_reshape_plot[~in_brain_loss] = np.nan
print("Creating plots for the voxel mean variance accross subjects analysis")
plt.title('In brain activated voxels - \nStandard Deviation across ' + str(len(subject_list)) + ' subjects on TASK condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(task_std_reshape_plot, transpose=False), cmap='seismic', alpha=1, vmin=task_std_reshape_plot.min(), vmax= task_std_reshape_plot.max())
plt.colorbar()
plt.savefig(dirs[1]+'/std_task.png')
#plt.show()
plt.clf()
print(" Plot for the TASK condition saved in " + dirs[1] + '/std_task.png')
plt.title('In brain activated voxels - \nStandard Deviation across ' + str(len(subject_list)) + ' subjects on GAIN condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(gain_std_reshape_plot, transpose=False), cmap='seismic', alpha=1, vmin=gain_std_reshape_plot.min(), vmax= gain_std_reshape_plot.max())
plt.colorbar()
plt.savefig(dirs[1]+'/std_gain.png')
#plt.show()
plt.clf()
print(" Plot for the GAIN condition saved in " + dirs[1] + '/std_task.png')
plt.title('In brain activated voxels - \nStandard Deviation across ' + str(len(subject_list)) + ' subjects on LOSS condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(loss_std_reshape_plot, transpose=False), cmap='seismic', alpha=1, vmin=loss_std_reshape_plot.min(), vmax= loss_std_reshape_plot.max())
plt.colorbar()
plt.savefig(dirs[1]+'/std_loss.png')
#plt.show()
plt.clf()
print(" Plot for the LOSS condition saved in " + dirs[1] + '/std_loss.png')
##################################### stat plot #########################################
print("\n================================================================================")
print("Starting analysis and plot for the t-test accross subjects")
#calculate t-stat and plot
task_tstat = task_mean/(task_std/np.sqrt(15))
task_tstat = np.nan_to_num(task_tstat)
task_tstat_reshape = task_tstat.reshape(91,109,91)
task_tstat_reshape_plot = task_tstat_reshape
task_tstat_reshape_plot[~in_brain_task] = np.nan
gain_tstat = gain_mean/(gain_std/np.sqrt(15))
gain_tstat = np.nan_to_num(gain_tstat)
gain_tstat_reshape = gain_tstat.reshape(91,109,91)
gain_tstat_reshape_plot = gain_tstat_reshape
gain_tstat_reshape_plot[~in_brain_gain] = np.nan
loss_tstat = loss_mean/(loss_std/np.sqrt(15))
loss_tstat = np.nan_to_num(loss_tstat)
loss_tstat_reshape = loss_tstat.reshape(91,109,91)
loss_tstat_reshape_plot = loss_tstat_reshape
loss_tstat_reshape_plot[~in_brain_loss] = np.nan
plt.title('In brain activated voxels - \nT-statistics across ' + str(len(subject_list)) + ' subjects on TASK condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(task_tstat_reshape_plot, transpose=False), cmap='seismic', alpha=1, vmin=task_tstat_reshape_plot.min(), vmax=task_tstat_reshape_plot.max())
plt.colorbar()
plt.savefig(dirs[1]+'/tstat_task.png')
#plt.show()
plt.clf()
print(" Plot for the TASK condition saved in " + dirs[1] + '/tstat_task.png')
plt.title('In brain activated voxels - \nT-statistics across ' + str(len(subject_list)) + ' subjects on GAIN condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(gain_tstat_reshape_plot, transpose=False), cmap='seismic', alpha=1, vmin=gain_tstat_reshape_plot.min(), vmax=gain_tstat_reshape_plot.max())
plt.colorbar()
plt.savefig(dirs[1]+'/tstat_gain.png')
#plt.show()
plt.clf()
print(" Plot for the GAIN condition saved in " + dirs[1] + '/tstat_gain.png')
plt.title('In brain activated voxels - \nT-statistics across ' + str(len(subject_list)) + ' subjects on LOSS condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(loss_tstat_reshape_plot, transpose=False), cmap='seismic', alpha=1, vmin=loss_tstat_reshape_plot.min(), vmax=loss_tstat_reshape_plot.max())
plt.colorbar()
plt.savefig(dirs[1]+'/tstat_loss.png')
#plt.show()
plt.clf()
print(" Plot for the LOSS condition saved in " + dirs[1] + '/tstat_loss.png')
##################################### P-value plot #########################################
print("\n================================================================================")
print("Starting analysis and plot for the p-values accross subjects")
#calculate p-value and plot
task_pval = t_dist.cdf(abs(task_tstat), 15)
task_pval_reshape_old = task_pval.reshape(91,109,91)
task_pval_reshape = np.ones((91,109,91))-task_pval_reshape_old
task_pval_reshape_plot = task_pval_reshape
mask = task_pval_reshape_plot < 0.10
task_pval_reshape_plot[~mask] = np.nan
gain_pval = t_dist.cdf(abs(gain_tstat), 15)
gain_pval_reshape_old = gain_pval.reshape(91,109,91)
gain_pval_reshape = np.ones((91,109,91))-gain_pval_reshape_old
gain_pval_reshape_plot = gain_pval_reshape
mask = gain_pval_reshape_plot < 0.10
gain_pval_reshape_plot[~mask] = np.nan
loss_pval = t_dist.cdf(abs(loss_tstat), 15)
loss_pval_reshape_old = loss_pval.reshape(91,109,91)
loss_pval_reshape = np.ones((91,109,91))-loss_pval_reshape_old
loss_pval_reshape_plot = loss_pval_reshape
#loss_pval_reshape_plot[~in_brain_task] = np.nan
p_value_thres = 0.05/(91*109*91)
plt.title('In brain activated voxels - \nP-value across ' + str(len(subject_list)) + ' subjects on TASK condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(task_pval_reshape_plot, transpose=False), cmap='seismic', alpha=1, vmin=task_pval_reshape_plot.min(), vmax=task_pval_reshape_plot.max())
plt.colorbar()
plt.savefig(dirs[1]+'/pval_task.png')
#plt.show()
plt.clf()
print(" Plot for the TASK condition saved in " + dirs[1] + '/pval_task.png')
plt.title('In brain activated voxels - \nP-value across ' + str(len(subject_list)) +' subjects on GAIN condition', fontsize=12)
plt.imshow(plot_mosaic(template_data, transpose=False), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(gain_pval_reshape_plot, transpose=False), cmap='seismic', alpha=1, vmin=gain_pval_reshape_plot.min(), vmax=gain_pval_reshape_plot.max())
plt.colorbar()
plt.savefig(dirs[1]+'/pval_gain.png')
#plt.show()
plt.clf()
print(" Plot for the GAIN condition saved in " + dirs[1] + '/pval_gain.png')
print("Multi comparison analysis done")
| bsd-3-clause |
serazing/xscale | xscale/filtering/linearfilters.py | 1 | 15487 | """Define functions for linear filtering that works on multi-dimensional
xarray.DataArray and xarray.Dataset objects.
"""
# Python 2/3 compatibility
from __future__ import absolute_import, division, print_function
# Internal
import copy
from collections import Iterable
# Numpy and scipy
import numpy as np
import scipy.signal as sig
import scipy.ndimage as im
import xarray as xr
# Matplotlib
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import transforms
# Current package
from .. import _utils
from ..spectral.fft import fft, psd
import pdb
@xr.register_dataarray_accessor('window')
@xr.register_dataset_accessor('window')
class Window(object):
"""
Class for all different type of windows
"""
_attributes = ['order', 'cutoff', 'dx', 'window']
def __init__(self, xarray_obj):
self._obj = xarray_obj
self.obj = xarray_obj # Associated xarray object
self.n = None # Size of the window
self.dims = None # Dimensions of the window
self.ndim = 0 # Number of dimensions
self.cutoff = None # Window cutoff
self.window = None # Window type (scipy-like type)
self.order = None # Window order
self.coefficients = 1. # Window coefficients
self._depth = dict() # Overlap between different blocks
self.fnyq = dict() # Nyquist frequency
def __repr__(self):
"""
Provide a nice string representation of the window object
"""
# Function copied from xarray.core.rolling
attrs = ["{k}->{v}".format(k=k, v=getattr(self, k))
for k in self._attributes if
getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
attrs=', '.join(attrs))
def set(self, n=None, dim=None, cutoff=None, dx=None, window='boxcar',
chunks=None):
"""Set the different properties of the current window.
Parameters
----------
n : int, sequence or dict, optional
Window order over dimensions specified through an integer coupled
with the ``dim`` parameter. A dictionnary can also be used to specify
the order.
dim : str or sequence, optional
Names of the dimensions associated with the window.
cutoff : float, sequence or dict, optional
The window cutoff over the dimensions specified through a
dictionnary or coupled with the dim parameter. If None,
the cutoff is not used to desgin the filter.
dx : float, sequence or dict, optional
Define the resolution of the dimensions. If None, the resolution
is directly infered from the coordinates associated to the
dimensions.
trim : bool, optional
If True, choose to only keep the valid data not affected by the
boundaries.
window : string, tupple, or string and parameters values, or dict, optional
Window to use, see :py:func:`scipy.signal.get_window` for a list
of windows and required parameters
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``
"""
# Check and interpret n and dims parameters
self.n, self.dims = _utils.infer_n_and_dims(self._obj, n, dim)
self.ndim = len(self.dims)
self.order = {di: nbw for nbw, di in zip(self.n, self.dims)}
self.cutoff = _utils.infer_arg(cutoff, self.dims)
self.dx = _utils.infer_arg(dx, self.dims)
self.window = _utils.infer_arg(window, self.dims,
default_value='boxcar')
# Rechunk if needed
self.obj = self._obj.chunk(chunks=chunks)
# Reset attributes
self.fnyq = dict()
self.coefficients = xr.DataArray(1.)
#/!\ Modif for Dataset
#self._depth = dict()
# Build the multi-dimensional window: the hard part
for di in self.obj.dims:
#/!\ Modif for Dataset
#axis_num = self.obj.get_axis_num(di)
#dim_chunk = self.obj.chunks[di][0]
if di in self.dims:
#/!\ Modif for Dataset
#self._depth[axis_num] = self.order[di] // 2
if self.dx[di] is None:
self.dx[di] = _utils.get_dx(self.obj, di)
self.fnyq[di] = 1. / (2. * self.dx[di])
# Compute the coefficients associated to the window using scipy functions
if self.cutoff[di] is None:
# Use get_window if the cutoff is undefined
coefficients1d = sig.get_window(self.window[di],
self.order[di])
else:
# Use firwin if the cutoff is defined
coefficients1d = sig.firwin(self.order[di],
1. / self.cutoff[di],
window=self.window[di],
nyq=self.fnyq[di])
try:
chunks = self.obj.chunks[di][0]
except TypeError:
axis_num = self.obj.get_axis_num(di)
chunks = self.obj.chunks[axis_num][0]
n = len(coefficients1d)
coords = {di: np.arange(-(n - 1) // 2, (n + 1) // 2)}
coeffs1d = xr.DataArray(coefficients1d, dims=di,
coords=coords).chunk(chunks=chunks)
self.coefficients = self.coefficients * coeffs1d
# TODO: Try to add the rotational convention using meshgrid,
# in complement to the outer product
#self.coefficients = self.coefficients.squeeze()
else:
self.coefficients = self.coefficients.expand_dims(di, axis=-1)
# self.coefficients = self.coefficients.expand_dim(di, axis=-1)
# np.expand_dims(self.coefficients,
# axis=axis_num)
def convolve(self, mode='reflect', weights=1., trim=False):
"""Convolve the current window with the data
Parameters
----------
mode : {'reflect', 'periodic', 'any-constant'}, optional
The mode parameter determines how the array borders are handled.
Default is 'reflect'.
weights : DataArray, optional
Array to weight the result of the convolution close to the
boundaries.
trim : bool, optional
If True, choose to only keep the valid data not affected by the
boundaries.
Returns
-------
res : xarray.DataArray
Return a filtered DataArray
"""
if isinstance(self.obj, xr.DataArray):
res = _convolve(self.obj, self.coefficients, self.dims, self.order,
mode, weights, trim)
elif isinstance(self.obj, xr.Dataset):
res = self.obj.apply(_convolve, keep_attrs=True,
args=(self.coefficients, self.dims, self.order,
mode, weights, trim))
return res
def boundary_weights(self, mode='reflect', mask=None, drop_dims=[], trim=False):
"""
Compute the boundary weights
Parameters
----------
mode : {'reflect', 'periodic', 'any-constant'}, optional
The mode parameter determines how the array borders are handled.
Default is 'reflect'.
mask : array-like, optional
Specify the mask, if None the mask is inferred from missing values
drop_dims : list, optional
Specify dimensions along which the weights do not need to be
computed
Returns
-------
weights : xarray.DataArray or xarray.Dataset
Return a DataArray or a Dataset containing the weights
"""
# Drop extra dimensions if
if drop_dims:
new_coeffs = self.coefficients.squeeze()
else:
new_coeffs = self.coefficients
if mask is None:
# Select only the first
new_obj = self.obj.isel(**{di: 0 for di in drop_dims}).squeeze()
mask = 1. - np.isnan(new_obj)
if isinstance(mask, xr.DataArray):
res = _convolve(mask, new_coeffs, self.dims, self.order,
mode, 1., trim)
elif isinstance(mask, xr.Dataset):
res = mask.apply(_convolve, keep_attrs=True,
args=(self.coefficients, self.dims, self.order,
mode, 1., trim))
# Mask the output
res = res.where(mask == 1.)
return res
def tapper(self, overlap=0.):
"""
        Taper the data using the current window
Parameters
----------
overlap:
Returns
-------
data_tappered : dask array
The data tappered y the window
Notes
-----
"""
# TODO: Improve this function to implement multitapper
res = xr.DataArray(self.coefficients * self.obj.data,
dims=self.obj.dims, coords=self.obj.coords,
name=self.obj.name)
return res
def plot(self):
"""
Plot the weights distribution of the window and the associated
spectrum (work only for 1D and 2D windows).
"""
win_array = xr.DataArray(self.coefficients.squeeze(),
dims=self.dims).squeeze()
win_spectrum = psd(fft(win_array, nfft=1024, dim=self.dims,
dx=self.dx, sym=True))
win_spectrum_norm = 20 * np.log10(win_spectrum / abs(win_spectrum).max())
self.win_spectrum_norm = win_spectrum_norm
if self.ndim == 1:
_plot1d_window(win_array, win_spectrum_norm)
elif self.ndim == 2:
_plot2d_window(win_array, win_spectrum_norm)
else:
raise ValueError("This number of dimension is not supported by the "
"plot function")
def _plot1d_window(win_array, win_spectrum_norm):
dim = win_spectrum_norm.dims[0]
freq = win_spectrum_norm[dim]
min_freq = np.extract(freq > 0, freq).min()
# next, should eventually be udpated in order to delete call to .values
# https://github.com/pydata/xarray/issues/1388
# Changed by using load()
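    # Wavelengths (1/frequency) at which the normalized magnitude response is
    # closest to -3 dB and -6 dB.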
cutoff_3db = 1. / abs(freq[np.abs(win_spectrum_norm + 3).argmin(dim).data])
cutoff_6db = 1. / abs(freq[np.abs(win_spectrum_norm + 6).argmin(dim).data])
# Plot window properties
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
# First plot: weight distribution
win_array.plot(ax=ax1)
ax1.set_ylabel("Amplitude")
ax1.set_xlabel("Sample")
# Second plot: frequency response
win_spectrum_norm.plot(ax=ax2)
ax2.set_xscale('symlog', linthreshx=min_freq,
subsx=[2, 3, 4, 5, 6, 7, 8, 9])
box = dict(boxstyle='round', facecolor='white', alpha=1)
textstr = '$\lambda^{3dB}=%.1f$ \n $\lambda^{6dB}=%.1f$' % (cutoff_3db,
cutoff_6db)
ax2.text(0.5, 0.45, textstr, transform=ax2.transAxes, fontsize=14,
verticalalignment='top',
horizontalalignment='center', bbox=box)
ax2.set_ylim((-200, 20))
ax2.set_ylabel("Normalized magnitude [dB]")
ax2.set_xlabel("Frequency [cycles per sample]")
ax2.grid(True)
plt.tight_layout()
def _plot2d_window(win_array, win_spectrum_norm):
fig = plt.figure(figsize=(18, 9))
n_x, n_y = win_array.shape
n_fx, n_fy = win_spectrum_norm.shape
dim_fx, dim_fy = win_spectrum_norm.dims
win_array_x = win_array[:, n_y // 2]
win_array_y = win_array[n_x // 2, :]
win_spectrum_x = win_spectrum_norm.isel(**{dim_fy: n_fy // 2})
win_spectrum_y = win_spectrum_norm.isel(**{dim_fx: n_fx // 2})
freq_x, freq_y = win_spectrum_norm[dim_fx], win_spectrum_norm[dim_fy]
min_freq_x = np.extract(freq_x > 0, freq_x).min()
min_freq_y = np.extract(freq_y > 0, freq_y).min()
cutoff_x_3db = 1. / abs(freq_x[np.abs(win_spectrum_x + 3).argmin(dim_fx).data])
cutoff_x_6db = 1. / abs(freq_x[np.abs(win_spectrum_x + 6).argmin(dim_fx).data])
cutoff_y_3db = 1. / abs(freq_y[np.abs(win_spectrum_y + 3).argmin(dim_fy).data])
cutoff_y_6db = 1. / abs(freq_y[np.abs(win_spectrum_y + 6).argmin(dim_fy).data])
#fig = plt.figure(1, figsize=(16, 8))
# Definitions for the axes
left, width = 0.05, 0.25
bottom, height = 0.05, 0.5
offset = 0.05
bottom_h = bottom + height + offset
rect_2D_weights = [left, bottom, width, height]
rect_x_weights = [left, bottom_h, width, height / 2]
rect_y_weights = [left + width + offset, bottom, width / 2, height]
rect_2D_spectrum = [left + 3. / 2 * width + 2 * offset, bottom, width,
height]
rect_x_spectrum = [left + 3. / 2 * width + 2 * offset, bottom_h, width,
height / 2]
rect_y_spectrum = [left + 5. / 2 * width + 3 * offset, bottom,
width / 2, height]
ax_2D_weights = plt.axes(rect_2D_weights)
ax_x_weights = plt.axes(rect_x_weights)
ax_y_weights = plt.axes(rect_y_weights)
ax_x_spectrum = plt.axes(rect_x_spectrum)
ax_y_spectrum = plt.axes(rect_y_spectrum)
ax_2D_spectrum = plt.axes(rect_2D_spectrum)
# Weight disribution along y
win_array_y.squeeze().plot(ax=ax_x_weights)
ax_x_weights.set_ylabel('')
ax_x_weights.set_xlabel('')
# Weight disribution along x
base = ax_y_weights.transData
rot = transforms.Affine2D().rotate_deg(270)
win_array_x.plot(ax=ax_y_weights, transform=rot + base)
ax_y_weights.set_ylabel('')
ax_y_weights.set_xlabel('')
# Full 2d weight distribution
win_array.plot(ax=ax_2D_weights, add_colorbar=False)
# Spectrum along f_y
win_spectrum_y.plot(ax=ax_x_spectrum)
ax_x_spectrum.set_xscale('symlog', linthreshx=min_freq_y,
subsx=[2, 3, 4, 5, 6, 7, 8, 9])
ax_x_spectrum.set_ylim([-200, 20])
ax_x_spectrum.grid()
ax_x_spectrum.set_ylabel("Normalized magnitude [dB]")
ax_x_spectrum.set_xlabel("")
box = dict(boxstyle='round', facecolor='white', alpha=1)
# place a text box in upper left in axes coords
textstr = '$\lambda_y^{3dB}=%.1f$ \n $\lambda_y^{6dB}=%.1f$' % (
cutoff_y_3db, cutoff_y_6db)
ax_x_spectrum.text(0.5, 0.45, textstr,
transform=ax_x_spectrum.transAxes,
fontsize=14, verticalalignment='top',
horizontalalignment='center', bbox=box)
# Spectrum along f_x
base = ax_y_spectrum.transData
rot = transforms.Affine2D().rotate_deg(270)
win_spectrum_x.squeeze().plot(ax=ax_y_spectrum,
transform=rot + base)
ax_y_spectrum.set_yscale('symlog', linthreshy=min_freq_x,
subsy=[2, 3, 4, 5, 6, 7, 8, 9])
ax_y_spectrum.set_xlim([-200, 20])
ax_y_spectrum.grid()
ax_y_spectrum.set_ylabel("")
ax_y_spectrum.set_xlabel("Normalized magnitude [dB]")
textstr = '$\lambda_x^{3dB}=%.1f$ \n $\lambda_x^{6dB}=%.1f$' % (
cutoff_x_3db, cutoff_x_6db)
ax_y_spectrum.text(0.7, 0.5, textstr, transform=ax_y_spectrum.transAxes,
fontsize=14,
verticalalignment='center',
horizontalalignment='right',
bbox=box)
# Full 2d spectrum
win_spectrum_norm.plot(ax=ax_2D_spectrum,
add_colorbar=False,
vmin=-200,
vmax=0,
cmap=matplotlib.cm.Spectral_r)
ax_2D_spectrum.set_xscale('symlog', linthreshx=min_freq_y)
ax_2D_spectrum.set_yscale('symlog', linthreshy=min_freq_x)
def _convolve(dataarray, coeffs, dims, order, mode, weights, trim):
"""Convolve the current window with the data
"""
# Check if the kernel has more dimensions than the input data,
# if so the extra dimensions of the kernel are squeezed
squeezed_dims = [di for di in dims if di not in dataarray.dims]
new_coeffs = coeffs.squeeze(squeezed_dims)
new_coeffs /= new_coeffs.sum()
if trim:
mode = np.nan
mode_conv = 'constant'
new_data = dataarray.data
else:
new_data = dataarray.fillna(0.).data
    if mode == 'periodic':
mode_conv = 'wrap'
else:
mode_conv = mode
boundary = {dataarray.get_axis_num(di): mode for di in dims}
depth = {dataarray.get_axis_num(di): order[di] // 2 for di in dims}
conv = lambda x: im.convolve(x, new_coeffs.data, mode=mode_conv)
data_conv = new_data.map_overlap(conv, depth=depth,
boundary=boundary,
trim=True)
res = 1. / weights * xr.DataArray(data_conv, dims=dataarray.dims,
coords=dataarray.coords,
name=dataarray.name)
return res
| apache-2.0 |
Aasmi/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print( model_details + " with features", pair, "has a score of", scores )
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
# surfaces. These points are regularly space and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
rr1964/Caterpillar | JSS Exploration.py | 1 | 4242 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 01 11:26:48 2017
@author: reeserd2
"""
print "Entering JSS Analysis mode."
####All of the following is now done by my module simpleRead
#import csv
#
#with open('C:/Users/reeserd2/Desktop/JSS Analysis/data/JSS_fixed_CIM_CapIQ_20170525_fixed.csv', mode = 'r') as f:
# readIn = csv.reader(f, delimiter = ',', skipinitialspace=True)
#
# lineData = list()
#
# cols = next(readIn)
# print(cols)
#
# for col in cols:
# # Create a list in lineData for each column of data.
# lineData.append(list())
#
#
# for line in readIn:
# for i in xrange(0, len(lineData)):
# # Copy the data from the line into the correct columns.
# lineData[i].append(line[i])
#
# data = dict()
#
# for i in xrange(0, len(cols)):
## Create each key in the dict with the data in its column.
# data[cols[i]] = lineData[i]
#
#print(data)
#
#f.close()
import simpleRead as sr###A crude module I personally wrote for reading in raw csv files.
import numpy as np
import math as m
import matplotlib
import pylab as pl
import pandas
print matplotlib.__version__
"""
I am learning some work from 'Python for Data Analysis'.
"""
####One way of reading in the data. A bit choppy, but you can get at it at a more "raw" level.
#raw_data = sr.simpleReadCSV('C:/Users/reeserd2/Desktop/JSS Analysis/data/JSS_fixed_CIM_CapIQ_20170525_fixed.csv')
#my_data.keys()
#print my_data["ISWON"]
def make_float(s):
s = s.strip()
return float(s) if s else 'NA'
#raw_data["RoAPer"] = map(make_float, raw_data["RoAPer"])
#print my_data["RoAPer"]
###dataf = pd.DataFrame(raw_data, columns = [])
####pandas.read_csv() most closely resembles R's ability to intelligently read in a csv file.
JSS_Data = pandas.read_csv('../JSS Analysis/data/JSS_fixed_CIM_CapIQ_20170525_fixed.csv')
print JSS_Data.keys()
JSS_Data["Rev"]
JSS_Data.count()
JSS_Data.sum()##Who knows what this does for strings.....But I believe it ignores NaN. It also seems to ignore string columns.
#%%
import json
path = "C:/Users/reeserd2/Documents/bitlyData.txt"
bitly =[json.loads(line) for line in open(path)] ###Note the list constructor using a for loop.
#print bitly[2]["tz"]
time_zones = [record['tz'] for record in bitly if "tz" in record] ###Again, the list constructor using a for loop.
time_zones[:15]###The index is not inclusive remember.
from collections import defaultdict
def get_counts(seq):
counts = defaultdict(int)###Initializes all values to 0.
for rec in seq:
counts[rec] += 1
return counts
tzCounts = get_counts(time_zones)
tzCounts['America/New_York']
#%%%
###We can find the top 5 most common time zones, but it requires us "flipping" the dictionary so to speak.
def top_counts(count_dict, n = 5):
value_key = [(count,tz) for tz,count in count_dict.items()]
value_key.sort(reverse = True)
#value_key.sort() ###sorts based on the FIRST value in the tuple.
###The value_key list now remains sorted. No need to cache. To not modify the list, use sorted(LIST)
return value_key[-n:]
top_counts(tzCounts, n = 10)
###This same thing can be done using some tools that are importable from the collections module.
from collections import Counter
tzCounts_simple = Counter(time_zones) ###A single function to cover the last 20 lines or so.
tzCounts_simple.most_common(10) #Also presents these in a top to bottom format.
#%%
###All of the time zone stuff can be done by using DataFrame in pandas.
import pandas as pd
df = pd.DataFrame(bitly)
df['tz'][:10]
##print df["tz"].value_counts()[:10]
clean_tz = df['tz'].fillna('Missing')
clean_tz[clean_tz == ''] = 'Unknown'
tz_counts = clean_tz.value_counts()
tz_counts[:10]
#%%
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 4))
tz_counts[:10].plot(kind='barh', rot=0)
df['a'][1]
df['a'][50]
df['a'][51]
results = pd.Series([x.split()[0] for x in df.a.dropna()])
results[:5]
#%%
#%%
#%%
#%%
#%%
#%%
#%%
| gpl-3.0 |
DuCorey/bokeh | examples/app/stocks/main.py | 11 | 4555 | ''' Create a simple stocks correlation dashboard.
Choose stocks to compare in the drop down widgets, and make selections
on the plots to update the summary and histograms accordingly.
.. note::
Running this example requires downloading sample data. See
the included `README`_ for more information.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve stocks
at your command prompt. Then navigate to the URL
http://localhost:5006/stocks
.. _README: https://github.com/bokeh/bokeh/blob/master/examples/app/stocks/README.md
'''
try:
from functools import lru_cache
except ImportError:
    # Python 2's stdlib does not have lru_cache so let's just
# create a dummy decorator to avoid crashing
print ("WARNING: Cache for this example is available on Python 3 only.")
def lru_cache():
def dec(f):
def _(*args, **kws):
return f(*args, **kws)
return _
return dec
from os.path import dirname, join
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import PreText, Select
from bokeh.plotting import figure
DATA_DIR = join(dirname(__file__), 'daily')
DEFAULT_TICKERS = ['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
def nix(val, lst):
return [x for x in lst if x != val]
@lru_cache()
def load_ticker(ticker):
fname = join(DATA_DIR, 'table_%s.csv' % ticker.lower())
data = pd.read_csv(fname, header=None, parse_dates=['date'],
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'])
data = data.set_index('date')
return pd.DataFrame({ticker: data.c, ticker+'_returns': data.c.diff()})
@lru_cache()
def get_data(t1, t2):
df1 = load_ticker(t1)
df2 = load_ticker(t2)
data = pd.concat([df1, df2], axis=1)
data = data.dropna()
data['t1'] = data[t1]
data['t2'] = data[t2]
data['t1_returns'] = data[t1+'_returns']
data['t2_returns'] = data[t2+'_returns']
return data
# set up widgets
stats = PreText(text='', width=500)
ticker1 = Select(value='AAPL', options=nix('GOOG', DEFAULT_TICKERS))
ticker2 = Select(value='GOOG', options=nix('AAPL', DEFAULT_TICKERS))
# set up plots
source = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))
source_static = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))
tools = 'pan,wheel_zoom,xbox_select,reset'
corr = figure(plot_width=350, plot_height=350,
tools='pan,wheel_zoom,box_select,reset')
corr.circle('t1_returns', 't2_returns', size=2, source=source,
selection_color="orange", alpha=0.6, nonselection_alpha=0.1, selection_alpha=0.4)
ts1 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag="xbox_select")
ts1.line('date', 't1', source=source_static)
ts1.circle('date', 't1', size=1, source=source, color=None, selection_color="orange")
ts2 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag="xbox_select")
ts2.x_range = ts1.x_range
ts2.line('date', 't2', source=source_static)
ts2.circle('date', 't2', size=1, source=source, color=None, selection_color="orange")
# set up callbacks
def ticker1_change(attrname, old, new):
ticker2.options = nix(new, DEFAULT_TICKERS)
update()
def ticker2_change(attrname, old, new):
ticker1.options = nix(new, DEFAULT_TICKERS)
update()
def update(selected=None):
t1, t2 = ticker1.value, ticker2.value
data = get_data(t1, t2)
source.data = source.from_df(data[['t1', 't2', 't1_returns', 't2_returns']])
source_static.data = source.data
update_stats(data, t1, t2)
corr.title.text = '%s returns vs. %s returns' % (t1, t2)
ts1.title.text, ts2.title.text = t1, t2
def update_stats(data, t1, t2):
stats.text = str(data[[t1, t2, t1+'_returns', t2+'_returns']].describe())
ticker1.on_change('value', ticker1_change)
ticker2.on_change('value', ticker2_change)
def selection_change(attrname, old, new):
t1, t2 = ticker1.value, ticker2.value
data = get_data(t1, t2)
selected = source.selected['1d']['indices']
if selected:
data = data.iloc[selected, :]
update_stats(data, t1, t2)
source.on_change('selected', selection_change)
# set up layout
widgets = column(ticker1, ticker2, stats)
main_row = row(corr, widgets)
series = column(ts1, ts2)
layout = column(main_row, series)
# initialize
update()
curdoc().add_root(layout)
curdoc().title = "Stocks"
| bsd-3-clause |
NicovincX2/Python-3.5 | Physique/Mesure physique/Traitement du signal/Traitement numérique du signal/Filtrage_fort.py | 1 | 1223 | # -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
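# One-step explicit schemes y[k+1] = y[k] + h*Phi(k, t[k], y[k], h) applied to
# the first-order filter ODE y' = b*(e(t) - y); e(t) is the raw input 'eb' read
# from Filtrage_fort.csv, and the Euler and Heun solutions are compared against
# the pre-filtered reference column 'ef'.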
def X(t0, T, h):
return np.arange(t0, t0 + T, h)
def Y(t0, T, h, y0, Phi):
t = X(t0, T, h)
y = np.zeros(len(t))
y[0] = y0
for k in range(len(t) - 1):
y[k + 1] = y[k] + h * Phi(k, t[k], y[k], h)
return y
def F(k, t, y):
return b * (eb[k] - y)
def Euler(k, t, y, h):
return F(k, t, y)
def Heun(k, t, y, h):
return (F(k, t, y) + F(k + 1, t + h, y + h * F(k, t, y))) / 2
donnees = np.loadtxt('Filtrage_fort.csv', delimiter=';')
n, p = np.shape(donnees)
t0, T = 0, 1
y0 = 0
t, h = np.linspace(t0, t0 + T, num=n + 1, retstep=True)
b = 1000
eb = donnees[:, 0] # données brutes
ef = donnees[:, 1] # données filtrées
plt.grid()
plt.title('Comparison of the 2 methods from discrete data')
plt.xlabel('Values of $t$')
plt.ylabel('Values of $y$')
t = X(t0, T, h)
plt.plot(t, Y(t0, T, h, y0, Euler), 'b:', linewidth=2, label='Euler')
plt.plot(t, Y(t0, T, h, y0, Heun), 'y--', linewidth=2, label='Heun')
plt.plot(t, eb, 'k-.', linewidth=3, label='Raw input')
plt.plot(t, ef, 'k-', linewidth=1, label='Filtered input')
plt.legend(loc=0)
plt.show()
os.system("pause")
| gpl-3.0 |
pmaunz/pyqtgraph | pyqtgraph/widgets/MatplotlibWidget.py | 4 | 1576 | from ..Qt import QtGui, QtCore, USE_PYSIDE, USE_PYQT5
import matplotlib
if not USE_PYQT5:
if USE_PYSIDE:
matplotlib.rcParams['backend.qt4']='PySide'
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
try:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
except ImportError:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
else:
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MatplotlibWidget(QtGui.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
Use getFigure() and redraw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
def __init__(self, size=(5.0, 4.0), dpi=100):
QtGui.QWidget.__init__(self)
self.fig = Figure(size, dpi=dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.vbox = QtGui.QVBoxLayout()
self.vbox.addWidget(self.toolbar)
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
def getFigure(self):
return self.fig
def draw(self):
self.canvas.draw()
| mit |
harry0519/nsnqt | nsnqtlib/strategies/FundBGRIDStrategy.py | 1 | 19601 | # -*- coding:utf-8 -*-
from nsnqtlib.strategies.strategy import basestrategy,reportforms
import pandas as pd
import tushare as ts
from datetime import datetime
class FundBstrategy(basestrategy):
'''
重写买入条件和卖出条件,
'''
def __init__(self,startdate=(2011, 1, 1),enddate=[],emafast=12,emaslow=26,demday=9):
self.pre_MA = False
self.curr_MA = False
self.buyprice = 0
self.startingprice = 0
self.buytimes = 0
self.selltimes = 0
self.bought = False
self.tempstatus = []
self.collection = ''
self.procedurevol = ["stock", "date", "close", "startprice", "buytimes", "selltimes"]
super(FundBstrategy, self).__init__(startdate, enddate)
    # Fetch the list of stocks to trade
def import_stocklist(self, stocklistname):
df = pd.read_csv(str(stocklistname) + '.csv')
#df = pd.read_csv(str(stocklistname) + '.csv', parse_dates=['startdate'])
df['stock'] = df['stock'].astype('str')
count = 0
df_len = len(df.index)
while (count < df_len):
stock_name = str(df.iat[count, 0])
if len(stock_name) == 1:
stock_name = '00000' + stock_name
df.iat[count, 0] = stock_name
elif len(stock_name) == 2:
stock_name = '0000' + stock_name
df.iat[count, 0] = stock_name
elif len(stock_name) == 3:
stock_name = '000' + stock_name
df.iat[count, 0] = stock_name
elif len(stock_name) == 4:
stock_name = '00' + stock_name
df.iat[count, 0] = stock_name
elif len(stock_name) == 5:
stock_name = '0' + stock_name
df.iat[count, 0] = stock_name
count = count + 1
return df
def setlooplist(self,lst=[]):
if not lst:
self.looplist = self.m.getallcollections("etf")
else:
self.looplist = lst
return self.looplist
def _getdata(self,collection="600455.SH",db="ml_security_table",out=[],isfilt=True,filt={}):
self.collection = collection
if db == "tushare":
#d1 = datetime.datetime.now()
#d2 = d1 + datetime.timedelta(-240)
#d1 = d1.strftime('%Y-%m-%d')
#d2 = d2.strftime('%Y-%m-%d')
#query = ts.get_hist_data(collection, start=d2, end=d1, )
query = ts.get_hist_data(collection, start='2012-01-01', end='2017-02-03')
query['date'] = query.index
query = query.sort_index(axis=0, ascending=True)
query['pre_close'] = query['close'].shift(1)
query.to_csv(collection + 'new.csv')
return query
elif db == 'local':
query = pd.read_csv(str(collection) + '.csv')
#out = self.formatlist
return query
else:
if not out: out = self.formatlist
if isfilt and not filt: filt = {"date": {"$gt": self.startdate}}
query = self.m.read_data(db, collection, filt=filt)
#query.to_csv(collection)
#df = pd.DataFrame(query)
#df.to_csv(collection+'new.csv')
print(query)
print('downloaded')
return self.formatquery(query, out)
def historyreturn(self, collection, par):
trading_record = []
holding_record = []
data = self._getdata(collection,'ab')
print(data)
#data = self._getdata(collection, "tushare")
#df = pd.DataFrame(data)
#df.to_csv(collection+'new.csv')
#datalen = len(data)
#if datalen <= 200: return trading_record, holding_record, False
self.selltimes = 0
self.buytimes = 0
self.startingprice = 0
self.bought = False
lst = [l for l in data[self.formatlist].fillna(0).values if l[1] != 0]
count = 0
for line in lst[:]:
isbuy = self.buy(lst, count, par)
for b in holding_record[:]:
issell, traderecord = self.sell(lst, count, b)
if issell:
holding_record.remove(b)
trading_record.append(traderecord)
break
if isbuy:
print(collection)
holding_record.append(([i for i in line], count, collection))
print(count)
count += 1
self.trading_records = trading_record
self.savetrading2db(db="regressiontraderesult", strategyname="strategytradersult")
return trading_record, holding_record
    # The last element of the return value indicates whether more than 200 trading days of data are available
def dailyupdate(self, collection, par):
trading_record = []
holding_record = []
out = ["stock", "date", "close", "startprice", "buytimes", "selltimes"]
        # Download the stock data
data = self._getdata(collection, 'ab')
#data = self._getdata(collection,"tushare")
datalen = len(data)
if datalen <= 200: return trading_record, holding_record, False
lst = [l for l in data[self.formatlist].fillna(0).values if l[1] != 0]
        # Download the historical trading data
df = self._getdata(collection, "etfgrid", out=out, isfilt=False)
df_len = len(df.index) - 1
#df.to_csv(collection+'new_df.csv')
self.selltimes = df['selltimes'].iloc[df_len].astype('float64')
#self.selltimes = 0
#print('selltimes:' + str(self.selltimes))
self.buytimes = df['buytimes'].iloc[df_len].astype('float64')
#self.buytimes = 0
#print('buytimes:' + str(self.buytimes))
self.startingprice = df['startprice'].iloc[df_len].astype('float64')
#self.startingprice = 0
#print('startingprice:' + str(self.startingprice))
self.bought = False
if self.buytimes > 0:
self.bought = True
count = datalen-10
for line in lst[datalen-10:datalen]:
#print(line)
isbuy = self.buy(lst, count, par)
for b in holding_record[:]:
issell, traderecord = self.sell(lst, count, b)
if issell:
holding_record.remove(b)
trading_record.append(traderecord)
break
if isbuy:
print(collection)
holding_record.append(([i for i in line], count, collection))
print(count)
count += 1
return trading_record, holding_record, True
def looplist_historyreturn(self, df, actiontype="regression"):
buy = []
sell = []
buylist = []
selllist = []
error_list = []
count = 0
df_len = len(df.index)
column_num = len(df.count())
#df_len = 2
while (count < df_len):
columncount = 1
par = []
while (columncount < column_num):
par.append(df.iat[count, columncount])
columncount = columncount + 1
print(par)
#stock_name = str(df.iat[count, 'stock'])
stock_name = str(df.ix[count, 'stock'])
self.lateststatus = []
#try:
if actiontype == 'regression':
tr,hr = self.historyreturn(stock_name, par)
#self.lateststatus.append(self.tempstatus)
self.trading_records.extend(tr)
self.holding_records.extend(hr)
self.saveprocedure2db(collection=stock_name)
elif actiontype == 'dailyupdate':
tr,hr = self.dailyupdate(stock_name, par)
#self.lateststatus.append(self.tempstatus)
self.trading_records.extend(tr)
self.holding_records.extend(hr)
self.saveprocedure2db(collection=stock_name)
elif actiontype == 'trade':
print(stock_name)
#stock_name = stock_name[0:6]
#print(stock_name)
buy, sell = self.getprocedure(isdb=True, collection=stock_name)
buylist.extend(buy)
selllist.extend(sell)
#print(buy)
#print(sell)
#except:
#error_list.append(stock_name)
count = count + 1
print(error_list)
print('buylist:')
print(buylist)
print('selllist:')
print(selllist)
return self.trading_records,self.holding_records, buylist, selllist
def buy(self, lst, count, par):
''' input:
line: [] ,row data in every stock data,default is in self.formatlist = ["date","volume","close","high","low","open","pre_close"]
count: float, the number to the row since first row
ouput:
bool, can buy or not buy
[], buy record,if can't buy,is empty list
'''
rst = False
#vol_day = 10
#price_day = 60
#vol_weight = 1.2
#count = len(lst)
#if count <= 200: return rst
#if count <= 200: return False
dat = lst[count][0]
close = lst[count][2]
pre_close = lst[count][6]
#position = self.getposition(lst,dat)
#print(position)
#position = lst_index[0] / lst_len
#print(position)
#lst[count][6] = position
#and self.condition7(close, par[0]) and self.condition9(close, pre_close)
#if self.condition10(close) and self.condition9(close, pre_close) and self.MA_condition(lst, count):
if self.ETFGridbuycondition2(close, float(par[0])) and self.bought == False:
#self.startingprice = close
self.startingprice = float(par[0])
#print('startingprice'+str(self.startingprice)+' ')
#print('statingdate'+str(dat))
if self.ETFGridbuycondition1(close, self.startingprice, self.buytimes) and self.startingprice > 0:
#print('buy:'+str(close))
self.buytimes = self.buytimes + 1
self.bought = True
rst = True
self.setprocedure(lst, count)
self.lateststatus.append(self.tempstatus)
return rst
def sell(self, lst, count, buyrecord):
currentday_high = lst[count][3]
gain_grads = 0.2
loss_grads = -0.05
dayout = 60
currentday_low = lst[count][4]
sell_date = lst[count][0]
close = lst[count][2]
high = lst[count][3]
low = lst[count][4]
buy_price = buyrecord[0][2]
hold_days = count - buyrecord[1]
#hold_days = float(holdd)
buy_date = buyrecord[0][0]
collection = buyrecord[2]
#if self.holdingtime_condition(hold_days, dayout) or self.ETFGridsellcondition1(high, self.startingprice, self.selltimes):
if self.ETFGridsellcondition1(high, self.startingprice, self.selltimes):
sell_date = sell_date.strftime('%Y-%m-%d')
buy_date = buy_date.strftime('%Y-%m-%d')
self.selltimes = self.selltimes + 1
self.buytimes = self.buytimes - 1
print('sell date:'+str(sell_date)+' sell price:'+str(close))
print('sell times:'+str(self.selltimes))
print('buytimes @ sell: '+str(self.buytimes))
if self.buytimes == 0:
self.bought = False
self.startingprice = 0
self.selltimes = 0
print('self.bought: False')
#if self.selltimes > 5:
#self.selltimes = 0
return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
return False, None
def getposition(self,lst, dat):
count = len(lst)
#print(count)
if count <= 200: return False
new_lst = lst.copy()
new_lst.sort(key=lambda x: x[3])
l = [x[0] for x in new_lst]
lst_index = self.find_index(l, dat)
lst_len = len(l)
position = lst_index[0] / lst_len
#lst[count][6] = position
return position
    # Fetch real-time quotes and compare them with the historical backtest data to detect trading opportunities
def getprocedure(self, filename="procedure_records.csv", isdb=False, collection="processstatus", db="etfgrid"):
'''"stock","date","data","s_ema","f_ema","diff","dem","macd","status"
'''
buy = []
sell = []
newlist = []
newdatalist = []
out = ["stock", "date", "close", "startprice", "buytimes", "selltimes"]
if isdb:
#df = self._getdata(collection, db, out=out, isfilt=False)[out]
df = self._getdata(collection, db, out=out, isfilt=False)
#print(df)
else:
#df = pd.read_csv(filename)[out]
df = pd.read_csv(filename)
#df.to_csv(collection)
#datalen = len(df.index)
#if datalen < 200: return buy, sell, False
#print('downloaded')
#print(df)
print(df)
print(collection)
stock = str(df['stock'].iloc[0])
print('stock name:')
print(stock)
print('realtime quetes:')
print(collection[0:6])
collection = collection[0:6]
new_df = ts.get_realtime_quotes(collection)
#print(new_df)
price = float(new_df['ask'].iloc[0])
high = float(new_df['high'].iloc[0])
#price = 0.89
df_len = len(df.index) - 1
# if df_len < 200: return buy, sell
startprice = df['startprice'].iloc[df_len]
buynumber = df['buytimes'].iloc[df_len]
sellnumber = df['selltimes'].iloc[df_len]
print('startprice:'+str(startprice))
print('buynumber:' + str(buynumber))
print('sellnumber:' + str(sellnumber))
if buynumber == 0:
if price < startprice:
buy.append(collection)
elif self.ETFGridbuycondition1(price, startprice, buynumber) and buynumber > 0:
#elif price < startprice:
buy.append(collection)
elif self.ETFGridsellcondition1(high, startprice, sellnumber):
sell.append(collection)
#print(buy)
return buy, sell
def setprocedure(self, lst, count):
dat = lst[count][0]
close = lst[count][2]
self.tempstatus = [self.collection, dat, close, self.startingprice, self.buytimes,self.selltimes]
def saveprocedure(self,filename="procedure_records.csv"):
df = pd.DataFrame(self.lateststatus,columns=self.procedurevol)
#df = pd.DataFrame(self.lateststatus)
df.to_csv(filename)
return
def saveprocedure2db(self, db="etfgrid", collection="processstatus"):
self.lateststatus
#print('daily update data:')
#print(self.lateststatus)
db = eval("self.m.client.{}".format(db))
bulk = db[collection].initialize_ordered_bulk_op()
for line in self.lateststatus:
bulk.find({'date': line[1]}).upsert().update( \
{'$set': {'stock': line[0], \
'date': line[1], \
'close': line[2], \
'startprice': line[3], \
'buytimes': line[4], \
'selltimes': line[5], \
}})
bulk.execute()
return
'''
def savetraderecord2db(self, db="etfgrid", collection="traderecords"):
db = eval("self.m.client.{}".format(db))
bulk = db[collection].initialize_ordered_bulk_op()
for line in self.lateststatus:
bulk.find({'date': line[1]}).upsert().update( \
{'$set': {'stock': line[0], \
'close': line[2], \
'startprice': line[3], \
'buytimes': line[4], \
'selltimes': line[5], \
}})
bulk.execute()
return
'''
'''
def savetraderecord2db(self, data, db="etfgrid", collection="traderecords"):
if not data: return
localbackup = []
db = eval('self.m.client.{}'.format(db))
print(data)
for line in data:
#buydate = line[1]
#buydate = buydate.strftime('%Y-%m-%d')
localbackup.append({"stock": '500123', "buy_date": line[1], "sell_date": line[2], "holddays": " ", "profit":"", "features":""})
db[collection].insert_many(localbackup)
print('save trade record to db')
return
'''
def stopgain_condition(self, buy_price, current_price, grads=0.1):
if (current_price - buy_price) / buy_price >= grads:
return True
return False
def stoploss_condition(self, buy_price, current_price, grads=-0.05):
if (current_price - buy_price) / buy_price <= grads:
return True
return False
def holdingtime_condition(self, hold_days, dayout=10):
if hold_days >= dayout:
return True
return False
def ETFGridbuycondition1(self, close, startingprice, buytime):
if close <= startingprice *(1- buytime * 0.05) and buytime < 6:
#print(self.buytimes)
#print(str(startingprice *(1- self.buytimes * 0.05)))
return True
return False
def ETFGridbuycondition2(self, price, startprice):
if price < startprice:
return True
return False
def ETFGridsellcondition1(self, close, startingprice, selltime):
#if close > startingprice * (1.1 + selltime*0.05) and self.bought == True:
if close > startingprice * 1.1 and self.bought == True:
#print('sell times: '+str(self.selltimes))
return True
return False
def find_index(self, arr, item):
return [i for i, a in enumerate(arr) if a == item]
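# Illustrative sketch of the grid levels implied by the conditions above
# (numbers are examples only): each additional 5% drop below the starting
# price allows one more buy (up to 6 tranches), while a 10% rise above the
# starting price triggers the sell condition.
def _grid_levels_example(startprice=1.0, max_buys=6):
    buy_levels = [startprice * (1 - k * 0.05) for k in range(max_buys)]
    sell_level = startprice * 1.1
    return buy_levels, sell_level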
if __name__ == '__main__':
s = FundBstrategy()
#df_stocklist = s.import_stocklist("fundb")
#print(df_stocklist)
formatlist = ['stock', 'startprice']
df_stocklist = s._getdata('FundBstrategy', 'strategyconfig',formatlist,isfilt=False)
df_stocklist = df_stocklist[['stock', 'startprice']]
#print(newdf)
print(df_stocklist)
#stocklst = s.setlooplist()
#df_stocklist = pd.DataFrame(s.setlooplist())
#stock = df_stocklist.iat[0,0]
#print("test:"+stock)
#df.iat[count, 0]
#s.setlooplist()'
s.looplist_historyreturn(df_stocklist,actiontype="regression")
s.savetrading2csv()
s.savetrading2db(strategyname='FundBGrid')
#data = s._getdata('traderecords')
#print('trade records:')
#print(data)
#s.saveholding2csv()
'''
df = pd.read_csv('trading_records.csv')
report = reportforms(df)
report.cumulative_graph()
report.positiongain(20)
'''
#buy = s.getprocedure('procedure_records.csv')
#print(s.tempstatus)
#print(s.lateststatus)
#df = pd.DataFrame(s.lateststatus)
#df.to_csv('lateststatus.csv')
#s.saveprocedure()
#s.saveprocedure2db(collection=stock)
#new_df = ts.get_realtime_quotes(stock)
#print(new_df)
#print(new_df['ask'].iloc[0])
#print(new_df.ix[0, 'ask'])
#print(new_df[['code','name','price','bid','ask','volume','amount','time']])
'''
ls = s.getcurrentdata()
new_df = pd.DataFrame(ls)
new_df.to_csv('new_df.csv')
print(ls)
'''
#df = pd.read_csv('trading_records.csv')
#report = reportforms(df)
#report.cumulative_graph()
#report.positiongain(100)
| bsd-2-clause |
philouc/pyhrf | python/pyhrf/tools/misc.py | 1 | 64867 | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import os
import os.path as op
import sys
import numpy as np
import scipy.linalg
import scipy.signal
import string
import cPickle
import hashlib
import gzip
from itertools import izip
import datetime
import inspect
from time import time
import re
from collections import defaultdict
import pyhrf
try:
from joblib.memory import MemorizedFunc
except ImportError:
class MemorizedFunc: pass #Dummy class
try:
from itertools import product as iproduct
except ImportError:
def iproduct(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
def is_importable(module_name, func_name=None):
""" Return True if given *module_name* (str) is importable """
try:
__import__(module_name)
except ImportError:
return False
else:
if func_name is None:
return True
else:
            return func_name in dir(sys.modules[module_name])
def report_arrays_in_obj(o):
for a in dir(o):
attr = eval('o.%s' %a)
if isinstance(attr, np.ndarray):
print a,'->', attr.shape, attr.dtype, '[id=%s]' %str(id(attr))
elif (isinstance(attr, list) or isinstance(attr, tuple)) and len(attr)>0 and \
isinstance(attr[0], np.ndarray):
print a, '-> list of %d arrays (first array: %s, %s), [id=%s]' \
%(len(attr), str(attr[0].shape), str(attr[0].dtype),str(id(attr)))
elif isinstance(attr, dict) \
and len(attr)>0 and isinstance(attr[attr.keys()[0]], np.ndarray):
print a, 'is a dict of arrays comprising:'
for k,v in attr.iteritems():
print k, '->'
report_arrays_in_obj(v)
print 'end of listing dict "%s"' %a
def has_ext(fn, ext):
if fn.endswith('gz'):
return fn.split('.')[-2] == ext
else:
return fn.split('.')[-1] == ext
def replace_ext(fn, ext):
if fn.endswith('gz'):
return '.'.join(fn.split('.')[:-2]+[ext,'gz'])
else:
return '.'.join(fn.split('.')[:-1]+[ext])
def add_suffix(fn, suffix):
""" Add a suffix before file extension.
>>> add_suffix('./my_file.txt', '_my_suffix')
'./my_file_my_suffix.txt'
"""
if suffix is None:
return fn
sfn = op.splitext(fn)
if sfn[1] == '.gz':
sfn = op.splitext(fn[:-3])
sfn = (sfn[0], sfn[1] + '.gz')
return sfn[0] + suffix + sfn[1]
def add_prefix(fn, prefix):
""" Add a prefix at the beginning of a file name.
>>> add_prefix('./my_file.txt', 'my_prefix_')
    './my_prefix_my_file.txt'
"""
if prefix is None:
return fn
sfn = op.split(fn)
if sfn[1] == '.gz':
sfn = op.splitext(fn[:-3])
sfn = (sfn[0], sfn[1] + '.gz')
return op.join(sfn[0], prefix + sfn[1])
def assert_path_not_in_src(p):
p = op.realpath(p)
src_path = op.realpath(op.join(op.dirname(pyhrf.__file__),'../../'))
if op.commonprefix([p, src_path]) == src_path:
raise Exception('Directory %s is in source path' %p)
def assert_file_exists(fn):
if not op.exists(fn):
raise Exception('File %s does not exists' %fn)
def non_existent_canditate(f, start_idx=1):
yield f
i = start_idx
while True:
yield add_suffix(f,'_%d' %i)
i += 1
def non_existent_file(f):
for f in non_existent_canditate(f):
if not op.exists(f):
return f
# s = suffix_int()
# in_f = f
# for f in
# while op.exists(f):
# print f, 'exists'
# f = add_suffix(in_f,s.next())
# return f
def do_if_nonexistent_file(*dargs, **kwargs):
force = kwargs.get('force', False)
vlevel = kwargs.get('verbose', 1)
def wrapper(func):
def wrapper(*args,**kwargs):
if force:
return func(*args,**kwargs)
ins_a,_,_, d = inspect.getargspec(func)
# print 'args:', args
# print 'ins_a:', ins_a
# print 'func.func_code.co_varnames;', func.func_code.co_varnames
do_func = False
checked_fns = []
for a in dargs:
if a in kwargs:
fn = kwargs[a]
else:
try:
iarg = func.func_code.co_varnames.index(a)
fn = args[iarg]
# print 'iarg:', iarg
except (IndexError, ValueError):
try:
la, ld = len(ins_a), len(d)
i = ins_a[la-ld:].index(a)
except (IndexError, ValueError):
msg = 'Error when defining decorator '\
'do_if_nonexistent_file: '\
'"%s" is not a valid '\
'argument of func %s' \
%(a,func.func_name)
raise Exception(msg)
fn = d[i]
if not isinstance(fn, str):
raise Exception('Arg %s should be a string, %s found'\
%(fn, type(fn)))
if not op.exists(fn):
pyhrf.verbose(vlevel, 'func %s executed because file "%s" does'\
' not exist' %(func.func_name, fn))
do_func = True
break
checked_fns.append(fn)
if do_func:
return func(*args,**kwargs)
pyhrf.verbose(vlevel, 'func %s not executed because file(s) '\
'exist(s)' %func.func_name)
pyhrf.verbose(vlevel+1, '\n'.join(['-> '+ f for f in checked_fns]))
return None
return wrapper
return wrapper
def cartesian(*sequences):
"""
Generate the "cartesian product" of all 'sequences'. Each member of the
product is a list containing an element taken from each original sequence.
Note: equivalent to itertools.product, which is at least 2x faster !!
"""
length = len(sequences)
if length < 5:
# Cases 1, 2 and 3, 4 are for speed only, these are not really required.
if length == 4:
for first in sequences[0]:
for second in sequences[1]:
for third in sequences[2]:
for fourth in sequences[3]:
yield [first, second, third, fourth]
elif length == 3:
for first in sequences[0]:
for second in sequences[1]:
for third in sequences[2]:
yield [first, second, third]
elif length == 2:
for first in sequences[0]:
for second in sequences[1]:
yield [first, second]
elif length == 1:
for first in sequences[0]:
yield [first]
else:
yield []
else:
head, tail = sequences[:-1], sequences[-1]
for result in cartesian(*head):
for last in tail:
yield result + [last]
def cartesian_combine_args(varying_args, fixed_args=None):
"""
Construst the cartesian product of varying_args and append fixed_args to it.
'varying_args': Specify the arguments which are varying as a dict mapping
arg names to iterables of arg values.
e.g:
{ 'my_arg1' : ['a','b','c'],
'my_arg2' : [2, 5, 10],
}
'fixed_args' : Specify the argument which remain constant as a dict mapping
arg names to arg values
e.g:
{ 'my_arg3' : ['fixed_value'] }
Example:
>>> from pyhrf.tools import cartesian_combine_args
>>> vargs = {
'my_arg1' : ['a','b','c'],
'my_arg2' : [2, 5, 10],
}
>>> fargs = { 'my_arg3' : 'fixed_value' }
>>> cartesian_combine_args(vargs, fargs)
[{'my_arg1': 'a', 'my_arg2': 2, 'my_arg3': 'fixed_value'},
{'my_arg1': 'b', 'my_arg2': 2, 'my_arg3': 'fixed_value'},
{'my_arg1': 'c', 'my_arg2': 2, 'my_arg3': 'fixed_value'},
{'my_arg1': 'a', 'my_arg2': 5, 'my_arg3': 'fixed_value'},
{'my_arg1': 'b', 'my_arg2': 5, 'my_arg3': 'fixed_value'},
{'my_arg1': 'c', 'my_arg2': 5, 'my_arg3': 'fixed_value'},
{'my_arg1': 'a', 'my_arg2': 10, 'my_arg3': 'fixed_value'},
{'my_arg1': 'b', 'my_arg2': 10, 'my_arg3': 'fixed_value'},
{'my_arg1': 'c', 'my_arg2': 10, 'my_arg3': 'fixed_value'}]
"""
if fixed_args is None:
fixed_args = {}
return [dict(zip(varying_args.keys(),vp) + fixed_args.items()) \
for vp in iproduct(*varying_args.values())]
def icartesian_combine_args(varying_args, fixed_args=None):
"""
Same as cartesian_combine_args but return an iterator over the
list of argument combinations
"""
if fixed_args is None:
fixed_args = {}
return (dict(zip(varying_args.keys(),vp) + fixed_args.items()) \
for vp in iproduct(*varying_args.values()))
def cartesian_apply(varying_args, func, fixed_args=None):
args_iter = icartesian_combine_args(varying_args, fixed_args)
return [func(**kwargs) for kwargs in args_iter]
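# Minimal usage sketch for cartesian_apply (argument names and values are
# hypothetical): every combination of the varying args is passed to the
# function together with the fixed args; results come back as a flat list.
def _cartesian_apply_example():
    varying = {'x': [1, 2, 3], 'y': [10, 20]}
    fixed = {'offset': 0.5}
    # 6 combinations -> 6 results
    return cartesian_apply(varying, lambda x, y, offset: x * y + offset,
                           fixed_args=fixed)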
def format_duration(dt):
s = ''
if dt/3600 >= 1:
s += '%dH' %int(dt/3600)
dt = dt%3600
if dt/60 >= 1:
s += '%dmin' %int(dt/60)
dt = int(dt%60)
s += '%1.3fsec' %dt
return s
import pyhrf.ndarray
def swapaxes(array, a1, a2):
if isinstance(array, np.ndarray):
return np.swapaxes(array, a1, a2)
elif isinstance(array, pyhrf.ndarray.xndarray):
return array.swapaxes(a1, a2)
else:
raise Exception('Unknown array type: %s' %str(type(array)))
def rescale_values(a, v_min=0., v_max=1., axis=None):
a = a.astype(np.float64) #make sure that precision is sufficient
a_min = a.min(axis=axis)
a_max = a.max(axis=axis)
if axis is not None and axis != 0:
a = np.swapaxes(a, 0, axis) #make target axis be the 1st to enable bcast
res = (v_min - v_max)*1. / (a_min - a_max) * (a - a_max) + v_max
if axis is not None and axis != 0:
res = np.swapaxes(res, 0, axis) #reposition target axis at original pos
return res
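# Illustrative sketch: map an arbitrary array onto the [0, 1] range with
# rescale_values (input values chosen for the example only).
def _rescale_values_example():
    a = np.array([2., 4., 6., 10.])
    # the minimum maps to 0. and the maximum to 1.
    return rescale_values(a, v_min=0., v_max=1.)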
def cartesian_params(**kwargs):
keys = kwargs.keys()
for p in cartesian(*kwargs.itervalues()):
yield dict(zip(keys, p))
def cartesian_eval(func, varargs, fixedargs=None):
resultTree = {}
if fixedargs is None:
fixedargs = {}
#print 'varargs.keys():', varargs.keys()
for p in cartesian_params(**varargs):
fargs = dict(p.items() + fixedargs.items())
#print 'fargs:', fargs
#print 'p.keys:', p.keys()
#print 'p.values:', [p[k] for k in varargs.iterkeys()]
set_leaf(resultTree, [p[k] for k in varargs.iterkeys()], func(**fargs))
return varargs.keys(), resultTree
class PickleableStaticMethod(object):
def __init__(self, fn, cls=None):
self.cls = cls
self.fn = fn
self.__name__ = fn.__name__
#self.im_func = fn.im_func
def __call__(self, *args, **kwargs):
if self.cls is None:
return self.fn(*args, **kwargs)
else:
return self.fn(self.cls, *args, **kwargs)
def __get__(self, obj, cls):
return PickleableStaticMethod(self.fn, cls)
def __getstate__(self):
return (self.cls, self.fn.__name__)
def __setstate__(self, state):
self.cls, name = state
self.fn = getattr(self.cls, name).fn
def cuboidPrinter(c):
print c.descrip()
def my_func(**kwargs):
from pyhrf.ndarray import xndarray
return xndarray(np.zeros(kwargs['shape']) + kwargs['val'])
def cartesian_test():
branchLabels, resTree = cartesian_eval(my_func, {'shape':[(2,5),(6,8)],
'val':[4,1.3]})
pprint(resTree)
apply_to_leaves(resTree, cuboidPrinter)
def crop_array(a, m=None, extension=0):
"""
    Return a sub-array where as many zeros as possible are discarded.
    The bounding box of the mask is enlarged by *extension* voxels on each side.
"""
if m is None:
m = np.where(a!=0.)
else:
m = np.where(m!=0)
mm = np.vstack(m).transpose()
#print 'mm', mm
#print mm.ptp(0)
d = np.zeros(tuple(mm.ptp(0)+1+2*extension), dtype=a.dtype)
#print 'tuple((mm-mm.min(0)).transpose()):'
#print tuple((mm-mm.min(0)).transpose())
d[tuple((mm-mm.min(0)+extension).transpose())] = a[m]
return d
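# crop_array sketch: discard the zero border around a small non-zero patch,
# keeping a one-voxel margin around its bounding box (toy data).
def _crop_array_example():
    a = np.zeros((6, 6))
    a[2:4, 2:4] = 5.
    return crop_array(a, extension=1)   # -> shape (4, 4) with the patch centered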
def buildPolyMat(paramLFD, n ,dt):
regressors = dt*np.arange(0, n)
timePower = np.arange(0,paramLFD+1, dtype=int)
regMat = np.zeros((len(regressors),paramLFD+1),dtype=float)
pyhrf.verbose(2, 'regMat: %s' %str(regMat.shape))
for v in xrange(paramLFD+1):
regMat[:,v] = regressors[:]
tPowerMat = np.tile(timePower, (n, 1))
lfdMat = np.power(regMat,tPowerMat)
lfdMat = np.array(scipy.linalg.orth(lfdMat))
#print 'lfdMat :', lfdMat
return lfdMat
def polyFit(signal, tr=1., order=5):
"""
Polynomial fit of signals.
'signal' is a 2D matrix with first axis being time and second being position.
'tr' is the time resolution (dt).
    'order' is the order of the polynomial.
    Return the orthogonal polynomial basis matrix (P) and the fitted
    coefficients (l), such that P.l yields the fitted polynomials.
"""
n = len(signal)
print 'n:', n, 'tr:', tr
p = buildPolyMat(order, n, tr)
ptp = np.dot(p.transpose(),p)
invptp = np.linalg.inv(ptp)
invptppt = np.dot(invptp, p.transpose())
l = np.dot(invptppt,signal)
return (p, l)
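# Sketch of a detrending step based on polyFit (synthetic drifted signal,
# sizes and order are arbitrary): the low-frequency component P.l is
# subtracted from the data.
def _polyFit_example():
    n, nvox = 100, 5
    t = np.arange(n)[:, np.newaxis]
    signal = 0.01 * t + np.random.randn(n, nvox)   # linear drift + noise
    p, l = polyFit(signal, tr=1., order=3)
    return signal - np.dot(p, l)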
def undrift(signal, tr, order=5):
"""
Remove the low frequency trend from 'signal' by a polynomial fit.
Assume axis 3 of 'signal' is time.
"""
print 'signal:', signal.shape
m = np.where(np.ones(signal.shape[:3]))
sm = string.join(['m[%d]'%d for d in xrange(signal.ndim-1)],',')
signal_flat = eval('signal[%s,:]' %sm)
print 'signal_flat:', signal_flat.shape
# Least square estimate of drift
p,l = polyFit(signal_flat.transpose(), tr, order)
usignal_flat = signal_flat.transpose() - np.dot(p,l)
usignal = np.zeros_like(signal)
print 'usignal:', usignal.shape
exec('usignal[%s,:] = usignal_flat.transpose()' %sm)
return usignal
def root3(listCoeffs):
length = len(listCoeffs)
if length != 4:
raise polyError(listCoeffs,"wrong poly order")
if listCoeffs[0]==0:
raise polyError(listCoeffs[0],"wrong poly order:null coefficient")
    a = listCoeffs[1]/listCoeffs[0]
    b = listCoeffs[2]/listCoeffs[0]
    c = listCoeffs[3]/listCoeffs[0]
    # change of variables: z = x - a/3
    # polynomial Q(Z) = Z^3 - p*Z - q
#np.sqrt(np.complex(-1))
p = a**2/3. -b
q= (a*b)/3. -2./27.*a**3-c
if np.abs(p)<1e-16:
polycoeffs = np.zeros((1,3),dtype=complex)
polycoeffs[0] = 1
polycoeffs[1] = (1j)**(4/3.)
polycoeffs[2] = (-1j)**(4/3.)
rp = np.multiply(polycoeffs,(np.sign(q)*q)**(1/3.)) - a/3.
elif p<0:
t2 = 2*p/3./q*np.sqrt(-p/3.)
tho = ( (np.sqrt(1.+t2**2) -1)/t2)**(1/3.)
tho2 = 2.*tho/(1-tho**2)
tho3 = 2.*tho/(1+tho**2)
rp = - a/3.*np.ones((1,3),dtype=complex)
fracTho2 = np.sqrt(-p/3.)/tho2
fracTho3 = np.sqrt(-p)/tho3
rp[0] += -2.*fracTho2
rp[1] += fracTho2 + 1j*fracTho3
rp[2] += fracTho2 - 1j*fracTho3
else:
if np.abs((p/3.)**3 - q**2/2.)<1e-16:
rp = - a/3.*np.ones((1,3),dtype=float)
rp[0] +=-3 *q/2./p
rp[1] +=-3 *q/2./p
rp[2] += 3.* q/p
def gaussian_kernel(shape):
""" Returns a normalized ND gauss kernel array for convolutions """
grid = eval('np.mgrid[%s]'%(string.join(['-%d:%d+1' %(s,s) for s in shape],',')))
k = 1./np.prod([np.exp((grid[d]**2/float(shape[d]))) for d in xrange(len(shape))],axis=0)
return k / k.sum()
def gaussian_blur(a, shape):
assert a.ndim == len(shape)
k = gaussian_kernel(shape)
return scipy.signal.convolve(a, k, mode='valid')
def foo(*args, **kwargs):
pass
class polyError(Exception):
def __init__(self, expression,message):
self.expression = expression
self.message = message
def convex_hull_mask(mask):
"""
Compute the convex hull of the positions defined by the given binary mask
Args:
        - mask (numpy.ndarray): binary mask of positions to build the convex hull from
Return:
a numpy.ndarray binary mask of positions within the convex hull
"""
from scipy.spatial import Delaunay
hull = Delaunay(np.array(np.where(mask)).T)
result = np.zeros_like(mask)
m = np.where(np.ones_like(mask))
result[m] = hull.find_simplex(np.array(m).T)>=0
return result
def peelVolume3D(volume, backgroundLabel=0):
# Make sure that boundaries are filled with zeros
# -> expand volume with zeros:
vs = volume.shape
expVol = np.zeros((vs[0]+2, vs[1]+2, vs[2]+2), dtype=int)
expVol[1:-1, 1:-1, 1:-1] = volume!=backgroundLabel
# 27-neighbourhood mask:
neighbMask = np.array([c for c in cartesian([0,-1,1],
[0,-1,1],
[0,-1,1])][1:])
mask = np.where(expVol!=0)
coords = np.array(mask).transpose()
# For each position, compute the number of valid neighbours:
marks = np.zeros_like(expVol)
for iv in xrange(len(mask[0])):
cn = (neighbMask + coords[iv]).transpose()
curNeighbMask = (cn[0], cn[1], cn[2])
marks[mask[0][iv],mask[1][iv],mask[2][iv]] = expVol[curNeighbMask].sum()
# Let's go back to the original volume shape:
trimmedMarks = marks[1:-1,1:-1,1:-1]
# Keep only positions which have 26 neighbours (completely burried):
peeledVolume = np.zeros_like(volume)
validPos = np.where(trimmedMarks==26)
peeledVolume[validPos] = volume[validPos]
return peeledVolume
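# Peeling sketch: strip the outer layer of a small labelled cube; only voxels
# with all 26 neighbours inside the mask survive (toy volume).
def _peelVolume3D_example():
    vol = np.zeros((5, 5, 5), dtype=int)
    vol[1:4, 1:4, 1:4] = 1          # a 3x3x3 block of label 1
    # only the central voxel of the block is fully surrounded
    return peelVolume3D(vol)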
def distance(c1, c2, coord_system=None):
#TODO: use coordinate system (xform)
return ((c1-c2)**2).sum()**.5
def inspect_default_args(args, defaults):
if defaults is None:
return {}
kw_defaults = {}
firstdefault = len(args) - len(defaults)
for i in range(firstdefault, len(args)):
kw_defaults[args[i]]= defaults[i - firstdefault]
return kw_defaults
class Pipeline:
THE_ROOT = 0
def __init__(self, quantities): #, cached=False, cache_dir='./'):
"""
Handles a graph of quantities. A quantity can be a variable or
a callable.
"""
self.roots = set([]) # will store roots, ie quantities
# which have no dependency
self.quantities = {} # mapping value_label => compute function
# will hold all labels:
self.labels = set()
self.siblings = {} # sibling labels associated to the same quantity
# -> eg when a func returns multiple values
for l, q in quantities.iteritems():
if isinstance(l, (tuple,list)):
for e in l:
self.quantities[e] = q
self.siblings[e] = l
self.labels.update(l)
else:
self.quantities[l] = q
self.siblings[l] = (l,)
self.labels.add(l)
pyhrf.verbose(4, 'labels at init: %s (%d)' \
%(str(self.labels),len(self.labels)))
#self.load_from_cache = dict([ (label,False) for label in self.labels ])
#self.force_eval = dict([ (label,False) for label in self.labels ])
self.dependencies = dict( (l,set()) for l in self.labels )
self.dependers = dict( (l,set()) for l in self.labels )
#print 'dependers at init:', self.dependers
self.dependers[self.THE_ROOT] = set() # virtual common root of the forest
self.values = {} # will hold all values
# self.cached = cached
# self.cache_dir = cache_dir
#assert len(self.labels) == len(quantities)
self.init_dependencies(quantities)
def add_root(self, label):
self.roots.add(label)
# Virtually plant the forest at a common root:
self.dependers[self.THE_ROOT].add(label)
def init_dependencies(self, quantities):
pyhrf.verbose(3,'Pipeline.init_dependencies ...')
for labels, val in quantities.iteritems():
if not isinstance(labels, (list, tuple)):
labels = (labels,)
pyhrf.verbose(4,'treating quantities: %s' %str(labels))
pyhrf.verbose(4,'val: %s' %str(val))
func = self.get_func(val)
if func is not None:
pyhrf.verbose(4, '... is a function')
arg_spec = inspect.getargspec(func)
args = arg_spec[0]
#print 'args:', args
for label in labels:
#print 'treating label:', label
assert label not in args
for arg in args:
#print 'arg:', arg
if self.dependers.has_key(arg):
#print '-> in dependers!'
self.dependencies[label].add(arg)
self.dependers[arg].add(label)
else:
# print arg
# print len(args)-len(arg_spec[3])
# print 'args:', args
# print 'arg_spec[3]:', arg_spec[3]
# print 'args[len(args)-len(arg_spec[3]):]'
# print args[len(args)-len(arg_spec[3]):]
if arg_spec[3] is None or \
arg not in args[len(args)-len(arg_spec[3]):]:
raise Exception('arg "%s" of function "%s" ' \
'undefined (no quantity found' \
' or no default value)' \
%(arg, val.__name__))
#print '-> current dependers', self.dependers
#print '-> current dependencies', self.dependencies
if len(self.dependencies[label]) == 0:
self.add_root(label)
else:
pyhrf.verbose(4, '... is of type %s' %val.__class__)
#self.load_from_cache.pop(label) #It won't be cached
for label in labels:
self.add_root(label)
if 0:
print 'dependers:'
print self.dependers
print 'dependencies:'
print self.dependencies
print 'roots:'
print self.roots
print 'self.quantities:'
print self.quantities
self.checkGraph()
def save_graph_plot(self, image_filename, images=None):
import pygraphviz as pgv
g = pgv.AGraph(directed=True)
for label in self.labels:
for deper in self.dependers[label]:
#print 'label:', label, 'deper:', deper
g.add_edge(label, deper)
for label in self.roots:
try:
n = g.get_node(label)
n.attr['shape'] = 'box'
except Exception, e:
print e
pass
if images is not None:
blank_image = pyhrf.get_data_file_name('empty.png')
#image = images.values()[0]
for label in self.labels:
n = g.get_node(label)
if images.has_key(label):
n.attr['image'] = images[label]
#n.attr['imagescale'] = '4'
n.attr['labelloc'] = 'b'
else:
n.attr['image'] = blank_image
g.layout('dot')
g.draw(image_filename)
def update_subgraph(self, root):
#TODO : limit init of force_eval only to quantities involved
# in current subgraph
self.force_eval = dict([ (label,False) for label in self.labels ])
depths = {}
for lab in self.labels:
depths[lab] = -1 # mark as not visited
self.setDepths(root, depths, 0)
#print 'depths:', depths
maxDepth = max(depths.values())
        levels = [[] for d in xrange(maxDepth+1)]
for lab,depth in depths.iteritems():
if depth != -1 :
levels[depth].append(lab)
updated = dict((l,False) for l in self.labels)
for level in levels:
for lab in level:
self.update_quantity(lab, updated)
# def updated_from_cache(self):
# print 'load_from_cache:', self.load_from_cache
# return all(self.load_from_cache.values())
def setDepths(self, label, depths, curDepth):
# print 'setDepths ...'
# print 'label:', label
# print 'curDepth:', curDepth
for deper in self.dependers[label]:
# if depth not yet set for this node
# or
# if a deeper path has been found to reach this node :
if depths[deper] == -1 or depths[deper] < curDepth:
depths[deper] = curDepth
self.setDepths(deper, depths, curDepth+1)
def resolve(self):
self.update_subgraph(self.THE_ROOT)
def get_value(self, label):
"""
Return the value associated with 'label'
"""
if len(self.values) == 0:
self.resolve()
return self.values[label]
def get_values(self):
"""
Return all computed values. Perform a full update if not done yet.
"""
if len(self.values) == 0:
self.resolve()
return self.values
    def reportChange(self, rootLabel):
"""
Trigger update of the sub graph starting at the given root
"""
assert rootLabel in self.roots
#pyhrf.verbose(5,
# '%s reported as changed ... spreading the news ...'
# %rootLabel)
self.update_subgraph(rootLabel)
def reprAllDeps(self):
"""
        Build a string representing the whole graph: a concatenation of the
        representations of all nodes (see reprDep).
"""
s = ""
for lab in self.labels:
s += self.reprDep(lab)
s += '\n'
return s
def reprDep(self, label):
"""
Build a string representing all dependencies and dependers of the
variable 'label'. The returned string is in the form :
label
depee1 <-
depee2 <-
-> deper1
-> deper2
"""
deps = self.dependencies[label]
if len(deps) > 0:
maxLDepee = max([len(dep) for dep in deps])
else:
maxLDepee = 1
depeeMark = ' <-\n'
s = string.join([string.ljust(' '+dep,maxLDepee) for dep in deps]+[''],
depeeMark)
deps = self.dependers[label]
deperMark = string.rjust('-> ', maxLDepee+len(depeeMark)+1)
s += string.join([deperMark+dep for dep in deps],'\n')
res = '*'+string.rjust(label, maxLDepee)+'*\n'+s
return res
def checkGraph(self):
"""
        Check the soundness of the built graph (acyclicity, uniqueness and no
        short-circuits).
"""
# check acyclicity :
self.visited = {}
#print 'detect cycl ...'
for r in self.roots:
#print 'starting from ', r
self.detectCyclity([r])
# Check there is no short-circuit:
depths = {}
for r in self.roots:
for lab in self.labels:
depths[lab] = -1
depths[r] = 0
self.detectShortCircuit(r, 1, depths)
def detectShortCircuit(self, curRoot, curDepth, depths):
"""
Recursive method which detects and corrects short-circuits
"""
## Breadth graph walk :
for deper in self.dependers[curRoot]:
dDeper = depths[deper]
# if depender was visited and its depth is smaller
if dDeper != -1 and dDeper < curDepth:
# Short-circuit detected -> removing it :
self.removeShortCircuits(deper, depths)
# Setting depth to the right one :
depths[dDeper] = curDepth
# Continue walking ...
for deper in self.dependers[curRoot]:
self.detectShortCircuit(deper, curDepth+1, depths)
def removeShortCircuits(self, label, depths):
d = depths[label]
# Look in direction leaf -> root, one level :
for depee in self.dependencies[label]:
dDepee = depths[depee]
# if dependence was not visited and dependence if further than
# depth-1 :
if dDepee != -1 and dDepee != d-1:
# Remove discovered shunt :
print 'removing shunt : %s <-> %s' \
%(depee, label)
self.dependers[depee].remove(label)
self.dependencies[label].remove(depee)
def detectCyclity(self, viewedNodes):
#print 'viewedNodes :', viewedNodes
## Depth graph walk, root->leaf direction :
##if viewedNodes
root = viewedNodes[-1]
#print 'root :', root
for deper in self.dependers[root]:
#print 'deper :', deper
if deper in viewedNodes:
msg = 'Cyclicity detected in dependency graph :\n'+ \
' origin is %s and dep is %s' %(root,deper)
raise Exception(msg)
else:
viewedNodes.append(deper)
self.detectCyclity(viewedNodes)
viewedNodes.pop()
## def hasChanged(self, fifo, visited, infifo):
## print 'hasChanged :', fifo[0]
## ## level by level graph walk, root->leaf direction :
## print 'dependers :', self.dependers[fifo[0]]
## for deper in self.dependers[fifo.p
## opleft()]:
## if deper not in self.roots:
## self.update(deper)
## fifo.appendleft(deper)
## self.hasChanged(fifo)
def get_func(self, f):
if isinstance(f, MemorizedFunc):
return f.func
elif inspect.isfunction(f):
return f
else:
return None
def update_quantity(self, label, updated):
if updated[label]: #already updated
return
pyhrf.verbose(4, " ------------- Update quantity '%s' -----------" \
%label)
quantity = self.quantities[label]
siblings = self.siblings[label]
func = self.get_func(quantity)
if func is not None:
if pyhrf.verbose.verbosity > 4:
pyhrf.verbose(4, " -> %s" %str(func))
t0 = time()
fargs = {}
args,_,_,d = inspect.getargspec(func)
defaults = inspect_default_args(args,d)
#print "defaults:", defaults
#print "args:", args
for depee in args:
#print "depee:", depee
#print '-> val:', self.values.get(depee,defaults.get(depee,None))
fargs[depee] = self.values.get(depee,defaults.get(depee,None))
#print self.cached
#raw_input('')
# if self.cached:
# pyhrf.verbose(4, 'Cache enabled')
# if self.force_eval[label]:
# new_eval = True
# self.load_from_cache[label] = False
# else:
# cache_existent = cache_exists(quantity, fargs,
# path=self.cache_dir,
# digest_code=True) \
# and self.cached
# self.load_from_cache[label] = cache_existent
# new_eval = not self.load_from_cache[label]
# if self.load_from_cache[label]:
# pyhrf.verbose(4, 'Load from cache')
# if new_eval:
# pyhrf.verbose(4, 'New evaluation %s ...' \
# %(['','(forced)'][self.force_eval[label]]))
# # Dependers must be updated too
# depers = self.dependers[label]
# pyhrf.verbose(4, 'Force update of dependent quantities '\
# '-> %s' %(','.join(depers)))
# for deper in depers:
# self.force_eval[deper] = True
# else:
# new_eval = True
#seval = ['E','Cached e'][self.cached]
pyhrf.verbose(4, 'Eval of %s ...' %(label))
# self.values[label] = cached_eval(quantity, fargs,
# new=new_eval,
# save=self.cached,
# path=self.cache_dir,
# digest_code=True)
results = quantity(**fargs)
pyhrf.verbose(4, 'Quantity %s updated in %s' \
%(label, format_duration(time()-t0)))
else:
if pyhrf.verbose.verbosity > 4:
if isinstance(quantity, np.ndarray):
pyhrf.verbose(4,' -> ndarray of shape %s and type %s'
%(str(quantity.shape), str(quantity.dtype)))
#pyhrf.verbose.printNdarray(3, quantity)
else:
pyhrf.verbose(4, " -> %s" %str(quantity))
results = quantity
if len(siblings) > 1:
assert len(results) == len(siblings)
else:
results = (results,)
for l,r in zip(siblings, results):
self.values[l] = r
updated[l] = True
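# Minimal Pipeline sketch (toy quantities): 'a' and 'b' are plain values,
# 's' is computed from them by a function whose argument names match the
# quantity labels.
def _pipeline_example():
    def compute_s(a, b):
        return a + b
    p = Pipeline({'a': 2, 'b': 3, 's': compute_s})
    p.resolve()
    return p.get_value('s')   # -> 5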
def rebin(a, newshape):
'''Rebin an array to a new shape.
    Can be used to rebin a functional image onto an anatomical image.
'''
assert len(a.shape) == len(newshape)
slices = [ slice(0,old, float(old)/new) \
for old,new in zip(a.shape,newshape) ]
coordinates = np.mgrid[slices]
#choose the biggest smaller integer index:
indices = coordinates.astype('i')
return a[tuple(indices)]
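# Minimal rebin sketch: downsample a 4x4 array onto a 2x2 grid by
# nearest-lower-index sampling (toy data).
def _rebin_example():
    a = np.arange(16).reshape(4, 4)
    return rebin(a, (2, 2))   # -> [[0, 2], [8, 10]]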
def resampleToGrid(x, y, xgrid):
# assert that target grid is included in original x
#assert x[0] <= xgrid[0]
#assert x[-1] >= xgrid[-1]
## print 'x :', x
## print 'y :', y
## print 'xgrid :', xgrid
i = 1
yg = np.empty(xgrid.shape,dtype=y.dtype)
for ig, xg in enumerate(xgrid):
while i<len(x) and xg > x[i]:
i += 1
if i>=len(x) : i-=1
#if ig == 0:
# print 'ig=0, xg=%f' %xg
# print 'i = %d, y[i]=%f, x[i]=%f, x[i-1]=%f' \
# %(i, y[i], x[i], x[i-1])
#if xg < x[i] :
dx = 0. if x[i]!=x[i-1] else 0.0000001
yg[ig] = (y[i]-y[i-1])*(xg-x[i])/((x[i]+dx)-(x[i-1]-dx)) + y[i]
#yg[ig] = (y[i]-y[i-1])*xg/((x[i]+dx)-(x[i-1]-dx))+ y[i-1]
#print 'i:', i
#print 'ig:', ig
#print 'xg:', xg
#print 'y[i] et y[i-1]:', y[i], y[i-1]
#print 'dx:', dx
#print 'x[i] et x[i-1]:', x[i], x[i-1]
#print 'yg[ig]:', yg[ig]
#if ig == 0:
# print ' -> yg[ig]=%f' %yg[ig]
#else:
# yg[ig] = y[i]
## import matplotlib.pyplot as plt
## print "x :"
## print x
## print 'y :'
## print y
## plt.plot(x,y,'o-')
## plt.plot(xgrid,yg,'x-')
## plt.show()
return yg
def resampleSignal(s, osf):
ls = len(s)
## print 'ls =', ls
## print s.shape
timeMarks = np.arange(osf*(ls-1),dtype=float)/osf
## print 'timeMarks :'
## print timeMarks
prevTMarksSrc = np.array(np.floor(timeMarks), dtype=int)
## print 'prevTMarksSrc:'
## print prevTMarksSrc
nextTMarksSrc = np.array(np.ceil(timeMarks), dtype=int)
## print 'nextTMarksSrc :'
## print nextTMarksSrc
deltaBold = s[nextTMarksSrc,:]-s[prevTMarksSrc,:]
deltaTime = timeMarks-prevTMarksSrc
sr = s[prevTMarksSrc,:] + \
(deltaBold.transpose()*deltaTime).transpose()
## print sr
return sr
def diagBlock(mats, rep=0):
"""
Construct a diagonal block matrix from blocks which can be 1D or
2D arrays. 1D arrays are taken as column vectors.
    If 'rep' is a positive number, the blocks are repeated 'rep' times along the diagonal.
"""
if type(mats) == np.ndarray:
finalMat = mats
elif type(mats)==list and len(mats) == 1:
finalMat = mats[0]
elif type(mats)==list :
m = 0 # nbLines
n = 0 # nbCols
for mat in mats:
m += mat.shape[0]
n += 1 if len(mat.shape) < 2 else mat.shape[1]
finalMat = np.zeros((m,n), dtype=float)
lOffset = 0
cOffset = 0
for mat in mats:
m = mat.shape[0]
n = 1 if len(mat.shape) < 2 else mat.shape[1]
sh = finalMat[lOffset:lOffset+m, cOffset:cOffset+n].shape
finalMat[lOffset:lOffset+m, cOffset:cOffset+n] = mat.reshape(sh)[:]
lOffset += m
cOffset += n
else:
raise Exception('diagBlock: unrecognised type for "mats"')
if rep > 0:
return diagBlock([finalMat]*rep)
else:
return finalMat
def extractRoiMask(nmask, roiId):
mask = nmask==roiId
m = np.where(mask)
mm = np.vstack(m).transpose()
cropMask = np.zeros(mm.ptp(0)+1, dtype=int)
cropMask[tuple((mm-mm.min(0)).transpose())] = mask[m]
return cropMask
def describeRois(roiMask):
s = 'Number of voxels : %d\n' %(roiMask!=0).sum()
s += 'Number of regions : %d\n' %(len(np.unique(roiMask))-int(0 in roiMask))
s += 'Region sizes :\n'
counts = np.bincount(roiMask[roiMask>0])
s += np.array2string(counts) + '\n'
nbr = (counts >= 60).sum()
s += 'Nb of region with size > 60: %d\n' %nbr
return s
def array_summary(a, precision=4):
return '%s -- %1.*f(%1.*f)[%1.*f...%1.*f]' %(str(a.shape),
precision, a.mean(),
precision, a.std(),
precision, a.min(),
precision, a.max())
def get_2Dtable_string(val, rownames, colnames, precision=4, col_sep='|',
line_end='', line_start='', outline_char=None):
""" Return a nice tabular string representation of a 2D numeric array
#TODO : make colnames and rownames optional
"""
if val.ndim == 1:
val = val.reshape(val.shape[0], 1)
nrows, ncols = val.shape[:2]
if np.isscalar(val[0,0]):
valWidth = len(str('%1.*f' %(precision,-3.141658938325)))
else:
if (val>=0).all():
valWidth = len(array_summary(val[0,0], precision=precision))
else:
valWidth = len(array_summary(np.abs(val[0,0]) * -1,
precision=precision))
rowWidth = max([len(str(rn)) for rn in rownames])
colWidth = [max([valWidth, len(cn)]) for cn in colnames ]
sheader = line_start + ' ' * rowWidth + ' ' + col_sep
sheader += col_sep.join([' %*s ' %(colWidth[j], cn) for j,cn in enumerate(colnames)])
sheader += line_end + '\n'
scontent = ''
#print 'nrows:',nrows
#print 'rownames:', rownames
for i in xrange(nrows):
line = line_start + ' %*s ' %(rowWidth, str(rownames[i])) + col_sep
for j in xrange(ncols):
if np.isscalar(val[i,j]):
line += ' %*.*f ' %(colWidth[j], precision, val[i,j]) + col_sep
else:
line += ' %*s ' %(valWidth, array_summary(val[i,j], precision)) + col_sep
if outline_char is not None:
outline = outline_char * (len(line)-1 + len(line_end)) + '\n'
else:
outline = ''
scontent += outline + line[:-len(col_sep)] + line_end + '\n'
return outline + sheader + scontent + outline
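# Display sketch: format a small matrix with row/column names using the
# default precision (labels and values are arbitrary).
def _table_string_example():
    vals = np.array([[1.5, 2.25], [3.0, 4.125]])
    return get_2Dtable_string(vals, rownames=['r1', 'r2'],
                              colnames=['c1', 'c2'])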
def get_leaf(element, branch):
"""
    Return the nested leaf element corresponding to all dictionary keys in
    branch, starting from element.
"""
if isinstance(element, dict) and len(branch)>0:
return get_leaf(element[branch[0]], branch[1:])
else:
return element
def set_leaf(tree, branch, leaf, branch_classes=None):
"""
    Set the nested *leaf* element corresponding to all dictionary keys
defined in *branch*, within *tree*
"""
assert isinstance(tree, dict)
if len(branch) == 1:
tree[branch[0]] = leaf
return
if not tree.has_key(branch[0]):
if branch_classes is None:
tree[branch[0]] = tree.__class__()
else:
tree[branch[0]] = branch_classes[0]()
else:
assert isinstance(tree[branch[0]], dict)
if branch_classes is not None:
set_leaf(tree[branch[0]], branch[1:], leaf, branch_classes[1:])
else:
set_leaf(tree[branch[0]], branch[1:], leaf)
def swap_layers(t, labels, l1, l2):
""" Create a new tree from t where layers labeled by l1 and l2 are swapped.
labels contains the branch labels of t.
"""
i1 = labels.index(l1)
i2 = labels.index(l2)
nt = t.__class__() # new tree init from the input tree
# can be dict or OrderedDict
for b,l in izip(treeBranches(t), tree_leaves(t)):
nb = list(b)
nb[i1], nb[i2] = nb[i2], nb[i1]
set_leaf(nt, nb, l)
return nt
def tree_rearrange(t, oldlabels, newlabels):
""" Create a new tree from t where layers are rearranged following newlabels.
oldlabels contains the branch labels of t.
"""
order = [oldlabels.index(nl) for nl in newlabels]
nt = t.__class__() # new tree
for b,l in izip(treeBranches(t), tree_leaves(t)):
nb = [b[i] for i in order]
set_leaf(nt, nb, l)
return nt
def treeBranches(tree, branch=None):
#print "tree", tree
#print "branch", branch
if branch is None:
branch = []
if isinstance(tree, dict):
for k in tree.iterkeys():
#print "k", k
#print 'tree[k],', tree[k]
for b in treeBranches(tree[k], branch+[k]):
yield b
else:
yield branch
def treeBranchesClasses(tree, branch=None):
#print "tree", tree
#print "branch", branch
if branch is None:
branch = []
if isinstance(tree, dict):
for k,v in tree.iteritems():
#print "k", k
#print 'tree[k],', tree[k]
for b in treeBranchesClasses(tree[k], branch+[v.__class__]):
yield b
else:
yield branch
def tree_leaves(tree):
for branch in treeBranches(tree):
yield get_leaf(tree, branch)
def tree_items(tree):
"""
"""
for branch in treeBranches(tree):
yield (branch,get_leaf(tree, branch))
def stack_trees(trees, join_func=None):
""" Stack trees (python dictionnaries) with identical structures
into one tree
so that one leaf of the resulting tree is a list of the corresponding leaves
across input trees. 'trees' is a list of dict
"""
stackedTree = trees[0].__class__()
for branch, branch_classes in izip(treeBranches(trees[0]),
treeBranchesClasses(trees[0])):
if join_func is None:
leaveList = [get_leaf(tree, branch) for tree in trees]
else:
leaveList = join_func([get_leaf(tree, branch) for tree in trees])
set_leaf(stackedTree, branch, leaveList, branch_classes)
return stackedTree
def unstack_trees(tree):
""" Return a list of tree from a tree where leaves are all lists with
the same number of items
"""
first_list = tree_leaves(tree).next()
tree_list = [ tree.__class__() for i in xrange(len(first_list)) ]
for b,l in tree_items(tree):
for t,item in zip(tree_list,l):
set_leaf(t, b, item)
return tree_list
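# Sketch of a stack_trees / unstack_trees round trip on two toy dictionaries
# sharing the same structure.
def _stack_trees_example():
    t1 = {'a': {'x': 1}, 'b': 2}
    t2 = {'a': {'x': 10}, 'b': 20}
    stacked = stack_trees([t1, t2])   # leaves become lists: {'a': {'x': [1, 10]}, 'b': [2, 20]}
    return unstack_trees(stacked)     # back to a list of two trees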
from pprint import pprint
def apply_to_leaves(tree, func, funcArgs=None, funcKwargs=None):
"""
Apply function 'func' to all leaves in given 'tree' and return a new tree.
"""
if funcKwargs is None:
funcKwargs = {}
if funcArgs is None:
funcArgs = []
newTree = tree.__class__() # could be dict or OrderedDict
for branch,leaf in tree_items(tree):
set_leaf(newTree, branch, func(leaf,*funcArgs,**funcKwargs))
return newTree
def map_dict(func, d):
return d.__class__( (k,func(v)) for k,v in d.iteritems() )
def get_cache_filename(args, path='./', prefix=None, gz=True):
hashArgs = hashlib.sha512(repr(args)).hexdigest()
if prefix is not None:
fn = os.path.join(path, prefix + '_' + hashArgs + '.pck')
else:
fn = os.path.join(path, hashArgs + '.pck')
if gz:
return fn + '.gz'
else:
return fn
def hash_func_input(func, args, digest_code):
if isinstance(args, dict): # sort keys
to_digest = ''
for k in sorted(args.keys()):
v = args[k]
to_digest += repr(k) + repr(v)
else:
to_digest = repr(args)
if digest_code:
to_digest += inspect.getsource(func)
return hashlib.sha512(to_digest).hexdigest()
def cache_filename(func, args=None, prefix=None, path='./',
digest_code=False):
if prefix is None:
prefix = func.__name__
else:
prefix = prefix + '_' + func.__name__
hashArgs = hash_func_input(func, args, digest_code)
fn = os.path.join(path, prefix + '_' + hashArgs + '.pck.gz')
return fn
def cache_exists(func, args=None, prefix=None, path='./',
digest_code=False):
return op.exists(cache_filename(func, args=args, prefix=prefix,
path=path,digest_code=digest_code))
def cached_eval(func, args=None, new=False, save=True, prefix=None,
path='./', return_file=False, digest_code=False,
gzip_mode='cmd'):
fn = cache_filename(func, args=args, prefix=prefix,
path=path,digest_code=digest_code)
if not os.path.exists(fn) or new:
if args is None:
r = func()
elif isinstance(args, tuple) or isinstance(args, list):
r = func(*args)
elif isinstance(args, dict):
r = func(**args)
else:
raise Exception("type of arg (%s) is not valid. Should be tuple, "\
"list or dict" %str(arg.__class__))
if save:
if gzip_mode=='pygzip' and os.system('gzip -V') != 0:
#print 'use python gzip'
f = gzip.open(fn,'w')
cPickle.dump(r,f)
f.close()
else:
#print 'use gzip command'
f = open(fn[:-3],'w')
cPickle.dump(r,f)
f.close()
os.system("gzip -f %s" %fn[:-3])
if not return_file:
return r
else:
return fn
else:
if not return_file:
return cPickle.load(gzip.open(fn))
else:
return fn
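# Caching sketch (path is illustrative and the gzip command is assumed to be
# available): the first call evaluates and stores the result, later calls
# with the same arguments reload it from the cache file.
def _cached_eval_example(tmp_path='./'):
    def slow_square(x):
        return x ** 2
    return cached_eval(slow_square, args=(7,), path=tmp_path)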
def montecarlo(datagen, festim, nbit=None):
"""Perform a Monte Carlo loop with data generator 'datagen' and estimation
function 'festim'.
    'datagen' has to be iterable.
'festim' must return an object on which ** and + operators can be applied.
    If 'nbit' is provided, use it as the number of iterations; otherwise loop
    until datagen stops.
"""
itgen = iter(datagen)
s = itgen.next()
e = festim(s)
cumul = e
cumul2 = e**2
if nbit is None:
nbit = 0
for s in itgen:
e = festim(s)
cumul += e
cumul2 += e**2
nbit += 1
else:
for i in xrange(1, nbit):
s = itgen.next()
e = festim(s)
cumul += e
cumul2 += e**2
m = cumul / float(nbit)
v = cumul2 / float(nbit) - m**2
return m, v
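# Monte Carlo sketch: estimate mean and variance of the empirical mean of
# Gaussian samples (synthetic generator, arbitrary sample size and nbit).
def _montecarlo_example():
    def datagen():
        while True:
            yield np.random.randn(100)
    m, v = montecarlo(datagen(), lambda s: s.mean(), nbit=500)
    return m, v   # m close to 0, v close to 1./100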
def closestsorted(a, val):
i = np.searchsorted(a, val)
if i == len(a)-1:
return i
elif np.abs(a[i]-val) < np.abs(a[i+1]-val):
return i
else:
return i+1
##################
def calc_nc2D(a, b):
return a + b + 2*(a-1)*(b-1) - 2
def nc2DGrid(maxSize):
nc2D = np.zeros((maxSize, maxSize), dtype=int)
for a in xrange(maxSize):
for b in xrange(maxSize):
nc2D[a,b] = calc_nc2D(a,b)
return nc2D
# nc2D = cached_eval(nc2DGrid, (1000,))
# nbCliques = 4752
# sol = where(nc2D==nbCliques)
# print 'Configuration where nb cliques =', nbCliques, ':'
# for i in xrange(len(sol[0])):
# a,b = sol[0][i], sol[1][i]
# print a,"x",b,"=",a*b
def now():
return datetime.datetime.fromtimestamp(time())
def time_diff_str(diff):
return '%dH%dmin%dsec' %(diff.seconds/3600,
(diff.seconds%3600)/60,
(diff.seconds%3600)%60)
class AnsiColorizer:
""" Format strings with an ANSI escape sequence to encode color """
BEGINC = '\033['
COLORS = {
'purple' : '95',
'blue' : '94',
'green' : '92',
'yellow' : '93',
'red' : '91',
}
ENDC = '\033[0m'
def __init__(self):
self.disabled = False
self.do_tty_check = True
def disable(self):
self.disabled = True
def enable(self):
self.disabled = False
def no_tty_check(self):
self.do_tty_check = False
def tty_check(self):
self.do_tty_check = True
def __call__(self, s, color, bright=False, bold=False):
if color not in self.COLORS:
raise Exception('Invalid color "%s". Available colors: %s' \
%(color, str(self.COLORS)))
col = self.COLORS[color]
if self.disabled or (self.do_tty_check and not sys.stdout.isatty()):
return s
else:
ansi_codes = ";".join([['','1'][bright],col])
return '%s%sm%s%s' %(self.BEGINC, ansi_codes, s, self.ENDC)
colorizer = AnsiColorizer()
def extract_file_series(files):
"""
group all file names sharing a common prefix followed by a number, ie:
<prefix><number><extension>
    Return a dictionary with two levels (<tag>,<extension>), mapped to all
    corresponding series indexes found.
"""
#print 'extract_file_series ...'
series = {} # will map (tag,extension) to number series
rexpSeries = re.compile("(.*?)([0-9]+)[.]([a-zA-Z.~]*?)\Z")
series = defaultdict(lambda: defaultdict(list))
for f in files:
#print 'f:', f
r = rexpSeries.match(f)
if r is not None:
(tag,idx,ext) = r.groups()
ext = '.' + ext
#print '-> %s | %s | %s' %(tag, idx, ext)
else:
tag,ext = op.splitext(f)
#print '-> %s | %s' %(tag, ext)
idx = ''
series[tag][ext].append(idx)
return series
def format_serie(istart, iend):
return colorizer(['[%d...%d]'%(istart,iend),
'[%d]'%(istart)][istart==iend],
'red')
def condense_series(numbers):
if len(numbers) == 1:
return numbers[0]
else:
inumbers = np.sort(np.array(map(int,numbers)))
if (np.diff(inumbers) == 1).all():
return format_serie(inumbers.min(),inumbers.max())
else:
segment_start = 0
s = ''
for segment_end in np.where(np.diff(inumbers)!=1)[0]:
s += format_serie(inumbers[segment_start],inumbers[segment_end])
segment_start = segment_end+1
s += format_serie(inumbers[segment_start],inumbers[-1])
return s
def group_file_series(series, group_rules=None):
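    """
    Turn the output of extract_file_series into short, human-readable display
    names: consecutive indexes are condensed into '[first...last]' ranges and
    tags whose series are identical across several extensions are merged into
    a single '.{ext1,ext2}' entry. Tags matching one of the compiled regexps in
    'group_rules' (which must define a 'group_name' group) are collapsed into
    that group. Return a sorted list of ANSI-colorized strings.
    """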
if group_rules is None:
group_rules = []
groups = defaultdict(dict)
dummy_tag = 0
for tag,ext_data in series.iteritems():
tag_has_been_grouped = False
for gr in group_rules:
r = gr.match(tag)
if r is not None:
gname = r.group('group_name')
groups[gname]['...'] = gname
tag_has_been_grouped = True
break
if not tag_has_been_grouped:
for ext,numbers in ext_data.iteritems():
if '' in numbers:
groups[dummy_tag][ext] = tag
numbers.remove('')
if len(numbers) > 0:
groups[tag][ext] = tag + condense_series(numbers)
dummy_tag += 1
final_groups = []
for tag,series in groups.iteritems():
sv = series.values()
if len(sv) > 1 and len(set(sv)) == 1:
exts = [ext[1:] for ext in series.keys()]
final_groups.append(sv[0]+colorizer('.{%s}'%','.join(exts),'green'))
else:
for ext,s in series.iteritems():
if ext == '...':
ext = colorizer('...', 'purple')
final_groups.append(s + ext)
return sorted(final_groups)
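# Usage sketch (illustration only): condense a directory listing into short
# series names, e.g. ['vol[1...50].nii', 'readme.txt'].
def _file_series_listing_example(path='.'):
    files = [f for f in os.listdir(path) if op.isfile(op.join(path, f))]
    return group_file_series(extract_file_series(files))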
# def group_file_series(files):
# """
# group all file names sharing a common prefix followed by a number, ie:
# <prefix><number><extension>.
# Return a dictionnary with two levels (<tag>,<extension>), mapped to all
# corresponding series index found.
# """
# series = {} # maps (tag,extension) to indexes
# rexpSeries = re.compile("(.*?)([0-9]+)[.]([a-zA-Z]*)\Z")
# for f in files:
# r = rexpSeries.match(f)
# if r is not None:
# (tag,idx,ext) = r.groups()
# if not series.has_key(tag):
# series[tag] = {}
# if not series[tag].has_key(ext):
# series[tag][ext] = []
# series[tag][ext].append(idx)
# return series
# def find_file_series(path):
# """ Find all files in directory 'path' which are formated as
# <tag><number><extension>. Sub-directories are ignored.
# Return a dictionnary with two levels (<tag>,<extension>), mapped to all
# corresponding series index found.
# """
# return group_file_series([f for f in os.listdir(path) \
# if op.isfile(op.join(path,f))])
def check_files_series(fseries, verbose=False):
ok_status = True
for tag,dext in fseries.iteritems():
for ext,indexes in dext.iteritems():
if 0:
print 'tag',tag,'ext',ext
print 'indexes:',indexes
sorted_indexes = sorted(indexes)
last = int(sorted_indexes[-1])
first = int(sorted_indexes[0])
diff = last - first + 1 - len(indexes)
if diff != 0:
ok_status = False
if verbose:
print '%d items missing for series %s[...].%s' \
%(diff,tag,ext)
print '-> Series should have %d items (%s to %s)' \
' - found %d items' \
%(last-first+1,first,last,len(indexes))
return ok_status
#Code factorisation: helper functions for HRF feature extraction and PPM computation
def Extract_TTP_whM_hrf(hrf, dt):
"""
Extract TTP and whM from an hrf
"""
from scipy.interpolate import interp1d
from pyhrf.boldsynth.hrf import getCanoHRF
hcano = getCanoHRF()
Abscisses = np.arange(hrf.size)*dt
    # TTP: time (in seconds) at which the HRF reaches its maximum
TTP = hrf.argmax()*dt
print 'found TTP:', TTP
    #whM computation
#1/ Round the HRF
HRF_rounded = np.round(hrf, 5)
#2/ Interpolation to obtain more values
Abscisses_round = np.arange(HRF_rounded.size)*dt
f = interp1d(Abscisses, hrf)
r = 0.00001
HRF_interp = f(np.arange(0,Abscisses_round[len(Abscisses_round)-1], r))
HRF_interp_rounded = np.round(HRF_interp, 5)
    #Time step of the interpolated grid, used to convert sample counts back to seconds
len_use = len(HRF_interp)
dt_interp = (hcano[0][len(hcano[0])-1])/len(HRF_interp)
#3/ Where the half max is found
Pts_1_2_h = np.where(abs(HRF_interp_rounded-HRF_rounded.max()/2.)<0.001)
#Pts_1_2_h = np.where(HRF_interp==HRF_interp[TTP/r]/2)
Values_pts_1_2_h = HRF_interp[Pts_1_2_h]
#print 'Max/2, Abscisses, Y :', HRF_rounded.max()/2., Pts_1_2_h, Values_pts_1_2_h
if Pts_1_2_h[0].shape[0]==0:
print '#### No point found ####'
    #Select the half-max points whose abscissa lies before the peak
Diff1 = abs(Pts_1_2_h - HRF_interp_rounded.argmax())*(Pts_1_2_h<HRF_interp_rounded.argmax())
#Diff1 = (Pts_1_2_h - HRF_interp[TTP/r])*(Pts_1_2_h<HRF_interp[TTP/r])
Diff1_non_zeros=Diff1[0][np.where(Diff1[0]>0)] #retrieve positions#0
Diff1_non_zeros.sort() #to sort all differences
First_diff1 = Diff1_non_zeros.mean()
    #Select the half-max points whose abscissa lies after the peak
Diff2 = abs(HRF_interp_rounded.argmax() - Pts_1_2_h)*(Pts_1_2_h>HRF_interp_rounded.argmax())
#Diff2 = (HRF_interp[TTP/r] - Pts_1_2_h)*(Pts_1_2_h>HRF_interp[TTP/r])
Diff2_non_zeros=Diff2[0][np.where(Diff2[0]>0)] #retrieve positions#0
Diff2_non_zeros.sort() #to sort all differences
First_diff2 = Diff2_non_zeros.mean()
    #Sum the two half-widths and multiply by dt_interp to obtain whM in seconds
whM = (First_diff1 + First_diff2)*dt_interp
print 'found whM:', whM
return TTP, whM
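# Usage sketch (illustration only; assumes, as in the code above, that pyhrf's
# getCanoHRF() returns a (time_axis, hrf_values) pair):
def _example_ttp_whm_canonical():
    from pyhrf.boldsynth.hrf import getCanoHRF
    taxis, hvals = getCanoHRF()
    return Extract_TTP_whM_hrf(np.asarray(hvals), dt=taxis[1] - taxis[0])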
def Extract_TTP_whM_from_group(hrfs_pck_file, dt, model, Path_data, acq):
"""
Extract TTP and whM from a group of hrfs whose values are saved in a .pck (size nb_subjects * nb_coeff_hrf)
"""
from scipy.interpolate import interp1d
from pyhrf.boldsynth.hrf import getCanoHRF
hcano = getCanoHRF()
hrfs = cPickle.load(open(hrfs_pck_file))
nb_subjects = hrfs.shape[0]
TTP_tot = np.zeros((nb_subjects))
whM_tot = np.zeros((nb_subjects))
for isubj in np.arange(nb_subjects):
HRF_at_max = hrfs[isubj, :]
Abscisses = np.arange(HRF_at_max.size)*dt
        # TTP: time (in seconds) at which the HRF reaches its maximum
TTP = HRF_at_max.argmax()*dt
print 'found TTP:', TTP
TTP_tot[isubj] = TTP
        #whM computation
#1/ Round the HRF
HRF_rounded = np.round(HRF_at_max, 5)
#2/ Interpolation to obtain more values
Abscisses_round = np.arange(HRF_rounded.size)*dt
f = interp1d(Abscisses, HRF_at_max)
r = 0.00001
HRF_interp = f(np.arange(0,Abscisses_round[len(Abscisses_round)-1], r))
HRF_interp_rounded = np.round(HRF_interp, 5)
        #Time step of the interpolated grid, used to convert sample counts back to seconds
len_use = len(HRF_interp)
dt_interp = (hcano[0][len(hcano[0])-1])/len(HRF_interp)
#3/ Where the half max is found
Pts_1_2_h = np.where(abs(HRF_interp_rounded-HRF_rounded.max()/2.)<0.001)
#Pts_1_2_h = np.where(HRF_interp==HRF_interp[TTP/r]/2)
Values_pts_1_2_h = HRF_interp[Pts_1_2_h]
#print 'Max/2, Abscisses, Y :', HRF_rounded.max()/2., Pts_1_2_h, Values_pts_1_2_h
if Pts_1_2_h[0].shape[0]==0:
print '#### No point found ####'
        #Select the half-max points whose abscissa lies before the peak
Diff1 = abs(Pts_1_2_h - HRF_interp_rounded.argmax())*(Pts_1_2_h<HRF_interp_rounded.argmax())
#Diff1 = (Pts_1_2_h - HRF_interp[TTP/r])*(Pts_1_2_h<HRF_interp[TTP/r])
Diff1_non_zeros=Diff1[0][np.where(Diff1[0]>0)] #retrieve positions#0
Diff1_non_zeros.sort() #to sort all differences
First_diff1 = Diff1_non_zeros.mean()
        #Select the half-max points whose abscissa lies after the peak
Diff2 = abs(HRF_interp_rounded.argmax() - Pts_1_2_h)*(Pts_1_2_h>HRF_interp_rounded.argmax())
#Diff2 = (HRF_interp[TTP/r] - Pts_1_2_h)*(Pts_1_2_h>HRF_interp[TTP/r])
Diff2_non_zeros=Diff2[0][np.where(Diff2[0]>0)] #retrieve positions#0
Diff2_non_zeros.sort() #to sort all differences
First_diff2 = Diff2_non_zeros.mean()
        #Sum the two half-widths and multiply by dt_interp to obtain whM in seconds
whM = (First_diff1 + First_diff2)*dt_interp
print 'found whM:', whM
whM_tot[isubj] = np.round(whM,1)
cPickle.dump(TTP_tot, open(Path_data + '/_TTPs_at_peak_by_hand_' + '_' + model + '_' + acq + '.pck', 'w'))
cPickle.dump(whM_tot, open(Path_data + '/_whMs_at_peak_by_hand_' + '_' + model + '_' + acq + '.pck', 'w'))
return TTP_tot, whM_tot
def PPMcalculus_jde(threshold_value, apost_mean_activ_fn, apost_var_activ_fn, \
apost_mean_inactiv_fn, apost_var_inactiv_fn, labels_activ_fn, \
labels_inactiv_fn, nrls_fn, mask_file, null_hyp=True):
    '''
    Compute, for every voxel, the posterior probability that the NRL in
    voxel j for condition m exceeds the given threshold_value.
    If null_hyp is True, also compute the p-value of each NRL under the
    null hypothesis.
    '''
from scipy.integrate import quad
from pyhrf.ndarray import xndarray
from scipy.stats import norm
#m1 = apost_mean_activ
#sig1 = apost_var_activ
#m2 = apost_mean_inactiv
#sig2 = apost_var_inactiv
#perc1 = labels_activ #proportion of samples drawn from the activ class
#perc2 = labels_inactiv #proportion of samples drawn from the inactiv class
mask = xndarray.load(mask_file).data
apost_mean_activ = xndarray.load(apost_mean_activ_fn)
apost_mean_inactiv = xndarray.load(apost_mean_inactiv_fn)
apost_var_activ = xndarray.load(apost_var_activ_fn)
apost_var_inactiv = xndarray.load( apost_var_inactiv_fn)
labels_activ = xndarray.load(labels_activ_fn)
labels_inactiv = xndarray.load( labels_inactiv_fn)
nrls = xndarray.load(nrls_fn)
    #flatten the volumes into 1D per-position arrays within the mask
m1 = apost_mean_activ.flatten(mask, axes=['sagittal', 'coronal', 'axial'], new_axis='position').data
m2 = apost_mean_inactiv.flatten(mask, axes=['sagittal', 'coronal', 'axial'], new_axis='position').data
var1 = apost_var_activ.flatten(mask, axes=['sagittal', 'coronal', 'axial'], new_axis='position').data
var2 = apost_var_inactiv.flatten(mask, axes=['sagittal', 'coronal', 'axial'], new_axis='position').data
perc1 = labels_activ.flatten(mask, axes=['sagittal', 'coronal', 'axial'], new_axis='position').data
perc2 = labels_inactiv.flatten(mask, axes=['sagittal', 'coronal', 'axial'], new_axis='position').data
nrls_values = nrls.flatten(mask, axes=['sagittal', 'coronal', 'axial'], new_axis='position').data
Probas=np.zeros(perc1.shape[0])
    Pvalues = np.zeros(perc1.shape[0])  # only filled in below when null_hyp is True
    if null_hyp:
        #identify the voxels where the active class dominates and where the inactive class dominates
Comp=perc1-perc2
Pos_activ = np.where(Comp[:,0])
Pos_inactiv = np.where(Comp[:,0]<0)
Means = np.zeros(perc1.shape[0])
Vars = np.zeros(perc1.shape[0])
for i in xrange(perc1.shape[0]):
#posterior probability distribution
fmix = lambda x: perc1[i]*norm.pdf(x,m1[i], var1[i]**.5) + perc2[i]*norm.pdf(x,m2[i], var2[i]**.5)
#fmix = lambda t: perc1[i] * 1/np.sqrt(2*np.pi*sig1[i]**2)*np.exp(- (t - m1[i])**2 / (2*sig1[i]**2) ) + \
#perc2[i] * 1/np.sqrt(2*np.pi*sig2[i]**2)*np.exp(- (t - m2[i])**2 / (2*sig2[i]**2) )
Probas[i] = quad(fmix, threshold_value, float('inf'))[0]
#if Probas[i]>1: Probas[i]=1
if null_hyp:
Means[Pos_activ] = m1[Pos_activ]
Means[Pos_inactiv] = m2[Pos_inactiv]
Vars[Pos_activ] = var1[Pos_activ]
Vars[Pos_inactiv] = var2[Pos_inactiv]
for i in xrange(perc1.shape[0]):
nrl_val = nrls_values[i]
fmix = lambda x:norm.pdf(x,0, Vars[i]**.5)
Pvalues[i] = quad(fmix, nrl_val, float('inf'))[0]
#deflatten to retrieve original shape
PPM_ = xndarray(Probas, axes_names=['position'])
PPM = PPM_.expand(mask, 'position', ['sagittal','coronal','axial'])
PPMinvv = 1-Probas #to obtain more readable maps
PPMinv_ = xndarray(PPMinvv, axes_names=['position'])
PPMinv = PPMinv_.expand(mask, 'position', ['sagittal','coronal','axial'])
Pval_ = xndarray(Pvalues, axes_names=['position'])
Pval = Pval_.expand(mask, 'position', ['sagittal','coronal','axial'])
return PPM.data, PPMinv.data, Pval.data
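# Usage sketch: the file names below are purely illustrative placeholders for
# the posterior mean/variance, label and NRL volumes written by a JDE analysis.
def _example_ppm_jde():
    return PPMcalculus_jde(0., 'apost_mean_activ.nii', 'apost_var_activ.nii',
                           'apost_mean_inactiv.nii', 'apost_var_inactiv.nii',
                           'labels_activ.nii', 'labels_inactiv.nii',
                           'nrls.nii', 'mask.nii')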
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/series/test_asof.py | 7 | 4718 | # coding=utf-8
import nose
import numpy as np
from pandas import (offsets, Series, notnull,
isnull, date_range, Timestamp)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesAsof(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_basic(self):
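        # Series.asof returns, for each requested timestamp, the last
        # non-NaN value whose index label is at or before that timestamp.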
        # array or list of dates
N = 50
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
val = result[result.index[result.index >= ub][0]]
self.assertEqual(ts[ub], val)
def test_scalar(self):
N = 30
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.arange(N), index=rng)
ts[5:10] = np.NaN
ts[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
self.assertEqual(val1, ts[4])
self.assertEqual(val2, ts[14])
# accepts strings
val1 = ts.asof(str(ts.index[7]))
self.assertEqual(val1, ts[4])
# in there
result = ts.asof(ts.index[3])
self.assertEqual(result, ts[3])
# no as of value
d = ts.index[0] - offsets.BDay()
self.assertTrue(np.isnan(ts.asof(d)))
def test_with_nan(self):
# basic asof test
rng = date_range('1/1/2000', '1/2/2000', freq='4h')
s = Series(np.arange(len(rng)), index=rng)
r = s.resample('2h').mean()
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
r.iloc[3:5] = np.nan
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
r.iloc[-3:] = np.nan
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
def test_periodindex(self):
from pandas import period_range, PeriodIndex
        # array or list of dates
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='37min')
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq='H')
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
ts[5:10] = np.nan
ts[15:20] = np.nan
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
self.assertEqual(val1, ts[4])
self.assertEqual(val2, ts[14])
# accepts strings
val1 = ts.asof(str(ts.index[7]))
self.assertEqual(val1, ts[4])
# in there
self.assertEqual(ts.asof(ts.index[3]), ts[3])
# no as of value
d = ts.index[0].to_timestamp() - offsets.BDay()
self.assertTrue(isnull(ts.asof(d)))
def test_errors(self):
s = Series([1, 2, 3],
index=[Timestamp('20130101'),
Timestamp('20130103'),
Timestamp('20130102')])
# non-monotonic
self.assertFalse(s.index.is_monotonic)
with self.assertRaises(ValueError):
s.asof(s.index[0])
# subset with Series
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
s = Series(np.random.randn(N), index=rng)
with self.assertRaises(ValueError):
s.asof(s.index[0], subset='foo')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |