repo_name (string, lengths 6-92) | path (string, lengths 4-191) | copies (322 distinct values) | size (string, lengths 4-6) | content (string, lengths 821-753k) | license (15 distinct values) |
---|---|---|---|---|---|
tawsifkhan/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
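# Note: this example targets the pre-0.18 scikit-learn API; in newer releases
# validation_curve is imported from sklearn.model_selection instead.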
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
acshi/osf.io | scripts/annotate_rsvps.py | 60 | 2256 | """Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
try:
return User.find_one(Q('username', 'iexact', email))
except ModularOdmException:
return None
def find_by_name(name):
try:
parts = re.split(r'\s+', name.strip())
except Exception:
return None
if len(parts) < 2:
return None
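# Build one AND-ed query over all name parts; for a hypothetical input such as
# "Jane Doe" this reduces to Q('fullname', 'icontains', 'Jane') & Q('fullname', 'icontains', 'Doe')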
users = User.find(
reduce(
lambda acc, value: acc & value,
[
Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
for part in parts
]
)
).sort('-date_created')
if not users:
return None
if len(users) > 1:
logger.warn('Multiple users found for name {}'.format(name))
return users[0]
def logs_since(user, date):
return NodeLog.find(
Q('user', 'eq', user._id) &
Q('date', 'gt', date)
)
def nodes_since(user, date):
return Node.find(
Q('creator', 'eq', user._id) &
Q('date_created', 'gt', date)
)
def process(frame):
frame = frame.copy()
frame['user_id'] = ''
frame['user_logs'] = ''
frame['user_nodes'] = ''
frame['last_log'] = ''
for idx, row in frame.iterrows():
user = (
find_by_email(row['Email address'].strip()) or
find_by_name(row['Name'])
)
if user:
date = parse_date(row['Workshop_date'])
frame.loc[idx, 'user_id'] = user._id
logs = logs_since(user, date)
frame.loc[idx, 'user_logs'] = logs.count()
frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
if logs:
frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
return frame
| apache-2.0 |
maheshakya/scikit-learn | sklearn/ensemble/tests/test_base.py | 28 | 1334 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
"""Check BaseEnsemble methods."""
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
"""Check that instantiating a BaseEnsemble with n_estimators<=0 raises
a ValueError."""
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
eike-welk/clair | src/clairweb/libclair/test/test_descriptors.py | 1 | 4284 | # -*- coding: utf-8 -*-
###############################################################################
# Clair - Project to discover prices on e-commerce sites. #
# #
# Copyright (C) 2016 by Eike Welk #
# [email protected] #
# #
# License: GPL Version 3 #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
"""
Test module ``descriptors``, which contains tools to define the structure
of a table or database.
"""
#import pytest #contains `skip`, `fail`, `raises`, `config` #IGNORE:W0611
#from numpy import isnan #, nan #IGNORE:E0611
#from pandas.util.testing import assert_frame_equal
#import time
#import logging
#from logging import info
#logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
# level=logging.DEBUG)
##Time stamps must be in UTC
#logging.Formatter.converter = time.gmtime
def test_TypeTag_s():
print("Start")
from numpy import nan #IGNORE:E0611
from datetime import datetime
from libclair.descriptors import \
NoneD, StrD, IntD, FloatD, DateTimeD, SumTypeD, ListD, DictD
assert NoneD.is_py_instance(None)
assert not NoneD.is_py_instance(3)
assert StrD.is_py_instance("foo")
assert not StrD.is_py_instance(3)
assert IntD.is_py_instance(23)
assert not IntD.is_py_instance(23.5)
assert FloatD.is_py_instance(4.2)
assert FloatD.is_py_instance(nan)
assert not FloatD.is_py_instance(3)
assert DateTimeD.is_py_instance(datetime(2000, 1, 1))
assert not DateTimeD.is_py_instance(3)
ts = SumTypeD(IntD, FloatD)
assert ts.is_py_instance(1)
assert ts.is_py_instance(1.41)
assert not ts.is_py_instance("a")
tl = ListD(FloatD)
assert tl.is_py_instance([])
assert tl.is_py_instance([1.2, 3.4])
assert not tl.is_py_instance([1, 3])
tl2 = ListD(SumTypeD(FloatD, IntD))
assert tl2.is_py_instance([1.2, 3, 4])
assert not tl.is_py_instance([1, "a"])
tm = DictD(StrD, IntD)
assert tm.is_py_instance({})
assert tm.is_py_instance({"foo": 2, "bar": 3})
assert not tm.is_py_instance({"foo": 2, "bar": 3.1415})
def test_FieldDescriptor():
print("Start")
from libclair.descriptors import FieldDescriptor, IntD
FieldDescriptor("foo", IntD, 1, "A foo integer.")
FieldDescriptor("foo", IntD, None, "A foo integer or None.")
def test_TableDescriptor():
print("Start")
from libclair.descriptors import TableDescriptor, FieldDescriptor, IntD
F = FieldDescriptor
TableDescriptor("foo_table", "1.0", "fot", "A table of foo elements",
[F("foo1", IntD, 0, "A foo integer."),
F("foo2", IntD, None, "A foo integer or None.")
])
if __name__ == "__main__":
# test_TypeTag_s()
# test_FieldDescriptor()
# test_TableDescriptor()
pass #IGNORE:W0107
| gpl-3.0 |
depet/scikit-learn | examples/linear_model/plot_logistic.py | 8 | 1400 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
pl.figure(1, figsize=(4, 3))
pl.clf()
pl.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
pl.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
pl.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
pl.axhline(.5, color='.5')
pl.ylabel('y')
pl.xlabel('X')
pl.xticks(())
pl.yticks(())
pl.ylim(-.25, 1.25)
pl.xlim(-4, 10)
pl.show()
| bsd-3-clause |
urschrei/geopandas | examples/nyc_boros.py | 8 | 1394 | """
Generate example images for GeoPandas documentation.
TODO: autogenerate these from docs themselves
Kelsey Jordahl
Time-stamp: <Tue May 6 12:17:29 EDT 2014>
"""
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point
from geopandas import GeoSeries, GeoDataFrame
np.random.seed(1)
DPI = 100
# http://www.nyc.gov/html/dcp/download/bytes/nybb_14aav.zip
boros = GeoDataFrame.from_file('nybb.shp')
boros.set_index('BoroCode', inplace=True)
boros.sort()
boros.plot()
plt.xticks(rotation=90)
plt.savefig('nyc.png', dpi=DPI, bbox_inches='tight')
#plt.show()
boros.geometry.convex_hull.plot()
plt.xticks(rotation=90)
plt.savefig('nyc_hull.png', dpi=DPI, bbox_inches='tight')
#plt.show()
N = 2000 # number of random points
R = 2000 # radius of buffer in feet
xmin, xmax = plt.gca().get_xlim()
ymin, ymax = plt.gca().get_ylim()
#xmin, xmax, ymin, ymax = 900000, 1080000, 120000, 280000
xc = (xmax - xmin) * np.random.random(N) + xmin
yc = (ymax - ymin) * np.random.random(N) + ymin
pts = GeoSeries([Point(x, y) for x, y in zip(xc, yc)])
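# merge the N overlapping circular buffers into a single (multi)polygon, then
# subtract it from each borough below to punch random holes in the geometries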
mp = pts.buffer(R).unary_union
boros_with_holes = boros.geometry - mp
boros_with_holes.plot()
plt.xticks(rotation=90)
plt.savefig('boros_with_holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
holes = boros.geometry & mp
holes.plot()
plt.xticks(rotation=90)
plt.savefig('holes.png', dpi=DPI, bbox_inches='tight')
plt.show()
| bsd-3-clause |
aounlutfi/E-commerce-Opimization | src/modeling.py | 2 | 4483 | # This file is part of E-Commerce Optimization (ECO)
# The (ECO) can be obtained at https://github.com/aounlutfi/E-commerce-Opimization
# ECO Copyright (C) 2017 Aoun Lutfi, University of Wollongong in Dubai
# Inquiries: [email protected]
# The ECO is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
# ECO is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with TSAM.
# If not, see <http://www.gnu.org/licenses/>.
# If you use the ECO or any part of it in any program or publication, please acknowledge
# its authors by adding a reference to this publication:
# Lutfi, A., Fasciani, S. (2017) Towards Automated Optimization of Web Interfaces and
# Application in E-commerce, Accepted for publications at International Journal of
# Computing and Information Sciences.
import networkx as nx
import matplotlib.pyplot as plt
import math
import time
TOP = 0
LEFT = 1
RIGHT = 2
BUTTOM = 3
X = 0
Y = 1
H = 0
W = 1
def modeling(elements, image = None, verbose = False):
#remove redundant elements
elements = clean_duplicates(elements, verbose)
print "number of clean elements: " + str(len(elements))
#setup graph
G = nx.Graph()
G.clear()
i = 0
labels = {}
positions = []
#attach elements to nodes
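# note: the G.node[i] = element and G.edge[u][v] = d assignments in this file
# rely on the dict-style networkx 1.x API and do not work on networkx 2.x,
# which removed G.edge and disallows assigning directly to G.nodes[i]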
for element in elements:
G.add_node(i)
G.node[i] = element
labels[i] = i
positions.append(element["center"])
i += 1
dist = []
i = 0
#attach distances to edges
for element in elements:
temp = list(elements)
temp.remove(element)
for elem in temp:
G.add_edge(elem["id"], element['id'])
d = distance(elem, element)
dist.append((i, "distance: " + str(d)))
G.edge[elem['id']][element['id']] = d
i+=1
if verbose:
for i in range(0, G.number_of_nodes()):
print G.node[i]
print "links: " + str(G.number_of_edges())
print G.edges()
for element in elements:
temp = list(elements)
temp.remove(element)
for elem in temp:
e = G.edge[elem['id']][element['id']]
if e:
print e
#save model details
model = "\n-------------------MODEL-------------------------------\n"
model += "nodes: " + str(G.number_of_nodes()) + "\n"
for i in range(0, G.number_of_nodes()):
model += str(G.node[i]) + "\n"
model += "links: " + str(G.number_of_edges()) + "\n"
model += str(G.edges()) + "\n"
for element in elements:
temp = list(elements)
temp.remove(element)
for elem in temp:
e = G.edge[elem['id']][element['id']]
if e:
model += str(e) + "\n"
#save to file
file = open("tests/model.txt", 'a')
file.write(model)
file.close()
#draw model
nx.draw_networkx_edges(G, positions)
nx.draw_networkx_edge_labels(G, positions, font_size = 6)
nx.draw_networkx_nodes(G, positions, node_size=500, alpha=0.7)
nx.draw_networkx_labels(G,positions,labels, font_color='w')
if image is not None:
plt.imshow(image, "gray")
#save model into an image
try:
if verbose:
plt.show()
plt.savefig("tests/" + str(time.time()) + "_model.jpg", dpi=900)
except Exception, e:
plt.savefig("tests/" + str(time.time()) + "_model.jpg", dpi=900)
plt.clf()
return (G, positions, labels)
def clean_duplicates(elements, verbose):
#to clean duplicates, go through each element, and look through the remaining elements and see if there is a match
#if a match exists, remove the duplicate
for element in elements:
temp = list(elements)
temp.remove(element)
if verbose:
print "checking id: " + str(element['id'])
for elem in temp:
if abs(elem["center"][X] - element["center"][X])<element["dimentions"][H] and abs(elem["center"][Y] - element["center"][Y])<element["dimentions"][W]:
elements.remove(elem)
if verbose:
print 'removed ' + str(elem['id'])
else:
if verbose:
print 'kept ' + str(elem['id'])
id = 0
#reset ids of elements
for element in elements:
element["id"] = id
id += 1
return elements
def distance(elem1, elem2):
#calculate the distance using euclidean distance
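# worked example (made-up centers): (0, 0) and (3, 4) give sqrt(3**2 + 4**2) = 5.0,
# so the returned rounded integer distance is 5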
return int(round(math.sqrt((elem1["center"][X] - elem2["center"][X])**2 + (elem1["center"][Y] - elem2["center"][Y])**2))) | gpl-3.0 |
fatadama/estimation | challenge_problem/sim_data/data_loader.py | 1 | 2118 | """@package data_loader
Module with functions for loading data from an output file generated by generate_data.py
"""
import ConfigParser
import numpy as np
import matplotlib.pyplot as plt
def main():
name = 'sims_01_fast'
(tsim,XK,YK,mu0,P0,Ns,dt,tf) = load_data(name)
print(tsim,XK,YK)
print("Loaded simulation with %d runs, initial mean = %f,%f,\n and initial covariance P = [[%f,%f],[%f,%f]]" % (Ns,mu0[0],mu0[1],P0[0,0],P0[0,1],P0[1,0],P0[1,1]))
## load simulation data from a particular case
#
#@param[in] name name of simulation batch to load; "<name>_settings.ini" and "<name>_data.csv" must exist
#@param[in] fpath path to data files
#@param[out] tsim simulation time vector
#@param[out] XK len(tsim) x 2*Ns matrix of true system states; even columns are position history, odd are velocity
#@param[out] YK len(tsim) x Ns matrix of system measurements for Ns Monte Carlo runs
#@param[out] mu0 mean initial state
#@param[out] P0 initial covariance
#@param[out] Ns number of simulations
#@param[out] dt sample period of measurements
#@param[out] tf final simulation time
def load_data(name,fpath='./'):
config = ConfigParser.ConfigParser()
config.read(fpath + name + "_settings.ini")
# sample time
dt = float(config.get(name,'ts'))
# number of data points
Ns = int(config.get(name,'ns'))
# final time
tf = float(config.get(name,'tf'))
# initial covariance
P0 = np.zeros((2,2))
P0[0,0] = float(config.get(name,'p0_11'))
P0[0,1] = float(config.get(name,'p0_12'))
P0[1,0] = float(config.get(name,'p0_21'))
P0[1,1] = float(config.get(name,'p0_22'))
# initial state mean
mu0 = np.zeros(2)
mu0[0] = float(config.get(name,'mux_1'))
mu0[1] = float(config.get(name,'mux_2'))
# load data
datain = np.genfromtxt(fpath + name+'_data.csv','float',delimiter=',')
## tsim: simulation time
tsim = datain[:,0]
inx = sorted( range(1,3*Ns+1,3) + range(2,3*Ns+1,3) )
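# worked example (assuming Ns = 2): column 0 is time and each run contributes
# 3 columns, so inx = sorted([1, 4] + [2, 5]) = [1, 2, 4, 5] selects the
# position/velocity pairs, while YK below picks columns [3, 6] (the measurements)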
## XK: nSteps x 2*Nsims array of state histories
XK = datain[:,inx]
## YK: nSteps x Nsims array of measurement of position
YK = datain[:,range(3,3*Ns+1,3)]
return (tsim,XK,YK,mu0,P0,Ns,dt,tf)
if __name__ == "__main__":
main() | gpl-2.0 |
mathemage/h2o-3 | py2/h2o_cmd.py | 20 | 16497 |
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
print "FIX! disabling runStoreView for now"
return {}
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
# is it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['frame_id']['name']
return numRows, numCols, key_name
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
# is it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['frame_id']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
# either return the first col, or the col identified by label. the column identified could be string or index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
# because of floor and ceil effects we potentially lose 2 bins (worst case)
# the extra bin for the max value is ignored
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
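# worked example (assumed bounds): expected[0] = 0.0 and expected[4] = 1.0 give
# expectedRange = 1.0 and maxErr = 1.0 / 998, i.e. roughly one bin width (~0.001)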
pt = h2o_util.twoDecimals(percentiles)
# only look at [0] for now...big e308 numbers if unpopulated due to not enough unique values in dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, [None] * 3, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
assert self.missingList == expectedMissingList, "%s %s" % (self.MissingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
@classmethod
def check(self,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, **kwargs):
if expectedLabel is not None:
assert self.label == expectedLabel
if expectedType is not None:
assert self.type == expectedType
if expectedMissing is not None:
assert self.missing == expectedMissing
if expectedDomain is not None:
assert self.domain == expectedDomain
if expectedBinsSum is not None:
assert self.binsSum == expectedBinsSum
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
# we need both colInndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
| apache-2.0 |
jor-/scipy | scipy/interpolate/interpolate.py | 4 | 97600 | from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
ravel, poly1d, asarray, intp)
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
r"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : `numpy.poly1d` instance
The Lagrange interpolating polynomial.
Examples
--------
Interpolate :math:`f(x) = x^3` by 3 points.
>>> from scipy.interpolate import lagrange
>>> x = np.array([0, 1, 2])
>>> y = x**3
>>> poly = lagrange(x, y)
Since there are only 3 points, Lagrange polynomial has degree 2. Explicitly,
it is given by
.. math::
\begin{aligned}
L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\
&= x (-2 + 3x)
\end{aligned}
>>> from numpy.polynomial.polynomial import Polynomial
>>> Polynomial(poly).coef
array([ 3., -2., 0.])
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated via nearest-neighbor extrapolation.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Note that calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic'
refer to a spline interpolation of zeroth, first, second or third
order; 'previous' and 'next' simply return the previous or next value
of the point) or as an integer specifying the order of the spline
interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless ``fill_value="extrapolate"``.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Attributes
----------
fill_value
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
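A minimal sketch of the two-sided `fill_value` behaviour described above,
reusing the same ``x`` and ``y``:
>>> f2 = interpolate.interp1d(x, y, bounds_error=False,
...                           fill_value=(y[0], y[-1]))
>>> lo, hi = f2(-1.0), f2(20.0)  # clamped to the first and last data values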
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest', 'previous', 'next'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: https://docs.python.org/reference/datamodel.html
if kind in ('linear', 'nearest', 'previous', 'next'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
elif kind == 'previous':
# Side for np.searchsorted and index for clipping
self._side = 'left'
self._ind = 0
# Move x by one floating point value to the left
self._x_shift = np.nextafter(self.x, -np.inf)
self._call = self.__class__._call_previousnext
elif kind == 'next':
self._side = 'right'
self._ind = 1
# Move x by one floating point value to the right
self._x_shift = np.nextafter(self.x, np.inf)
self._call = self.__class__._call_previousnext
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
if np.isnan(self.x).any():
xx = np.linspace(min(self.x), max(self.x), len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
"""The fill value."""
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data, the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_previousnext(self, x_new):
"""Use previous/next neighbour of x_new, y_new = f(x_new)."""
# 1. Get index of left/right value
x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
# 2. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(1-self._ind,
len(self.x)-self._ind).astype(intp)
# 3. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices+self._ind-1]
return y_new
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if self.c.ndim < 2:
raise ValueError("Coefficients array must be at least "
"2-dimensional.")
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("axis=%s must be between 0 and %s" %
(axis, self.c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
``self.x`` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
``self.x`` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
else:
if not x[-1] <= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
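    Examples
    --------
    A minimal illustrative construction (coefficients and breakpoints chosen
    only for demonstration): two linear pieces, ``x`` on ``[0, 1]`` and
    ``2 - x`` on ``[1, 2]``, written in the local power basis defined above.
    >>> from scipy.interpolate import PPoly
    >>> c = np.array([[1.0, -1.0], [0.0, 1.0]])
    >>> x = np.array([0.0, 1.0, 2.0])
    >>> p = PPoly(c, x)
    >>> y = p([0.5, 1.5])
    >>> np.allclose(y, [0.5, 0.5])
    True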
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
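        Examples
        --------
        An illustrative sketch (coefficients chosen for demonstration): the
        derivative of ``p(x) = x**2`` on ``[0, 1]`` evaluated at ``x = 0.5``.
        >>> from scipy.interpolate import PPoly
        >>> p = PPoly([[1.0], [0.0], [0.0]], [0.0, 1.0])
        >>> dp = p.derivative()
        >>> float(dp(0.5))
        1.0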
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
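        Examples
        --------
        An illustrative sketch (coefficients chosen for demonstration): the
        antiderivative of ``p(x) = 2*x`` on ``[0, 1]`` is ``x**2``.
        >>> from scipy.interpolate import PPoly
        >>> p = PPoly([[2.0], [0.0]], [0.0, 1.0])
        >>> P = p.antiderivative()
        >>> float(P(1.0))
        1.0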
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
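        Examples
        --------
        An illustrative sketch (coefficients chosen for demonstration):
        integrate ``p(x) = x`` over ``[0, 1]``.
        >>> from scipy.interpolate import PPoly
        >>> p = PPoly([[1.0], [0.0]], [0.0, 1.0])
        >>> float(p.integrate(0, 1))
        0.5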
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
        Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
        If the PPoly object describes multiple polynomials, the
        return value is an object array, each element of which is an
        ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discont` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.solve()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
        Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
        If the PPoly object describes multiple polynomials, the
        return value is an object array, each element of which is an
        ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
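        Examples
        --------
        An illustrative sketch (sample data chosen for demonstration):
        convert an interpolating `splrep` spline of ``sin`` samples and
        compare a value against the underlying function.
        >>> from scipy.interpolate import splrep, PPoly
        >>> x = np.linspace(0, 1, 11)
        >>> tck = splrep(x, np.sin(x))
        >>> p = PPoly.from_spline(tck)
        >>> np.allclose(p(0.5), np.sin(0.5))
        True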
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
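        Examples
        --------
        An illustrative sketch (coefficients chosen for demonstration):
        convert a quadratic Bernstein-basis polynomial and check that both
        representations agree at a point.
        >>> from scipy.interpolate import BPoly, PPoly
        >>> bp = BPoly([[1.0], [2.0], [3.0]], [0, 1])
        >>> pp = PPoly.from_bernstein_basis(bp)
        >>> np.allclose(pp(0.5), bp(0.5))
        True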
"""
if not isinstance(bp, BPoly):
raise TypeError(".from_bernstein_basis only accepts BPoly instances. "
"Got %s instead." % type(bp))
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature,
see for example [1]_ [2]_ [3]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
# ib.extrapolate shouldn't be 'periodic', it is converted to
        # False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
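        Examples
        --------
        An illustrative sketch (coefficients chosen for demonstration):
        convert a power-basis polynomial and check that both representations
        agree at a point.
        >>> from scipy.interpolate import PPoly, BPoly
        >>> pp = PPoly([[1.0], [0.0], [0.0]], [0.0, 1.0])
        >>> bp = BPoly.from_power_basis(pp)
        >>> np.allclose(bp(0.5), pp(0.5))
        True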
"""
if not isinstance(pp, PPoly):
raise TypeError(".from_power_basis only accepts PPoly instances. "
"Got %s instead." % type(pp))
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
``x = 1`` and ``x = 2``.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
r"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on ``[xa, xb]`` and having the values and derivatives at the
        endpoints `xa` and `xb` as specified by `ya` and `yb`.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of `ya` and `yb` are `na` and `nb`, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at `xa`. `ya[0]` is the value of the function, and
`ya[i]` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at `xb`.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
        This uses several facts about Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At ``x = xb`` it's the same with ``a = n - q``.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
r"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point ``xp = (x', y', z', ...)`` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
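    Examples
    --------
    An illustrative sketch (coefficients chosen for demonstration): a
    polynomial that is constant (equal to 1) on every cell of a 2-d grid
    with two intervals along the first axis and one along the second.
    >>> from scipy.interpolate import NdPPoly
    >>> x = (np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]))
    >>> c = np.ones((1, 1, 2, 1))
    >>> p = NdPPoly(c, x)
    >>> float(p((0.5, 0.5)))
    1.0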
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
        May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[tuple(sl)]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[tuple(sl)]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
        May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
If any of `points` have a dimension of size 1, linear interpolation will
return an array of `nan` values. Nearest-neighbor interpolation will work
as usual in this case.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Wikipedia, "Trilinear interpolation",
https://en.wikipedia.org/wiki/Trilinear_interpolation
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
            raise ValueError("The requested sample points xi have dimension "
                             "%d, but this RegularGridInterpolator has "
                             "dimension %d" % (xi.shape[-1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
        # each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = [np.where(yi <= .5, i, i + 1)
for i, yi in zip(indices, norm_distances)]
return self.values[tuple(idx_res)]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
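    Examples
    --------
    An illustrative sketch (grid and values chosen for demonstration):
    interpolate ``f(x, y) = x + y`` on a small regular grid.
    >>> from scipy.interpolate import interpn
    >>> x = np.array([0.0, 1.0, 2.0])
    >>> y = np.array([0.0, 1.0])
    >>> values = x[:, None] + y[None, :]
    >>> v = interpn((x, y), values, [[0.5, 0.5]])
    >>> np.allclose(v, 1.0)
    True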
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
        raise ValueError("The method splinef2d can only be used for "
                         "2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
        raise ValueError("The method splinef2d does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
        raise ValueError("The method splinef2d can only be used for "
                         "scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
        raise ValueError("The requested sample points xi have dimension "
                         "%d, but this RegularGridInterpolator has "
                         "dimension %d" % (xi.shape[-1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class _ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("_ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
| bsd-3-clause |
hainm/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
fibbo/DIRAC | Core/Utilities/Graphs/GraphUtilities.py | 10 | 14613 | ########################################################################
# $HeadURL$
########################################################################
""" GraphUtilities is a a collection of utility functions and classes used
in the DIRAC Graphs package.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
import types, time, datetime, calendar, math, pytz, numpy, os
from matplotlib.ticker import ScalarFormatter
from matplotlib.dates import AutoDateLocator, AutoDateFormatter, DateFormatter, RRuleLocator, \
rrulewrapper, HOURLY, MINUTELY, SECONDLY, YEARLY, MONTHLY, DAILY
from dateutil.relativedelta import relativedelta
def evalPrefs( *args, **kw ):
""" Interpret arguments as preferencies dictionaries or key-value pairs. The overriding order
is right most - most important one. Returns a single dictionary of preferencies
"""
prefs = {}
for pDict in list( args ) + [kw]:
if type( pDict ) == types.DictType:
for key in pDict:
if key == "metadata":
for mkey in pDict[key]:
prefs[mkey] = pDict[key][mkey]
else:
prefs[key] = pDict[key]
return prefs
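# Illustrative usage sketch added by the editor; it is not part of the original DIRAC
# module and the preference names below ('Width', 'title') are hypothetical.
def _exampleEvalPrefs():
  # right-most arguments win, and any 'metadata' sub-dictionary is flattened
  prefs = evalPrefs( { 'metadata': { 'title': 'CPU' }, 'Width': 800 }, Width = 1024 )
  assert prefs == { 'title': 'CPU', 'Width': 1024 }
  return prefs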
def pixelToPoint( size, dpi ):
""" Convert size expressed in pixels into points for a given dpi resolution
"""
return float( size ) * 100. / float( dpi )
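# For example, pixelToPoint( 16, 200 ) returns 8.0: the expression above effectively
# treats pixel sizes as being given for a reference resolution of 100 dpi (editor's
# reading of the formula, added for clarity).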
datestrings = ['%x %X', '%x', '%Y-%m-%d %H:%M:%S']
def convert_to_datetime( string ):
orig_string = str( string )
try:
if type( string ) == datetime.datetime:
results = string
else:
results = eval( str( string ), {'__builtins__':None, 'time':time, 'math':math}, {} )
if type( results ) == types.FloatType or type( results ) == types.IntType:
results = datetime.datetime.fromtimestamp( int( results ) )
elif type( results ) == datetime.datetime:
pass
else:
raise ValueError( "Unknown datetime type!" )
except Exception, e:
t = None
for dateformat in datestrings:
try:
t = time.strptime( string, dateformat )
timestamp = calendar.timegm( t ) #-time.timezone
results = datetime.datetime.fromtimestamp( timestamp )
break
except:
pass
if t == None:
try:
string = string.split( '.', 1 )[0]
t = time.strptime( string, dateformat )
timestamp = time.mktime( t ) #-time.timezone
results = datetime.datetime.fromtimestamp( timestamp )
except:
raise
raise ValueError( "Unable to create time from string!\nExpecting " \
"format of: '12/06/06 12:54:67'\nRecieved:%s" % orig_string )
return results
def to_timestamp( val ):
try:
v = float( val )
if v > 1000000000 and v < 1900000000:
return v
except:
pass
val = convert_to_datetime( val )
#return calendar.timegm( val.timetuple() )
return time.mktime( val.timetuple() )
# If the graph has more than `hour_switch` minutes, we print
# out hours in the subtitle.
hour_switch = 7
# If the graph has more than `day_switch` hours, we print
# out days in the subtitle.
day_switch = 7
# If the graph has more than `week_switch` days, we print
# out the weeks in the subtitle.
week_switch = 7
def add_time_to_title( begin, end, metadata = {} ):
""" Given a title and two times, adds the time info to the title.
Example results:
"Number of Attempted Transfers\n(24 Hours from 4:45 12-14-2006 to
5:56 12-15-2006)"
There are two important pieces to the subtitle we add - the duration
(i.e., '48 Hours') and the time interval (i.e., 11:00 07-02-2007 to
11:00 07-04-2007).
We attempt to make the duration match the size of the span (for a bar
graph, this would be the width of the individual bar) in order for it
to make the most sense. The formatting of the time interval is based
upon how much real time there is from the beginning to the end.
We made the distinction because some would want to show graphs
representing 168 Hours, but needed the format to show the date as
well as the time.
"""
if 'span' in metadata:
interval = metadata['span']
else:
interval = time_interval( begin, end )
formatting_interval = time_interval( begin, end )
if formatting_interval == 600:
format_str = '%H:%M:%S'
elif formatting_interval == 3600:
format_str = '%Y-%m-%d %H:%M'
elif formatting_interval == 86400:
format_str = '%Y-%m-%d'
elif formatting_interval == 86400 * 7:
format_str = 'Week %U of %Y'
if interval < 600:
format_name = 'Seconds'
time_slice = 1
elif interval < 3600 and interval >= 600:
format_name = 'Minutes'
time_slice = 60
elif interval >= 3600 and interval < 86400:
format_name = 'Hours'
time_slice = 3600
elif interval >= 86400 and interval < 86400 * 7:
format_name = 'Days'
time_slice = 86400
elif interval >= 86400 * 7:
format_name = 'Weeks'
time_slice = 86400 * 7
else:
format_str = '%x %X'
format_name = 'Seconds'
time_slice = 1
begin_tuple = time.localtime( begin )
end_tuple = time.localtime( end )
added_title = '%i %s from ' % ( int( ( end - begin ) / time_slice ), format_name )
added_title += time.strftime( '%s to' % format_str, begin_tuple )
if time_slice < 86400:
add_utc = ' UTC'
else:
add_utc = ''
added_title += time.strftime( ' %s%s' % ( format_str, add_utc ), end_tuple )
return added_title
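# Illustrative sketch added by the editor, not part of the original module; the exact
# subtitle depends on the local timezone because time.localtime() is used above.
def _exampleAddTimeToTitle():
  end = time.mktime( ( 2006, 12, 15, 5, 56, 0, 0, 0, -1 ) )
  begin = end - 24 * 3600
  # returns something like "24 Hours from 2006-12-14 05:56 to 2006-12-15 05:56 UTC"
  return add_time_to_title( begin, end )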
def time_interval( begin, end ):
"""
Determine the appropriate time interval based upon the length of
time as indicated by the `starttime` and `endtime` keywords.
"""
if end - begin < 600 * hour_switch:
return 600
if end - begin < 86400 * day_switch:
return 3600
elif end - begin < 86400 * 7 * week_switch:
return 86400
else:
return 86400 * 7
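# Editor-added check of the bucket boundaries implied above (spans are in seconds):
def _exampleTimeInterval():
  assert time_interval( 0, 3000 ) == 600          # under 70 minutes -> 10-minute bins
  assert time_interval( 0, 2 * 86400 ) == 3600    # under 7 days -> hourly bins
  assert time_interval( 0, 30 * 86400 ) == 86400  # under 7 weeks -> daily bins
  return True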
def comma_format( x_orig ):
x = float( x_orig )
if x >= 1000:
after_comma = x % 1000
before_comma = int( x ) / 1000
return '%s,%03g' % ( comma_format( before_comma ), after_comma )
else:
return str( x_orig )
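# Editor-added sanity check of comma_format; the integer inputs are arbitrary examples
# and, like the module itself, assume Python 2 integer division.
def _exampleCommaFormat():
  assert comma_format( 1234567 ) == '1,234,567'
  assert comma_format( 950 ) == '950'
  return True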
class PrettyScalarFormatter( ScalarFormatter ):
def _set_orderOfMagnitude( self, range ):
# if scientific notation is to be used, find the appropriate exponent
# if using an numerical offset, find the exponent after applying the offset
locs = numpy.absolute( self.locs )
if self.offset: oom = math.floor( math.log10( range ) )
else:
if locs[0] > locs[-1]:
val = locs[0]
else:
val = locs[-1]
if val == 0:
oom = 0
else:
oom = math.floor( math.log10( val ) )
if oom <= -7:
self.orderOfMagnitude = oom
elif oom >= 9:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def pprint_val( self, x ):
pstring = ScalarFormatter.pprint_val( self, x )
return comma_format( pstring )
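# Editor's note on the logic above: a non-zero order of magnitude (and hence scientific
# notation) is only engaged when the relevant tick value is at or below 1e-7 or at or
# above 1e9; everything in between keeps orderOfMagnitude = 0 and is printed as-is,
# with pprint_val running the result through comma_format.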
class PrettyDateFormatter( AutoDateFormatter ):
""" This class provides a formatter which conforms to the
      desired date formats for the Phedex system.
"""
def __init__( self, locator ):
tz = pytz.timezone( 'UTC' )
AutoDateFormatter.__init__( self, locator, tz = tz )
def __call__( self, x, pos = 0 ):
scale = float( self._locator._get_unit() )
if ( scale == 365.0 ):
self._formatter = DateFormatter( "%Y", self._tz )
elif ( scale == 30.0 ):
self._formatter = DateFormatter( "%b %Y", self._tz )
elif ( ( scale >= 1.0 ) and ( scale <= 7.0 ) ):
self._formatter = DateFormatter( "%Y-%m-%d", self._tz )
elif ( scale == ( 1.0 / 24.0 ) ):
self._formatter = DateFormatter( "%H:%M", self._tz )
elif ( scale == ( 1.0 / ( 24 * 60 ) ) ):
self._formatter = DateFormatter( "%H:%M", self._tz )
elif ( scale == ( 1.0 / ( 24 * 3600 ) ) ):
self._formatter = DateFormatter( "%H:%M:%S", self._tz )
else:
self._formatter = DateFormatter( "%b %d %Y %H:%M:%S", self._tz )
return self._formatter( x, pos )
class PrettyDateLocator( AutoDateLocator ):
def get_locator( self, dmin, dmax ):
'pick the best locator based on a distance'
delta = relativedelta( dmax, dmin )
numYears = ( delta.years * 1.0 )
numMonths = ( numYears * 12.0 ) + delta.months
numDays = ( numMonths * 31.0 ) + delta.days
numHours = ( numDays * 24.0 ) + delta.hours
numMinutes = ( numHours * 60.0 ) + delta.minutes
numSeconds = ( numMinutes * 60.0 ) + delta.seconds
numticks = 5
# self._freq = YEARLY
interval = 1
bymonth = 1
bymonthday = 1
byhour = 0
byminute = 0
bysecond = 0
if ( numYears >= numticks ):
self._freq = YEARLY
elif ( numMonths >= numticks ):
self._freq = MONTHLY
bymonth = range( 1, 13 )
if ( ( 0 <= numMonths ) and ( numMonths <= 14 ) ):
interval = 1 # show every month
elif ( ( 15 <= numMonths ) and ( numMonths <= 29 ) ):
interval = 3 # show every 3 months
elif ( ( 30 <= numMonths ) and ( numMonths <= 44 ) ):
interval = 4 # show every 4 months
else: # 45 <= numMonths <= 59
interval = 6 # show every 6 months
elif ( numDays >= numticks ):
self._freq = DAILY
bymonth = None
bymonthday = range( 1, 32 )
if ( ( 0 <= numDays ) and ( numDays <= 9 ) ):
interval = 1 # show every day
elif ( ( 10 <= numDays ) and ( numDays <= 19 ) ):
interval = 2 # show every 2 days
elif ( ( 20 <= numDays ) and ( numDays <= 35 ) ):
interval = 3 # show every 3 days
elif ( ( 36 <= numDays ) and ( numDays <= 80 ) ):
interval = 7 # show every 1 week
else: # 100 <= numDays <= ~150
interval = 14 # show every 2 weeks
elif ( numHours >= numticks ):
self._freq = HOURLY
bymonth = None
bymonthday = None
byhour = range( 0, 24 ) # show every hour
if ( ( 0 <= numHours ) and ( numHours <= 14 ) ):
interval = 1 # show every hour
elif ( ( 15 <= numHours ) and ( numHours <= 30 ) ):
interval = 2 # show every 2 hours
elif ( ( 30 <= numHours ) and ( numHours <= 45 ) ):
interval = 3 # show every 3 hours
elif ( ( 45 <= numHours ) and ( numHours <= 68 ) ):
interval = 4 # show every 4 hours
elif ( ( 68 <= numHours ) and ( numHours <= 90 ) ):
interval = 6 # show every 6 hours
else: # 90 <= numHours <= 120
interval = 12 # show every 12 hours
elif ( numMinutes >= numticks ):
self._freq = MINUTELY
bymonth = None
bymonthday = None
byhour = None
byminute = range( 0, 60 )
if ( numMinutes > ( 10.0 * numticks ) ):
interval = 10
# end if
elif ( numSeconds >= numticks ):
self._freq = SECONDLY
bymonth = None
bymonthday = None
byhour = None
byminute = None
bysecond = range( 0, 60 )
if ( numSeconds > ( 10.0 * numticks ) ):
interval = 10
# end if
else:
# do what?
# microseconds as floats, but floats from what reference point?
pass
rrule = rrulewrapper( self._freq, interval = interval, \
dtstart = dmin, until = dmax, \
bymonth = bymonth, bymonthday = bymonthday, \
byhour = byhour, byminute = byminute, \
bysecond = bysecond )
locator = RRuleLocator( rrule, self.tz )
locator.set_axis( self.axis )
locator.set_view_interval( *self.axis.get_view_interval() )
locator.set_data_interval( *self.axis.get_data_interval() )
return locator
def pretty_float( num ):
if num > 1000:
return comma_format( int( num ) )
try:
floats = int( max( 2 - max( numpy.floor( numpy.log( abs( num ) + 1e-3 ) / numpy.log( 10. ) ), 0 ), 0 ) )
except:
floats = 2
format = "%." + str( floats ) + "f"
if type( num ) == types.TupleType:
return format % float( num[0] )
else:
try:
retval = format % float( num )
except:
raise Exception( "Unable to convert %s into a float." % ( str( num ) ) )
return retval
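# Editor-added examples of the precision heuristic above (inputs are arbitrary):
# roughly two decimals are kept for small magnitudes and none for values >= 100,
# while values above 1000 fall back to comma_format.
def _examplePrettyFloat():
  assert pretty_float( 3.14159 ) == '3.14'
  assert pretty_float( 123.456 ) == '123'
  assert pretty_float( 2500 ) == '2,500'
  return True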
def statistics( results, span = None, is_timestamp = False ):
results = dict( results )
if span != None:
parsed_data = {}
min_key = min( results.keys() )
max_key = max( results.keys() )
for i in range( min_key, max_key + span, span ):
if i in results:
parsed_data[i] = results[i]
del results[i]
else:
parsed_data[i] = 0.0
if len( results ) > 0:
raise Exception( "Unable to use all the values for the statistics" )
else:
parsed_data = results
values = parsed_data.values()
data_min = min( values )
data_max = max( values )
data_avg = numpy.average( values )
if is_timestamp:
current_time = max( parsed_data.keys() )
data_current = parsed_data[ current_time ]
return data_min, data_max, data_avg, data_current
else:
return data_min, data_max, data_avg
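# Editor-added sketch of the gap-filling behaviour implemented above; the sample series
# is hypothetical. The missing bin at t=5 is filled with 0.0 before min/max/average
# are computed.
def _exampleStatistics():
  dmin, dmax, davg = statistics( { 0: 1.0, 10: 3.0 }, span = 5 )
  assert ( dmin, dmax ) == ( 0.0, 3.0 )
  assert abs( davg - 4.0 / 3.0 ) < 1e-9
  return dmin, dmax, davg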
def makeDataFromCSV( csv ):
""" Generate plot data dictionary from a csv file or string
"""
if os.path.exists( csv ):
fdata = open( csv, 'r' )
flines = fdata.readlines()
fdata.close()
else:
flines = csv.split( '\n' )
graph_data = {}
labels = flines[0].strip().split( ',' )
if len( labels ) == 2:
# simple plot data
for line in flines:
line = line.strip()
if line[0] != '#':
key, value = line.split( ',' )
graph_data[key] = value
elif len( flines ) == 2:
values = flines[1].strip().split( ',' )
for key,value in zip(labels,values):
graph_data[key] = value
elif len( labels ) > 2:
# stacked graph data
del labels[0]
del flines[0]
for label in labels:
plot_data = {}
index = labels.index( label ) + 1
for line in flines:
values = line.strip().split( ',' )
value = values[index].strip()
#if value:
plot_data[values[0]] = values[index]
#else:
#plot_data[values[0]] = '0.'
#pass
graph_data[label] = dict( plot_data )
return graph_data
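# Editor-added example of the simple two-column case; the CSV content is hypothetical.
# Lines starting with '#' are skipped and all values are kept as strings.
def _exampleMakeDataFromCSV():
  data = makeDataFromCSV( "# time,value\n1,10\n2,20" )
  assert data == { '1': '10', '2': '20' }
  return data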
def darkenColor( color, factor=2 ):
c1 = int( color[1:3], 16 )
c2 = int( color[3:5], 16 )
c3 = int( color[5:7], 16 )
c1 /= factor
c2 /= factor
c3 /= factor
result = '#' + (str( hex( c1) ).replace( '0x', '' ).zfill( 2 ) +
str( hex( c2) ).replace( '0x', '' ).zfill( 2 ) +
str( hex( c3) ).replace( '0x', '' ).zfill( 2 ) )
return result
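# Editor-added check: each RGB channel of a '#RRGGBB' string is divided by `factor`.
def _exampleDarkenColor():
  assert darkenColor( '#ff8800' ) == '#7f4400'
  assert darkenColor( '#808080', factor = 4 ) == '#202020'
  return True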
| gpl-3.0 |
PedroTrujilloV/nest-simulator | testsuite/manualtests/stdp_dopa_check.py | 14 | 10098 | # -*- coding: utf-8 -*-
#
# stdp_dopa_check.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from matplotlib.pylab import *
import numpy as n
# Test script to reproduce changes in weight of a dopamine modulated STDP synapse in an event-driven way.
# Pre- and post-synaptic spike trains are read in from the GDF file produced by
# test_stdp_dopa.py (spikes-3-0.gdf in this script).
# output: pre/post/dopa \t spike time \t weight
#
# Synaptic dynamics for dopamine modulated STDP synapses as used in [1], based on [2]
#
# References:
# [1] Potjans W, Morrison A and Diesmann M (2010). Enabling functional neural circuit simulations with distributed computing of neuromodulated plasticity. Front. Comput. Neurosci. 4:141. doi:10.3389/fncom.2010.00141
# [2] Izhikevich, E. M. (2007). Solving the distal reward problem through linkage of STDP and dopamine signaling. Cereb. Cortex 17(10), 2443-2452.
#
# author: Wiebke Potjans, October 2010
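# Editor's summary of the event-driven rule implemented below (a reading of the code,
# not taken verbatim from [1,2]): between events the eligibility trace decays as
# Etrace *= exp(-(t - t_last_e)/tau_e) and the dopamine trace as
# Dtrace *= exp(-(t - t_last_d)/tau_d); a pre (post) spike adds -A_minus*exp(-dt/tau_minus)
# (+A_plus*exp(-dt/tau_plus)) to Etrace, a dopamine spike adds 1/tau_d to Dtrace, and the
# weight change between events is integrated analytically as
#   w += Etrace*Dtrace/(1/tau_e + 1/tau_d) * (exp(..)/.. - exp(..)/..),
# with w clipped to [w_min, w_max] after every update.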
def stdp_dopa(w_init, pre_spikes, post_spikes, dopa_spikes, tau_e, tau_d, A_minus, A_plus, tau_plus, tau_minus, dendritic_delay, delay_d):
w = w_init # initial weight
w_min = 0. # minimal weight
w_max = 200. #maximal weight
i=0 # index of presynaptic spike
j=0 # index of postsynaptic spike
k=0 # index of dopamine spike
last_post_spike = dendritic_delay
Etrace = 0.
Dtrace = 0.
last_e_update = 0.
last_w_update = 0.
last_pre_spike = 0.
last_dopa_spike = 0.
advance = True
while advance:
advance = False
# next spike is presynaptic
if ((pre_spikes[i] < post_spikes[j]) and (pre_spikes[i] < dopa_spikes[k])):
dt = pre_spikes[i] - last_post_spike
# weight update
w = w + Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d)-exp((last_e_update-pre_spikes[i])/tau_e)*exp((last_dopa_spike-pre_spikes[i])/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "pre\t%.4f\t%.4f" % (pre_spikes[i],w)
last_w_update = pre_spikes[i]
Etrace = Etrace * exp((last_e_update - pre_spikes[i])/tau_e) - A_minus*exp(-dt/tau_minus)
last_e_update = pre_spikes[i]
last_pre_spike = pre_spikes[i]
if i < len(pre_spikes) - 1:
i += 1
advance = True
# next spike is postsynaptic
if( (post_spikes[j] < pre_spikes[i]) and (post_spikes[j] < dopa_spikes[k])):
dt = post_spikes[j] - last_pre_spike
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d)*(exp((last_e_update-post_spikes[j])/tau_e)*exp((last_dopa_spike-post_spikes[j])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "post\t%.4f\t%.4f" % (post_spikes[j],w)
last_w_update = post_spikes[j]
Etrace = Etrace * exp((last_e_update - post_spikes[j])/tau_e) + A_plus*exp(-dt/tau_plus)
last_e_update = post_spikes[j]
last_post_spike = post_spikes[j]
if j < len(post_spikes) - 1:
j += 1
advance = True
# next spike is dopamine spike
if ((dopa_spikes[k] < pre_spikes[i]) and (dopa_spikes[k] < post_spikes[j])):
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-dopa_spikes[k])/tau_e)*exp((last_dopa_spike-dopa_spikes[k])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "dopa\t%.4f\t%.4f" % (dopa_spikes[k],w)
last_w_update = dopa_spikes[k]
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
if(dopa_spikes[k]==dopa_spikes[k-1]):
advance = False
Dtrace = Dtrace + 1/tau_d
if k < len(dopa_spikes) - 1:
k += 1
advance = True
# pre and postsynaptic spikes are at the same time
# Etrace is not updated for this case; therefore no weight update is required
if ((pre_spikes[i]==post_spikes[j]) and (pre_spikes[i] < dopa_spikes[k])):
if i < len(pre_spikes) - 1:
i += 1
advance = True
if j < len(post_spikes) -1:
j +=1
advance = True
# presynaptic spike and dopamine spike are at the same time
if ((pre_spikes[i]==dopa_spikes[k]) and (pre_spikes[i] < post_spikes[j])):
dt = pre_spikes[i] - last_post_spike
w = w + Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d)-exp((last_e_update-pre_spikes[i])/tau_e)*exp((last_dopa_spike-pre_spikes[i])/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "pre\t%.4f\t%.4f" % (pre_spikes[i],w)
last_w_update = pre_spikes[i]
Etrace = Etrace * exp((last_e_update - pre_spikes[i])/tau_e) - A_minus*exp(-dt/tau_minus)
last_e_update = pre_spikes[i]
last_pre_spike = pre_spikes[i]
if i < len(pre_spikes) - 1:
i += 1
advance = True
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
# postsynaptic spike and dopamine spike are at the same time
if ((post_spikes[j]==dopa_spikes[k]) and (post_spikes[j] < pre_spikes[i])):
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d)*(exp((last_e_update-post_spikes[j])/tau_e)*exp((last_dopa_spike-post_spikes[j])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "post\t%.4f\t%.4f" % (post_spikes[j],w)
last_w_update = post_spikes[j]
Etrace = Etrace * exp((last_e_update - post_spikes[j])/tau_e) + A_plus*exp(-dt/tau_plus)
last_e_update = post_spikes[j]
last_post_spike = post_spikes[j]
if j < len(post_spikes) - 1:
j += 1
advance = True
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
# all three spikes are at the same time
if ((post_spikes[j]==dopa_spikes[k]) and (post_spikes[j]==pre_spikes[i])):
# weight update
w = w - Etrace * Dtrace / (1./tau_e+1./tau_d) *(exp((last_e_update-dopa_spikes[k])/tau_e)*exp((last_dopa_spike-dopa_spikes[k])/tau_d)-exp((last_e_update-last_w_update)/tau_e)*exp((last_dopa_spike-last_w_update)/tau_d))
if(w<w_min):
w=w_min
if(w>w_max):
w=w_max
print "dopa\t%.4f\t%.4f" % (dopa_spikes[k],w)
last_w_update = dopa_spikes[k]
Dtrace = Dtrace * exp((last_dopa_spike - dopa_spikes[k])/tau_d) + 1/tau_d
last_dopa_spike = dopa_spikes[k]
if k < len(dopa_spikes) - 1:
k += 1
advance = True
if(dopa_spikes[k]==dopa_spikes[k-1]):
advance = False
Dtrace = Dtrace + 1/tau_d
if k < len(dopa_spikes) - 1:
k += 1
advance = True
return w
# stdp dopa parameters
w_init = 35.
tau_plus = 20.
tau_minus = 15.
tau_e = 1000.
tau_d = 200.
A_minus = 1.5
A_plus = 1.0
dendritic_delay = 1.0
delay_d = 1.
# load spikes from simulation with test_stdp_dopa.py
spikes = n.loadtxt("spikes-3-0.gdf")
pre_spikes = spikes[find(spikes[:,0]==4),1]
# delay is purely dendritic
# postsynaptic spike arrives at sp_j + dendritic_delay at the synapse
post_spikes =spikes[find(spikes[:,0]==5),1] + dendritic_delay
# dopa spike arrives at sp_j + delay_d at the synapse
dopa_spikes = spikes[find(spikes[:,0]==6),1] + delay_d
# calculate development of stdp weight
w = stdp_dopa(w_init, pre_spikes, post_spikes, dopa_spikes, tau_e, tau_d, A_minus, A_plus, tau_plus, tau_minus, dendritic_delay, delay_d)
print w
| gpl-2.0 |
louispotok/pandas | pandas/tests/frame/test_operators.py | 1 | 42821 | # -*- coding: utf-8 -*-
from __future__ import print_function
from collections import deque
from datetime import datetime
import operator
import pytest
from numpy import nan, random
import numpy as np
from pandas.compat import range
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.core.common as com
import pandas.io.formats.printing as printing
import pandas as pd
from pandas.util.testing import (assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import (TestData, _check_mixed_float,
_check_mixed_int)
class TestDataFrameOperators(TestData):
def test_operators(self):
garbage = random.random(4)
colSeries = Series(garbage, index=np.array(self.frame.columns))
idSum = self.frame + self.frame
seriesSum = self.frame + colSeries
for col, series in compat.iteritems(idSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] * 2
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
for col, series in compat.iteritems(seriesSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] + colSeries[col]
if not np.isnan(val):
assert val == origVal
else:
assert np.isnan(origVal)
added = self.frame2 + self.frame2
expected = self.frame2 * 2
assert_frame_equal(added, expected)
df = DataFrame({'a': ['a', None, 'b']})
assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
# Test for issue #10181
for dtype in ('float', 'int64'):
frames = [
DataFrame(dtype=dtype),
DataFrame(columns=['A'], dtype=dtype),
DataFrame(index=[0], dtype=dtype),
]
for df in frames:
assert (df + df).equals(df)
assert_frame_equal(df + df, df)
def test_ops_np_scalar(self):
vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]
f = lambda x: DataFrame(x, index=list('ABCDE'),
columns=['jim', 'joe', 'jolie'])
df = f(vals)
for x in xs:
assert_frame_equal(df / np.array(x), f(vals / x))
assert_frame_equal(np.array(x) * df, f(vals * x))
assert_frame_equal(df + np.array(x), f(vals + x))
assert_frame_equal(np.array(x) - df, f(x - vals))
def test_operators_boolean(self):
# GH 5808
# empty frames, non-mixed dtype
result = DataFrame(index=[1]) & DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) | DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) & DataFrame(index=[1, 2])
assert_frame_equal(result, DataFrame(index=[1, 2]))
result = DataFrame(index=[1], columns=['A']) & DataFrame(
index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) & DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
# boolean ops
result = DataFrame(1, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(1, index=[1], columns=['A']))
def f():
DataFrame(1.0, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
pytest.raises(TypeError, f)
def f():
DataFrame('foo', index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
pytest.raises(TypeError, f)
def test_operators_none_as_na(self):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
ops = [operator.add, operator.sub, operator.mul, operator.truediv]
# since filling converts dtypes from object, changed expected to be
# object
for op in ops:
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
pytest.raises(TypeError, lambda: x == y)
pytest.raises(TypeError, lambda: x != y)
pytest.raises(TypeError, lambda: x >= y)
pytest.raises(TypeError, lambda: x > y)
pytest.raises(TypeError, lambda: x < y)
pytest.raises(TypeError, lambda: x <= y)
# GH4968
# invalid date/int comparisons
df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
df['dates'] = date_range('20010101', periods=len(df))
df2 = df.copy()
df2['dates'] = df['a']
check(df, df2)
df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
df2 = DataFrame({'a': date_range('20010101', periods=len(
df)), 'b': date_range('20100101', periods=len(df))})
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
df = DataFrame({'dates1': date_range('20010101', periods=10),
'dates2': date_range('20010102', periods=10),
'intcol': np.random.randint(1000000000, size=10),
'floatcol': np.random.randn(10),
'stringcol': list(tm.rands(10))})
df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(df, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), df)
assert_frame_equal(result, expected)
# nats
expected = left_f(df, Timestamp('nat'))
result = right_f(Timestamp('nat'), df)
assert_frame_equal(result, expected)
def test_logical_operators(self):
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(op(df1.values, df2.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index,
columns=df1.columns)
assert result.values.dtype == np.bool_
assert_frame_equal(result, expected)
df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': False, 'b': False, 'c': True,
'd': False, 'e': False},
'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'd': {'a': False, 'b': False, 'c': False,
'd': True, 'e': False},
'e': {'a': False, 'b': False, 'c': False,
'd': False, 'e': True}}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
# operator.neg is deprecated in numpy >= 1.9
_check_unary_op(operator.inv)
@pytest.mark.parametrize('op,res', [('__eq__', False),
('__ne__', True)])
def test_logical_typeerror_with_non_valid(self, op, res):
# we are comparing floats vs a string
result = getattr(self.frame, op)('foo')
assert bool(result.all().all()) is res
def test_logical_with_nas(self):
d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
# GH4947
# bool comparisons should return bool
result = d['a'] | d['b']
expected = Series([False, True])
assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d['a'].fillna(False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
result = d['a'].fillna(False, downcast=False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
@pytest.mark.parametrize('df,expected', [
(pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})),
(pd.DataFrame({'a': [False, True]}),
pd.DataFrame({'a': [True, False]})),
(pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))}))
])
def test_neg_numeric(self, df, expected):
assert_frame_equal(-df, expected)
assert_series_equal(-df['a'], expected['a'])
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': ['a', 'b']}),
pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
])
def test_neg_raises(self, df):
with pytest.raises(TypeError):
(- df)
with pytest.raises(TypeError):
(- df['a'])
def test_invert(self):
assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': [-1, 1]}),
pd.DataFrame({'a': [False, True]}),
pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}),
])
def test_pos_numeric(self, df):
# GH 16073
assert_frame_equal(+df, df)
assert_series_equal(+df['a'], df['a'])
@pytest.mark.parametrize('df', [
pd.DataFrame({'a': ['a', 'b']}),
pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}),
])
def test_pos_raises(self, df):
with pytest.raises(TypeError):
(+ df)
with pytest.raises(TypeError):
(+ df['a'])
def test_arith_flex_frame(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
for op in ops:
try:
alias = aliases.get(op, op)
f = getattr(operator, alias)
result = getattr(self.frame, op)(2 * self.frame)
exp = f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# no overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='uint64', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
# rops
r_f = lambda x, y: f(y, x)
result = getattr(self.frame, 'r' + op)(2 * self.frame)
exp = r_f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(
2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
result = getattr(self.intframe, op)(2 * self.intframe)
exp = f(self.intframe, 2 * self.intframe)
assert_frame_equal(result, exp)
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(
2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# no overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='uint64', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
except:
printing.pprint_thing("Failing operation %r" % op)
raise
# ndim >= 3
ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
msg = "Unable to coerce to Series/DataFrame"
with tm.assert_raises_regex(ValueError, msg):
f(self.frame, ndim_5)
with tm.assert_raises_regex(ValueError, msg):
getattr(self.frame, op)(ndim_5)
# res_add = self.frame.add(self.frame)
# res_sub = self.frame.sub(self.frame)
# res_mul = self.frame.mul(self.frame)
# res_div = self.frame.div(2 * self.frame)
# assert_frame_equal(res_add, self.frame + self.frame)
# assert_frame_equal(res_sub, self.frame - self.frame)
# assert_frame_equal(res_mul, self.frame * self.frame)
# assert_frame_equal(res_div, self.frame / (2 * self.frame))
const_add = self.frame.add(1)
assert_frame_equal(const_add, self.frame + 1)
# corner cases
result = self.frame.add(self.frame[:0])
assert_frame_equal(result, self.frame * np.nan)
result = self.frame[:0].add(self.frame)
assert_frame_equal(result, self.frame * np.nan)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], fill_value=3)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)
def test_arith_flex_zero_len_raises(self):
# GH#19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([])
df_len0 = pd.DataFrame([], columns=['A', 'B'])
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
df.add(ser_len0, fill_value='E')
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
df_len0.sub(df['A'], axis=None, fill_value=3)
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product([list('abc'),
['one', 'two', 'three'],
[1, 2, 3]],
names=['first', 'second', 'third'])
df = DataFrame(np.arange(27 * 3).reshape(27, 3),
index=index,
columns=['value1', 'value2', 'value3']).sort_index()
idx = pd.IndexSlice
for op in ['add', 'sub', 'mul', 'div', 'truediv']:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level='third', axis=0)
expected = pd.concat([opa(df.loc[idx[:, :, i], :], v)
for i, v in x.iteritems()]).sort_index()
assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ['two', 'three'])
result = getattr(df, op)(x, level='second', axis=0)
expected = (pd.concat([opa(df.loc[idx[:, i], :], v)
for i, v in x.iteritems()])
.reindex_like(df).sort_index())
assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([['A', 'B'], ['a', 'b']])
df = DataFrame(np.ones((2, 4), dtype='int64'), columns=midx)
s = pd.Series({'a': 1, 'b': 2})
df2 = df.copy()
df2.columns.names = ['lvl0', 'lvl1']
s2 = s.copy()
s2.index.name = 'lvl1'
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level='lvl1')
res6 = df2.mul(s2, axis=1, level='lvl1')
exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),
columns=midx)
for res in [res1, res2]:
assert_frame_equal(res, exp)
exp.columns.names = ['lvl0', 'lvl1']
for res in [res3, res4, res5, res6]:
assert_frame_equal(res, exp)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
self._test_op(df, operator.add)
self._test_op(df, operator.sub)
self._test_op(df, operator.mul)
self._test_op(df, operator.truediv)
self._test_op(df, operator.floordiv)
self._test_op(df, operator.pow)
self._test_op(df, lambda x, y: y + x)
self._test_op(df, lambda x, y: y - x)
self._test_op(df, lambda x, y: y * x)
self._test_op(df, lambda x, y: y / x)
self._test_op(df, lambda x, y: y ** x)
self._test_op(df, lambda x, y: x + y)
self._test_op(df, lambda x, y: x - y)
self._test_op(df, lambda x, y: x * y)
self._test_op(df, lambda x, y: x / y)
self._test_op(df, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
assert_series_equal(result[col], op(df[col], 1))
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = DataFrame(data)
other = DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
assert_frame_equal(f(other.values), o(df, other.values))
# scalar
assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
assert_frame_equal(f(np.nan), o(df, np.nan))
with tm.assert_raises_regex(ValueError, msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
assert_frame_equal(col_eq, df == Series(col_ser))
assert_frame_equal(col_eq, -col_ne)
assert_frame_equal(idx_eq, -idx_ne)
assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
assert_frame_equal(col_eq, df.eq(list(col_ser)))
assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
assert_frame_equal(col_gt, df > Series(col_ser))
assert_frame_equal(col_gt, -col_le)
assert_frame_equal(idx_gt, -idx_le)
assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
assert_frame_equal(col_ge, df >= Series(col_ser))
assert_frame_equal(col_ge, -col_lt)
assert_frame_equal(idx_ge, -idx_lt)
assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = Series(np.random.randn(5))
col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = DataFrame({'a': arr})
df2 = DataFrame({'a': arr2})
rs = df.gt(df2)
assert not rs.values.any()
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({'a': arr3})
rs = df3.gt(2j)
assert not rs.values.any()
# corner, dtype=object
df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
result = df1.ne(df2)
exp = DataFrame({'col': [False, True, False]})
assert_frame_equal(result, exp)
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz='UTC')
idx1 = base.tz_convert('Asia/Tokyo')[:2]
idx2 = base.tz_convert('US/Eastern')[1:]
df1 = DataFrame({'A': [1, 2]}, index=idx1)
df2 = DataFrame({'A': [1, 1]}, index=idx2)
exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base)
assert_frame_equal(df1 + df2, exp)
def test_arith_flex_series(self):
df = self.simple
row = df.xs('a')
col = df['two']
# after arithmetic refactor, add truediv here
ops = ['add', 'sub', 'mul', 'mod']
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
assert_frame_equal(f(row), op(df, row))
assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
assert_frame_equal(df.div(row), df / row)
assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH7325
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='int64')
expected = DataFrame([[nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='float64')
expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = self.simple
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
assert_frame_equal(df.add(val3), added)
@pytest.mark.parametrize('values', [[1, 2], (1, 2), np.array([1, 2]),
range(1, 3), deque([1, 2])])
def test_arith_alignment_non_pandas_object(self, values):
# GH 17901
df = DataFrame({'A': [1, 1], 'B': [1, 1]})
expected = DataFrame({'A': [2, 2], 'B': [3, 3]})
result = df + values
assert_frame_equal(result, expected)
def test_combineFrame(self):
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
added = self.frame + frame_copy
indexer = added['A'].dropna().index
exp = (self.frame['A'] * 2).copy()
tm.assert_series_equal(added['A'].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added['A'], exp.loc[added['A'].index])
assert np.isnan(added['C'].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added['D']).all()
self_added = self.frame + self.frame
tm.assert_index_equal(self_added.index, self.frame.index)
added_rev = frame_copy + self.frame
assert np.isnan(added['D']).all()
assert np.isnan(added_rev['D']).all()
# corner cases
# empty
plus_empty = self.frame + self.empty
assert np.isnan(plus_empty.values).all()
empty_plus = self.empty + self.frame
assert np.isnan(empty_plus.values).all()
empty_empty = self.empty + self.empty
assert empty_empty.empty
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
# mix vs float64, upcast
added = self.frame + self.mixed_float
_check_mixed_float(added, dtype='float64')
added = self.mixed_float + self.frame
_check_mixed_float(added, dtype='float64')
# mix vs mix
added = self.mixed_float + self.mixed_float2
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float2 + self.mixed_float
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = self.frame + self.mixed_int
_check_mixed_float(added, dtype='float64')
def test_combineSeries(self):
# Series
series = self.frame.xs(self.frame.index[0])
added = self.frame + series
for key, s in compat.iteritems(added):
assert_series_equal(s, self.frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
larger_added = self.frame + larger_series
for key, s in compat.iteritems(self.frame):
assert_series_equal(larger_added[key], s + series[key])
assert 'E' in larger_added
assert np.isnan(larger_added['E']).all()
# no upcast needed
added = self.mixed_float + series
_check_mixed_float(added)
# vs mix (upcast) as needed
added = self.mixed_float + series.astype('float32')
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float + series.astype('float16')
_check_mixed_float(added, dtype=dict(C=None))
        # these raise with numexpr, as we are adding an int64 to an
        # uint64, which is odd. vs int:
# added = self.mixed_int + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = self.mixed_int + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = self.tsframe['A']
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = self.tsframe.add(ts, axis='index')
for key, col in compat.iteritems(self.tsframe):
result = col + ts
assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == 'A'
else:
assert result.name is None
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame.add(ts, axis='index')
tm.assert_index_equal(smaller_added.index, self.tsframe.index)
smaller_ts = ts[:-5]
smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = self.tsframe.add(ts[:0], axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# Frame is all-nan
result = self.tsframe[:0].add(ts, axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# empty but with non-empty index
frame = self.tsframe[:1].reindex(columns=[])
result = frame.mul(ts, axis='index')
assert len(result) == len(ts)
def test_combineFunc(self):
result = self.frame * 2
tm.assert_numpy_array_equal(result.values, self.frame.values * 2)
# vs mix
result = self.mixed_float * 2
for c, s in compat.iteritems(result):
tm.assert_numpy_array_equal(
s.values, self.mixed_float[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = self.empty * 2
assert result.index is self.empty.index
assert len(result.columns) == 0
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = self.simple.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values,
func(df1.values, df2.values))
with tm.assert_raises_regex(ValueError,
'Wrong number of dimensions'):
func(df1, ndim_5)
result2 = func(self.simple, row)
tm.assert_numpy_array_equal(result2.values,
func(self.simple.values, row.values))
result3 = func(self.frame, 0)
tm.assert_numpy_array_equal(result3.values,
func(self.frame.values, 0))
with tm.assert_raises_regex(ValueError,
'Can only compare identically'
'-labeled DataFrame'):
func(self.simple, self.simple[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]['A'] = np.nan
with np.errstate(invalid='ignore'):
expected = missing_df.values < 0
with np.errstate(invalid='raise'):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
l = (2, 2, 2)
tup = tuple(l)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
assert_frame_equal(result, expected)
result = df.values > b
assert_numpy_array_equal(result, expected.values)
result = df > l
assert_frame_equal(result, expected)
result = df > tup
assert_frame_equal(result, expected)
result = df > b_r
assert_frame_equal(result, expected)
result = df.values > b_r
assert_numpy_array_equal(result, expected.values)
pytest.raises(ValueError, df.__gt__, b_c)
pytest.raises(ValueError, df.values.__gt__, b_c)
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
assert_frame_equal(result, expected)
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
result = df == b_r
assert_frame_equal(result, expected)
result = df.values == b_r
assert_numpy_array_equal(result, expected.values)
pytest.raises(ValueError, lambda: df == b_c)
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(np.arange(6).reshape((3, 2)),
columns=list('AB'), index=list('abc'))
expected.index = df.index
expected.columns = df.columns
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
def test_combine_generic(self):
df1 = self.frame
df2 = self.frame.loc[self.frame.index[:-5], ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
assert combined['D'].isna().all()
assert combined2['D'].isna().all()
chunk = combined.loc[combined.index[:-5], ['A', 'B', 'C']]
chunk2 = combined2.loc[combined2.index[:-5], ['A', 'B', 'C']]
exp = self.frame.loc[self.frame.index[:-5],
['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list('abcdefg')
X_orig = DataFrame(np.arange(10 * len(columns))
.reshape(-1, len(columns)),
columns=columns, index=range(10))
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list('bedcf')
subs = list('bcdef')
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._data is s2._data
df = df_orig.copy()
df2 = df
df += 1
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._data is df2._data
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._data is df2._data
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({'A': arr.copy(), 'B': 'foo'})
df = df_orig.copy()
df2 = df
df['A'] += 1
expected = DataFrame({'A': arr.copy() + 1, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
df = df_orig.copy()
df2 = df
df['A'] += 1.5
expected = DataFrame({'A': arr.copy() + 1.5, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
assert df._data is df2._data
@pytest.mark.parametrize('op', ['add', 'and', 'div', 'floordiv', 'mod',
'mul', 'or', 'pow', 'sub', 'truediv',
'xor'])
def test_inplace_ops_identity2(self, op):
if compat.PY3 and op == 'div':
return
df = DataFrame({'a': [1., 2., 3.],
'b': [1, 2, 3]})
operand = 2
if op in ('and', 'or', 'xor'):
# cannot use floats for boolean ops
df['a'] = [True, False, True]
df_copy = df.copy()
iop = '__i{}__'.format(op)
op = '__{}__'.format(op)
# no id change and value is correct
getattr(df, iop)(operand)
expected = getattr(df_copy, op)(operand)
assert_frame_equal(df, expected)
expected = id(df)
assert id(df) == expected
def test_alignment_non_pandas(self):
index = ['A', 'B', 'C']
columns = ['X', 'Y', 'Z']
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops._align_method_FRAME
for val in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype=np.int64),
range(1, 4)]:
tm.assert_series_equal(align(df, val, 'index'),
Series([1, 2, 3], index=df.index))
tm.assert_series_equal(align(df, val, 'columns'),
Series([1, 2, 3], index=df.columns))
# length mismatch
msg = 'Unable to coerce to Series, length must be 3: given 2'
for val in [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]:
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(align(df, val, 'index'),
DataFrame(val, index=df.index,
columns=df.columns))
tm.assert_frame_equal(align(df, val, 'columns'),
DataFrame(val, index=df.index,
columns=df.columns))
# shape mismatch
msg = 'Unable to coerce to DataFrame, shape must be'
val = np.array([[1, 2, 3], [4, 5, 6]])
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'index')
with tm.assert_raises_regex(ValueError, msg):
align(df, val, 'columns')
val = np.zeros((3, 3, 3))
with pytest.raises(ValueError):
align(df, val, 'index')
with pytest.raises(ValueError):
align(df, val, 'columns')
| bsd-3-clause |
parroyo/Zappa | tests/tests.py | 1 | 64700 | # -*- coding: utf8 -*-
import base64
import collections
import json
from contextlib import nested
from cStringIO import StringIO as OldStringIO
from io import BytesIO, StringIO
import flask
import mock
import os
import random
import string
import zipfile
import re
import unittest
import shutil
import sys
import tempfile
from click.exceptions import ClickException
from lambda_packages import lambda_packages
from .utils import placebo_session, patch_open
from zappa.cli import ZappaCLI, shamelessly_promote
from zappa.ext.django_zappa import get_django_wsgi
from zappa.handler import LambdaHandler, lambda_handler
from zappa.letsencrypt import get_cert_and_update_domain, create_domain_key, create_domain_csr, create_chained_certificate, get_cert, cleanup, parse_account_key, parse_csr, sign_certificate, encode_certificate, register_account, verify_challenge
from zappa.util import (detect_django_settings, copytree, detect_flask_apps,
add_event_source, remove_event_source,
get_event_source_status, parse_s3_url, human_size, string_to_timestamp)
from zappa.wsgi import create_wsgi_request, common_log
from zappa.zappa import Zappa, ASSUME_POLICY, ATTACH_POLICY
def random_string(length):
return ''.join(random.choice(string.printable) for _ in range(length))
class TestZappa(unittest.TestCase):
def setUp(self):
self.sleep_patch = mock.patch('time.sleep', return_value=None)
# Tests expect us-east-1.
# If the user has set a different region in env variables, we set it aside for now and use us-east-1
self.users_current_region_name = os.environ.get('AWS_DEFAULT_REGION', None)
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
if not os.environ.get('PLACEBO_MODE') == 'record':
self.sleep_patch.start()
def tearDown(self):
if not os.environ.get('PLACEBO_MODE') == 'record':
self.sleep_patch.stop()
del os.environ['AWS_DEFAULT_REGION']
if self.users_current_region_name is not None:
# Give the user their AWS region back, we're done testing with us-east-1.
os.environ['AWS_DEFAULT_REGION'] = self.users_current_region_name
##
# Sanity Tests
##
def test_test(self):
self.assertTrue(True)
##
# Basic Tests
##
def test_zappa(self):
self.assertTrue(True)
Zappa()
@mock.patch('zappa.zappa.find_packages')
@mock.patch('os.remove')
def test_copy_editable_packages(self, mock_remove, mock_find_packages):
temp_package_dir = '/var/folders/rn/9tj3_p0n1ln4q4jn1lgqy4br0000gn/T/1480455339'
egg_links = [
'/user/test/.virtualenvs/test/lib/python2.7/site-packages/package-python.egg-link'
]
egg_path = "/some/other/directory/package"
mock_find_packages.return_value = ["package", "package.subpackage", "package.another"]
temp_egg_link = os.path.join(temp_package_dir, 'package-python.egg-link')
z = Zappa()
with nested(
patch_open(), mock.patch('glob.glob'), mock.patch('zappa.zappa.copytree')
) as ((mock_open, mock_file), mock_glob, mock_copytree):
# We read in the contents of the egg-link file
mock_file.read.return_value = "{}\n.".format(egg_path)
# we use glob.glob to get the egg-links in the temp packages directory
mock_glob.return_value = [temp_egg_link]
z.copy_editable_packages(egg_links, temp_package_dir)
# make sure we copied the right directories
mock_copytree.assert_called_with(
os.path.join(egg_path, 'package'),
os.path.join(temp_package_dir, 'package'),
symlinks=False
)
self.assertEqual(mock_copytree.call_count, 1)
# make sure it removes the egg-link from the temp packages directory
mock_remove.assert_called_with(temp_egg_link)
self.assertEqual(mock_remove.call_count, 1)
def test_create_lambda_package(self):
# mock the pip.get_installed_distributions() to include a package in lambda_packages so that the code
# for zipping pre-compiled packages gets called
mock_named_tuple = collections.namedtuple('mock_named_tuple', ['project_name', 'location'])
mock_return_val = [mock_named_tuple(lambda_packages.keys()[0], '/path')] # choose name of 1st package in lambda_packages
with mock.patch('pip.get_installed_distributions', return_value=mock_return_val):
z = Zappa()
path = z.create_lambda_zip(handler_file=os.path.realpath(__file__))
self.assertTrue(os.path.isfile(path))
os.remove(path)
def test_get_manylinux(self):
z = Zappa()
self.assertNotEqual(z.get_manylinux_wheel('pandas'), None)
self.assertEqual(z.get_manylinux_wheel('derpderpderpderp'), None)
# mock the pip.get_installed_distributions() to include a package in manylinux so that the code
# for zipping pre-compiled packages gets called
mock_named_tuple = collections.namedtuple('mock_named_tuple', ['project_name', 'location'])
mock_return_val = [mock_named_tuple('pandas', '/path')]
with mock.patch('pip.get_installed_distributions', return_value=mock_return_val):
z = Zappa()
path = z.create_lambda_zip(handler_file=os.path.realpath(__file__))
self.assertTrue(os.path.isfile(path))
os.remove(path)
def test_load_credentials(self):
z = Zappa()
z.aws_region = 'us-east-1'
z.load_credentials()
self.assertEqual(z.boto_session.region_name, 'us-east-1')
self.assertEqual(z.aws_region, 'us-east-1')
z.aws_region = 'eu-west-1'
z.profile_name = 'default'
z.load_credentials()
self.assertEqual(z.boto_session.region_name, 'eu-west-1')
self.assertEqual(z.aws_region, 'eu-west-1')
creds = {
'AWS_ACCESS_KEY_ID': 'AK123',
'AWS_SECRET_ACCESS_KEY': 'JKL456',
'AWS_DEFAULT_REGION': 'us-west-1'
}
with mock.patch.dict('os.environ', creds):
z.aws_region = None
z.load_credentials()
loaded_creds = z.boto_session._session.get_credentials()
self.assertEqual(loaded_creds.access_key, 'AK123')
self.assertEqual(loaded_creds.secret_key, 'JKL456')
self.assertEqual(z.boto_session.region_name, 'us-west-1')
def test_create_api_gateway_routes_with_different_auth_methods(self):
z = Zappa()
z.parameter_depth = 1
z.integration_response_codes = [200]
z.method_response_codes = [200]
z.http_methods = ['GET']
z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution'
lambda_arn = 'arn:aws:lambda:us-east-1:12345:function:helloworld'
# No auth at all
z.create_stack_template(lambda_arn, 'helloworld', False, {}, False, None)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("NONE", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("NONE", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual(False, parsable_template["Resources"]["GET0"]["Properties"]["ApiKeyRequired"])
self.assertEqual(False, parsable_template["Resources"]["GET1"]["Properties"]["ApiKeyRequired"])
# IAM auth
z.create_stack_template(lambda_arn, 'helloworld', False, {}, True, None)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual(False, parsable_template["Resources"]["GET0"]["Properties"]["ApiKeyRequired"])
self.assertEqual(False, parsable_template["Resources"]["GET1"]["Properties"]["ApiKeyRequired"])
# CORS with auth
z.create_stack_template(lambda_arn, 'helloworld', False, {}, True, None, True)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual("NONE", parsable_template["Resources"]["OPTIONS0"]["Properties"]["AuthorizationType"])
self.assertEqual("NONE", parsable_template["Resources"]["OPTIONS1"]["Properties"]["AuthorizationType"])
self.assertEqual("MOCK", parsable_template["Resources"]["OPTIONS0"]["Properties"]["Integration"]["Type"])
self.assertEqual("MOCK", parsable_template["Resources"]["OPTIONS1"]["Properties"]["Integration"]["Type"])
self.assertEqual("'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
parsable_template["Resources"]["OPTIONS0"]["Properties"]["Integration"]["IntegrationResponses"][0]["ResponseParameters"]["method.response.header.Access-Control-Allow-Headers"])
self.assertEqual("'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
parsable_template["Resources"]["OPTIONS1"]["Properties"]["Integration"]["IntegrationResponses"][0]["ResponseParameters"]["method.response.header.Access-Control-Allow-Headers"])
self.assertTrue(parsable_template["Resources"]["OPTIONS0"]["Properties"]["MethodResponses"][0]["ResponseParameters"]["method.response.header.Access-Control-Allow-Headers"])
self.assertTrue(parsable_template["Resources"]["OPTIONS1"]["Properties"]["MethodResponses"][0]["ResponseParameters"]["method.response.header.Access-Control-Allow-Headers"])
self.assertEqual(False, parsable_template["Resources"]["GET0"]["Properties"]["ApiKeyRequired"])
self.assertEqual(False, parsable_template["Resources"]["GET1"]["Properties"]["ApiKeyRequired"])
# API Key auth
z.create_stack_template(lambda_arn, 'helloworld', True, {}, True, None)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual(True, parsable_template["Resources"]["GET0"]["Properties"]["ApiKeyRequired"])
self.assertEqual(True, parsable_template["Resources"]["GET1"]["Properties"]["ApiKeyRequired"])
# Authorizer and IAM
authorizer = {
"function": "runapi.authorization.gateway_authorizer.evaluate_token",
"result_ttl": 300,
"token_header": "Authorization",
"validation_expression": "xxx"
}
z.create_stack_template(lambda_arn, 'helloworld', False, {}, True, authorizer)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("AWS_IAM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
with self.assertRaises(KeyError):
parsable_template["Resources"]["Authorizer"]
# Authorizer with validation expression
invocations_uri = 'arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/' + lambda_arn + '/invocations'
z.create_stack_template(lambda_arn, 'helloworld', False, {}, False, authorizer)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("CUSTOM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("CUSTOM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual("TOKEN", parsable_template["Resources"]["Authorizer"]["Properties"]["Type"])
self.assertEqual("ZappaAuthorizer", parsable_template["Resources"]["Authorizer"]["Properties"]["Name"])
self.assertEqual(300, parsable_template["Resources"]["Authorizer"]["Properties"]["AuthorizerResultTtlInSeconds"])
self.assertEqual(invocations_uri, parsable_template["Resources"]["Authorizer"]["Properties"]["AuthorizerUri"])
self.assertEqual(z.credentials_arn, parsable_template["Resources"]["Authorizer"]["Properties"]["AuthorizerCredentials"])
self.assertEqual("xxx", parsable_template["Resources"]["Authorizer"]["Properties"]["IdentityValidationExpression"])
# Authorizer without validation expression
authorizer.pop('validation_expression', None)
z.create_stack_template(lambda_arn, 'helloworld', False, {}, False, authorizer)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual("CUSTOM", parsable_template["Resources"]["GET0"]["Properties"]["AuthorizationType"])
self.assertEqual("CUSTOM", parsable_template["Resources"]["GET1"]["Properties"]["AuthorizationType"])
self.assertEqual("TOKEN", parsable_template["Resources"]["Authorizer"]["Properties"]["Type"])
with self.assertRaises(KeyError):
parsable_template["Resources"]["Authorizer"]["Properties"]["IdentityValidationExpression"]
# Authorizer with arn
authorizer = {
"arn": "arn:aws:lambda:us-east-1:123456789012:function:my-function",
}
z.create_stack_template(lambda_arn, 'helloworld', False, {}, False, authorizer)
parsable_template = json.loads(z.cf_template.to_json())
self.assertEqual('arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:123456789012:function:my-function/invocations', parsable_template["Resources"]["Authorizer"]["Properties"]["AuthorizerUri"])
def test_policy_json(self):
# ensure the policy docs are valid JSON
json.loads(ASSUME_POLICY)
json.loads(ATTACH_POLICY)
def test_schedule_events(self):
z = Zappa()
path = os.getcwd()
# z.schedule_events # TODO
##
# Logging
##
def test_logging(self):
"""
TODO
"""
Zappa()
##
# Mapping and pattern tests
##
def test_redirect_pattern(self):
test_urls = [
# a regular endpoint url
'https://asdf1234.execute-api.us-east-1.amazonaws.com/env/path/to/thing',
# an external url (outside AWS)
'https://github.com/Miserlou/zappa/issues?q=is%3Aissue+is%3Aclosed',
# a local url
'/env/path/to/thing'
]
for code in ['301', '302']:
pattern = Zappa.selection_pattern(code)
for url in test_urls:
self.assertRegexpMatches(url, pattern)
def test_b64_pattern(self):
head = '\{"http_status": '
for code in ['400', '401', '402', '403', '404', '500']:
pattern = Zappa.selection_pattern(code)
document = head + code + random_string(50)
self.assertRegexpMatches(document, pattern)
for bad_code in ['200', '301', '302']:
document = base64.b64encode(head + bad_code + random_string(50))
self.assertNotRegexpMatches(document, pattern)
def test_200_pattern(self):
pattern = Zappa.selection_pattern('200')
self.assertEqual(pattern, '')
##
# WSGI
##
def test_wsgi_event(self):
## This is a pre-proxy+ event
# event = {
# "body": "",
# "headers": {
# "Via": "1.1 e604e934e9195aaf3e36195adbcb3e18.cloudfront.net (CloudFront)",
# "Accept-Language": "en-US,en;q=0.5",
# "Accept-Encoding": "gzip",
# "CloudFront-Is-SmartTV-Viewer": "false",
# "CloudFront-Forwarded-Proto": "https",
# "X-Forwarded-For": "109.81.209.118, 216.137.58.43",
# "CloudFront-Viewer-Country": "CZ",
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
# "X-Forwarded-Proto": "https",
# "X-Amz-Cf-Id": "LZeP_TZxBgkDt56slNUr_H9CHu1Us5cqhmRSswOh1_3dEGpks5uW-g==",
# "CloudFront-Is-Tablet-Viewer": "false",
# "X-Forwarded-Port": "443",
# "CloudFront-Is-Mobile-Viewer": "false",
# "CloudFront-Is-Desktop-Viewer": "true",
# "Content-Type": "application/json"
# },
# "params": {
# "parameter_1": "asdf1",
# "parameter_2": "asdf2",
# },
# "method": "POST",
# "query": {
# "dead": "beef"
# }
# }
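        # The event below uses the current API Gateway Lambda proxy integration format,
        # which is the event shape create_wsgi_request() is exercised with here.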
event = {
u'body': None,
u'resource': u'/',
u'requestContext': {
u'resourceId': u'6cqjw9qu0b',
u'apiId': u'9itr2lba55',
u'resourcePath': u'/',
u'httpMethod': u'GET',
u'requestId': u'c17cb1bf-867c-11e6-b938-ed697406e3b5',
u'accountId': u'724336686645',
u'identity': {
u'apiKey': None,
u'userArn': None,
u'cognitoAuthenticationType': None,
u'caller': None,
u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0',
u'user': None,
u'cognitoIdentityPoolId': None,
u'cognitoIdentityId': None,
u'cognitoAuthenticationProvider': None,
u'sourceIp': u'50.191.225.98',
u'accountId': None,
},
u'stage': u'devorr',
},
u'queryStringParameters': None,
u'httpMethod': u'GET',
u'pathParameters': None,
u'headers': {
u'Via': u'1.1 6801928d54163af944bf854db8d5520e.cloudfront.net (CloudFront)',
u'Accept-Language': u'en-US,en;q=0.5',
u'Accept-Encoding': u'gzip, deflate, br',
u'CloudFront-Is-SmartTV-Viewer': u'false',
u'CloudFront-Forwarded-Proto': u'https',
u'X-Forwarded-For': u'50.191.225.98, 204.246.168.101',
u'CloudFront-Viewer-Country': u'US',
u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
u'Upgrade-Insecure-Requests': u'1',
u'Host': u'9itr2lba55.execute-api.us-east-1.amazonaws.com',
u'X-Forwarded-Proto': u'https',
u'X-Amz-Cf-Id': u'qgNdqKT0_3RMttu5KjUdnvHI3OKm1BWF8mGD2lX8_rVrJQhhp-MLDw==',
u'CloudFront-Is-Tablet-Viewer': u'false',
u'X-Forwarded-Port': u'443',
u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0',
u'CloudFront-Is-Mobile-Viewer': u'false',
u'CloudFront-Is-Desktop-Viewer': u'true',
},
u'stageVariables': None,
u'path': u'/',
}
request = create_wsgi_request(event)
# def test_wsgi_path_info(self):
# # Test no parameters (site.com/)
# event = {
# "body": {},
# "headers": {},
# "pathParameters": {},
# "path": u'/',
# "httpMethod": "GET",
# "queryStringParameters": {}
# }
# request = create_wsgi_request(event, trailing_slash=True)
# self.assertEqual("/", request['PATH_INFO'])
# request = create_wsgi_request(event, trailing_slash=False)
# self.assertEqual("/", request['PATH_INFO'])
# # Test parameters (site.com/asdf1/asdf2 or site.com/asdf1/asdf2/)
# event_asdf2 = {u'body': None, u'resource': u'/{proxy+}', u'requestContext': {u'resourceId': u'dg451y', u'apiId': u'79gqbxq31c', u'resourcePath': u'/{proxy+}', u'httpMethod': u'GET', u'requestId': u'766df67f-8991-11e6-b2c4-d120fedb94e5', u'accountId': u'724336686645', u'identity': {u'apiKey': None, u'userArn': None, u'cognitoAuthenticationType': None, u'caller': None, u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'user': None, u'cognitoIdentityPoolId': None, u'cognitoIdentityId': None, u'cognitoAuthenticationProvider': None, u'sourceIp': u'96.90.37.59', u'accountId': None}, u'stage': u'devorr'}, u'queryStringParameters': None, u'httpMethod': u'GET', u'pathParameters': {u'proxy': u'asdf1/asdf2'}, u'headers': {u'Via': u'1.1 b2aeb492548a8a2d4036401355f928dd.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'X-Forwarded-Port': u'443', u'X-Forwarded-For': u'96.90.37.59, 54.240.144.50', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'Upgrade-Insecure-Requests': u'1', u'Host': u'79gqbxq31c.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'X-Amz-Cf-Id': u'BBFP-RhGDrQGOzoCqjnfB2I_YzWt_dac9S5vBcSAEaoM4NfYhAQy7Q==', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'CloudFront-Forwarded-Proto': u'https'}, u'stageVariables': None, u'path': u'/asdf1/asdf2'}
# event_asdf2_slash = {u'body': None, u'resource': u'/{proxy+}', u'requestContext': {u'resourceId': u'dg451y', u'apiId': u'79gqbxq31c', u'resourcePath': u'/{proxy+}', u'httpMethod': u'GET', u'requestId': u'd6fda925-8991-11e6-8bd8-b5ec6db19d57', u'accountId': u'724336686645', u'identity': {u'apiKey': None, u'userArn': None, u'cognitoAuthenticationType': None, u'caller': None, u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'user': None, u'cognitoIdentityPoolId': None, u'cognitoIdentityId': None, u'cognitoAuthenticationProvider': None, u'sourceIp': u'96.90.37.59', u'accountId': None}, u'stage': u'devorr'}, u'queryStringParameters': None, u'httpMethod': u'GET', u'pathParameters': {u'proxy': u'asdf1/asdf2'}, u'headers': {u'Via': u'1.1 c70173a50d0076c99b5e680eb32d40bb.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'X-Forwarded-Port': u'443', u'X-Forwarded-For': u'96.90.37.59, 54.240.144.53', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'Upgrade-Insecure-Requests': u'1', u'Host': u'79gqbxq31c.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'Cookie': u'zappa=AQ4', u'X-Amz-Cf-Id': u'aU_i-iuT3llVUfXv2zv6uU-m77Oga7ANhd5ZYrCoqXBy4K7I2x3FZQ==', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'CloudFront-Forwarded-Proto': u'https'}, u'stageVariables': None, u'path': u'/asdf1/asdf2/'}
# request = create_wsgi_request(event, trailing_slash=True)
# self.assertEqual("/asdf1/asdf2/", request['PATH_INFO'])
# request = create_wsgi_request(event, trailing_slash=False)
# self.assertEqual("/asdf1/asdf2", request['PATH_INFO'])
# request = create_wsgi_request(event, trailing_slash=False, script_name='asdf1')
# self.assertEqual("/asdf1/asdf2", request['PATH_INFO'])
def test_wsgi_path_info_unquoted(self):
event = {
"body": {},
"headers": {},
"pathParameters": {},
"path": '/path%3A1', # encoded /path:1
"httpMethod": "GET",
"queryStringParameters": {},
"requestContext": {}
}
request = create_wsgi_request(event, trailing_slash=True)
self.assertEqual("/path:1", request['PATH_INFO'])
def test_wsgi_logging(self):
# event = {
# "body": {},
# "headers": {},
# "params": {
# "parameter_1": "asdf1",
# "parameter_2": "asdf2",
# },
# "httpMethod": "GET",
# "query": {}
# }
event = {u'body': None, u'resource': u'/{proxy+}', u'requestContext': {u'resourceId': u'dg451y', u'apiId': u'79gqbxq31c', u'resourcePath': u'/{proxy+}', u'httpMethod': u'GET', u'requestId': u'766df67f-8991-11e6-b2c4-d120fedb94e5', u'accountId': u'724336686645', u'identity': {u'apiKey': None, u'userArn': None, u'cognitoAuthenticationType': None, u'caller': None, u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'user': None, u'cognitoIdentityPoolId': None, u'cognitoIdentityId': None, u'cognitoAuthenticationProvider': None, u'sourceIp': u'96.90.37.59', u'accountId': None}, u'stage': u'devorr'}, u'queryStringParameters': None, u'httpMethod': u'GET', u'pathParameters': {u'proxy': u'asdf1/asdf2'}, u'headers': {u'Via': u'1.1 b2aeb492548a8a2d4036401355f928dd.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'X-Forwarded-Port': u'443', u'X-Forwarded-For': u'96.90.37.59, 54.240.144.50', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'Upgrade-Insecure-Requests': u'1', u'Host': u'79gqbxq31c.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'X-Amz-Cf-Id': u'BBFP-RhGDrQGOzoCqjnfB2I_YzWt_dac9S5vBcSAEaoM4NfYhAQy7Q==', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:49.0) Gecko/20100101 Firefox/49.0', u'CloudFront-Forwarded-Proto': u'https'}, u'stageVariables': None, u'path': u'/asdf1/asdf2'}
environ = create_wsgi_request(event, trailing_slash=False)
response_tuple = collections.namedtuple('Response', ['status_code', 'content'])
response = response_tuple(200, 'hello')
le = common_log(environ, response, response_time=True)
le = common_log(environ, response, response_time=False)
def test_wsgi_multipart(self):
#event = {u'body': u'LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS03Njk1MjI4NDg0Njc4MTc2NTgwNjMwOTYxDQpDb250ZW50LURpc3Bvc2l0aW9uOiBmb3JtLWRhdGE7IG5hbWU9Im15c3RyaW5nIg0KDQpkZGQNCi0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tNzY5NTIyODQ4NDY3ODE3NjU4MDYzMDk2MS0tDQo=', u'headers': {u'Content-Type': u'multipart/form-data; boundary=---------------------------7695228484678176580630961', u'Via': u'1.1 38205a04d96d60185e88658d3185ccee.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'CloudFront-Is-SmartTV-Viewer': u'false', u'CloudFront-Forwarded-Proto': u'https', u'X-Forwarded-For': u'71.231.27.57, 104.246.180.51', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:45.0) Gecko/20100101 Firefox/45.0', u'Host': u'xo2z7zafjh.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'Cookie': u'zappa=AQ4', u'CloudFront-Is-Tablet-Viewer': u'false', u'X-Forwarded-Port': u'443', u'Referer': u'https://xo8z7zafjh.execute-api.us-east-1.amazonaws.com/former/post', u'CloudFront-Is-Mobile-Viewer': u'false', u'X-Amz-Cf-Id': u'31zxcUcVyUxBOMk320yh5NOhihn5knqrlYQYpGGyOngKKwJb0J0BAQ==', u'CloudFront-Is-Desktop-Viewer': u'true'}, u'params': {u'parameter_1': u'post'}, u'method': u'POST', u'query': {}}
event = {
u'body': u'LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS03Njk1MjI4NDg0Njc4MTc2NTgwNjMwOTYxDQpDb250ZW50LURpc3Bvc2l0aW9uOiBmb3JtLWRhdGE7IG5hbWU9Im15c3RyaW5nIg0KDQpkZGQNCi0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tNzY5NTIyODQ4NDY3ODE3NjU4MDYzMDk2MS0tDQo=',
u'resource': u'/',
u'requestContext': {
u'resourceId': u'6cqjw9qu0b',
u'apiId': u'9itr2lba55',
u'resourcePath': u'/',
u'httpMethod': u'POST',
u'requestId': u'c17cb1bf-867c-11e6-b938-ed697406e3b5',
u'accountId': u'724336686645',
u'identity': {
u'apiKey': None,
u'userArn': None,
u'cognitoAuthenticationType': None,
u'caller': None,
u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0',
u'user': None,
u'cognitoIdentityPoolId': None,
u'cognitoIdentityId': None,
u'cognitoAuthenticationProvider': None,
u'sourceIp': u'50.191.225.98',
u'accountId': None,
},
u'stage': u'devorr',
},
u'queryStringParameters': None,
u'httpMethod': u'POST',
u'pathParameters': None,
u'headers': {u'Content-Type': u'multipart/form-data; boundary=---------------------------7695228484678176580630961', u'Via': u'1.1 38205a04d96d60185e88658d3185ccee.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'CloudFront-Is-SmartTV-Viewer': u'false', u'CloudFront-Forwarded-Proto': u'https', u'X-Forwarded-For': u'71.231.27.57, 104.246.180.51', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:45.0) Gecko/20100101 Firefox/45.0', u'Host': u'xo2z7zafjh.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'Cookie': u'zappa=AQ4', u'CloudFront-Is-Tablet-Viewer': u'false', u'X-Forwarded-Port': u'443', u'Referer': u'https://xo8z7zafjh.execute-api.us-east-1.amazonaws.com/former/post', u'CloudFront-Is-Mobile-Viewer': u'false', u'X-Amz-Cf-Id': u'31zxcUcVyUxBOMk320yh5NOhihn5knqrlYQYpGGyOngKKwJb0J0BAQ==', u'CloudFront-Is-Desktop-Viewer': u'true'},
u'stageVariables': None,
u'path': u'/',
}
environ = create_wsgi_request(event, trailing_slash=False)
response_tuple = collections.namedtuple('Response', ['status_code', 'content'])
response = response_tuple(200, 'hello')
def test_wsgi_without_body(self):
event = {
u'body': None,
u'resource': u'/',
u'requestContext': {
u'resourceId': u'6cqjw9qu0b',
u'apiId': u'9itr2lba55',
u'resourcePath': u'/',
u'httpMethod': u'POST',
u'requestId': u'c17cb1bf-867c-11e6-b938-ed697406e3b5',
u'accountId': u'724336686645',
u'identity': {
u'apiKey': None,
u'userArn': None,
u'cognitoAuthenticationType': None,
u'caller': None,
u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0',
u'user': None,
u'cognitoIdentityPoolId': None,
u'cognitoIdentityId': None,
u'cognitoAuthenticationProvider': None,
u'sourceIp': u'50.191.225.98',
u'accountId': None,
},
u'stage': u'devorr',
},
u'queryStringParameters': None,
u'httpMethod': u'POST',
u'pathParameters': None,
u'headers': {u'Via': u'1.1 38205a04d96d60185e88658d3185ccee.cloudfront.net (CloudFront)', u'Accept-Language': u'en-US,en;q=0.5', u'Accept-Encoding': u'gzip, deflate, br', u'CloudFront-Is-SmartTV-Viewer': u'false', u'CloudFront-Forwarded-Proto': u'https', u'X-Forwarded-For': u'71.231.27.57, 104.246.180.51', u'CloudFront-Viewer-Country': u'US', u'Accept': u'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:45.0) Gecko/20100101 Firefox/45.0', u'Host': u'xo2z7zafjh.execute-api.us-east-1.amazonaws.com', u'X-Forwarded-Proto': u'https', u'Cookie': u'zappa=AQ4', u'CloudFront-Is-Tablet-Viewer': u'false', u'X-Forwarded-Port': u'443', u'Referer': u'https://xo8z7zafjh.execute-api.us-east-1.amazonaws.com/former/post', u'CloudFront-Is-Mobile-Viewer': u'false', u'X-Amz-Cf-Id': u'31zxcUcVyUxBOMk320yh5NOhihn5knqrlYQYpGGyOngKKwJb0J0BAQ==', u'CloudFront-Is-Desktop-Viewer': u'true'},
u'stageVariables': None,
u'path': u'/',
}
environ = create_wsgi_request(event, trailing_slash=False)
response_tuple = collections.namedtuple('Response', ['status_code', 'content'])
response = response_tuple(200, 'hello')
##
# Handler
##
##
# CLI
##
def test_cli_sanity(self):
zappa_cli = ZappaCLI()
return
def test_load_settings(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('test_settings.json')
self.assertEqual(False, zappa_cli.stage_config['touch'])
def test_load_extended_settings(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'extendo'
zappa_cli.load_settings('test_settings.json')
self.assertEqual('lmbda', zappa_cli.stage_config['s3_bucket'])
self.assertEqual(True, zappa_cli.stage_config['touch'])
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'extendofail'
with self.assertRaises(ClickException):
zappa_cli.load_settings('test_settings.json')
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
with self.assertRaises(RuntimeError):
zappa_cli.load_settings('tests/test_bad_circular_extends_settings.json')
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'extendo2'
zappa_cli.load_settings('test_settings.json')
self.assertEqual('lmbda2', zappa_cli.stage_config['s3_bucket']) # Second Extension
self.assertTrue(zappa_cli.stage_config['touch']) # First Extension
self.assertTrue(zappa_cli.stage_config['delete_local_zip']) # The base
def test_load_settings_yaml(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('tests/test_settings.yml')
self.assertEqual(False, zappa_cli.stage_config['touch'])
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'extendo'
zappa_cli.load_settings('tests/test_settings.yml')
self.assertEqual('lmbda', zappa_cli.stage_config['s3_bucket'])
self.assertEqual(True, zappa_cli.stage_config['touch'])
def test_load_settings_toml(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('tests/test_settings.toml')
self.assertEqual(False, zappa_cli.stage_config['touch'])
def test_settings_extension(self):
"""
Make sure Zappa uses settings in the proper order: JSON, TOML, YAML.
"""
tempdir = tempfile.mkdtemp(prefix="zappa-test-settings")
shutil.copy("tests/test_one_env.json", tempdir + "/zappa_settings.json")
shutil.copy("tests/test_settings.yml", tempdir + "/zappa_settings.yml")
shutil.copy("tests/test_settings.toml", tempdir + "/zappa_settings.toml")
orig_cwd = os.getcwd()
os.chdir(tempdir)
try:
zappa_cli = ZappaCLI()
# With all three, we should get the JSON file first.
self.assertEqual(zappa_cli.get_json_or_yaml_settings(),
"zappa_settings.json")
zappa_cli.load_settings_file()
self.assertIn("lonely", zappa_cli.zappa_settings)
os.unlink("zappa_settings.json")
# Without the JSON file, we should get the TOML file.
self.assertEqual(zappa_cli.get_json_or_yaml_settings(),
"zappa_settings.toml")
zappa_cli.load_settings_file()
self.assertIn("ttt888", zappa_cli.zappa_settings)
self.assertNotIn("devor", zappa_cli.zappa_settings)
os.unlink("zappa_settings.toml")
# With just the YAML file, we should get it.
self.assertEqual(zappa_cli.get_json_or_yaml_settings(),
"zappa_settings.yml")
zappa_cli.load_settings_file()
self.assertIn("ttt888", zappa_cli.zappa_settings)
self.assertIn("devor", zappa_cli.zappa_settings)
os.unlink("zappa_settings.yml")
# Without anything, we should get an exception.
self.assertRaises(
ClickException, zappa_cli.get_json_or_yaml_settings)
finally:
os.chdir(orig_cwd)
shutil.rmtree(tempdir)
def test_cli_utility(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('test_settings.json')
zappa_cli.create_package()
zappa_cli.remove_local_zip()
logs = [
{
'timestamp': '12345',
'message': '[START RequestId] test'
},
{
'timestamp': '12345',
'message': '[REPORT RequestId] test'
},
{
'timestamp': '12345',
'message': '[END RequestId] test'
},
{
'timestamp': '12345',
'message': 'test'
},
{
'timestamp': '1480001341214',
'message': '[INFO] 2016-11-24T15:29:13.326Z c0cb52d1-b25a-11e6-9b73-f940ce24319a 59.111.125.48 - - [24/Nov/2016:15:29:13 +0000] "GET / HTTP/1.1" 200 2590 "" "python-requests/2.11.0" 0/4.672'
},
{
'timestamp': '1480001341214',
'message': '[INFO] 2016-11-24T15:29:13.326Z c0cb52d1-b25a-11e6-9b73-f940ce24319a 59.111.125.48 - - [24/Nov/2016:15:29:13 +0000] "GET / HTTP/1.1" 400 2590 "" "python-requests/2.11.0" 0/4.672'
},
{
'timestamp': '1480001341215',
'message': '[1480001341258] [DEBUG] 2016-11-24T15:29:01.258Z b890d8f6-b25a-11e6-b6bc-718f7ec807df Zappa Event: {}'
}
]
zappa_cli.print_logs(logs)
zappa_cli.print_logs(logs, colorize=False)
zappa_cli.print_logs(logs, colorize=False, http=True)
zappa_cli.print_logs(logs, colorize=True, http=True)
zappa_cli.print_logs(logs, colorize=True, http=False)
zappa_cli.print_logs(logs, colorize=True, non_http=True)
zappa_cli.print_logs(logs, colorize=True, non_http=False)
zappa_cli.print_logs(logs, colorize=True, non_http=True, http=True)
zappa_cli.print_logs(logs, colorize=True, non_http=False, http=False)
zappa_cli.check_for_update()
def test_cli_args(self):
zappa_cli = ZappaCLI()
# Sanity
argv = '-s test_settings.json derp ttt888'.split()
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 2)
def test_cli_error_exit_code(self):
# Discussion: https://github.com/Miserlou/Zappa/issues/407
zappa_cli = ZappaCLI()
# Sanity
argv = '-s test_settings.json status devor'.split()
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 1)
def test_cli_default(self):
# Discussion: https://github.com/Miserlou/Zappa/issues/422
zappa_cli = ZappaCLI()
argv = '-s tests/test_one_env.json status'.split()
# It'll fail, but at least it'll cover it.
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 1)
zappa_cli = ZappaCLI()
argv = '-s tests/test_one_env.json status --all'.split()
# It'll fail, but at least it'll cover it.
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 1)
zappa_cli = ZappaCLI()
argv = '-s test_settings.json status'.split()
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 2)
def test_cli_negative_rollback(self):
zappa_cli = ZappaCLI()
argv = '-s test_settings.json rollback -n -1 dev'.split()
output = StringIO()
old_stderr, sys.stderr = sys.stderr, output
with self.assertRaises(SystemExit) as system_exit:
zappa_cli.handle(argv)
self.assertEqual(system_exit.exception.code, 2)
error_msg = output.getvalue().strip()
expected = r".*This argument must be positive \(got -1\)$"
self.assertRegexpMatches(error_msg, expected)
sys.stderr = old_stderr
@mock.patch('zappa.cli.ZappaCLI.dispatch_command')
def test_cli_invoke(self, _):
zappa_cli = ZappaCLI()
argv = '-s test_settings.json invoke '.split()
raw_tests = (
['--raw', 'devor', '"print 1+2"'],
['devor', '"print 1+2"', '--raw']
)
for cmd in raw_tests:
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertFalse(args['all'])
self.assertTrue(args['raw'])
self.assertEquals(args['command_rest'], '"print 1+2"')
self.assertEquals(args['command_env'], 'devor')
all_raw_tests = (
['--all', '--raw', '"print 1+2"'],
['"print 1+2"', '--all', '--raw'],
['--raw', '"print 1+2"', '--all'],
['--all', '"print 1+2"', '--raw']
)
for cmd in all_raw_tests:
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertTrue(args['all'])
self.assertTrue(args['raw'])
self.assertEquals(args['command_rest'], '"print 1+2"')
self.assertEquals(args['command_env'], None)
zappa_cli.handle(argv + ['devor', 'myapp.my_func'])
args = zappa_cli.vargs
self.assertEquals(args['command_rest'], 'myapp.my_func')
all_func_tests = (
['--all', 'myapp.my_func'],
['myapp.my_func', '--all']
)
for cmd in all_func_tests:
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertTrue(args['all'])
self.assertEquals(args['command_rest'], 'myapp.my_func')
@mock.patch('zappa.cli.ZappaCLI.dispatch_command')
def test_cli_manage(self, _):
zappa_cli = ZappaCLI()
argv = '-s test_settings.json manage '.split()
all_tests = (
['--all', 'showmigrations', 'admin'],
['showmigrations', 'admin', '--all']
)
for cmd in all_tests:
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertTrue(args['all'])
self.assertItemsEqual(
args['command_rest'], ['showmigrations', 'admin']
)
cmd = ['devor', 'showmigrations', 'admin']
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertFalse(args['all'])
self.assertItemsEqual(
args['command_rest'], ['showmigrations', 'admin']
)
cmd = ['devor', '"shell --version"']
zappa_cli.handle(argv + cmd)
args = zappa_cli.vargs
self.assertFalse(args['all'])
self.assertItemsEqual(args['command_rest'], ['"shell --version"'])
def test_bad_json_catch(self):
zappa_cli = ZappaCLI()
self.assertRaises(ValueError, zappa_cli.load_settings_file, 'tests/test_bad_settings.json')
def test_bad_stage_name_catch(self):
zappa_cli = ZappaCLI()
self.assertRaises(ValueError, zappa_cli.load_settings, 'tests/test_bad_stage_name_settings.json')
def test_bad_environment_vars_catch(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
self.assertRaises(ValueError, zappa_cli.load_settings, 'tests/test_bad_environment_vars.json')
def test_cli_init(self):
if os.path.isfile('zappa_settings.json'):
os.remove('zappa_settings.json')
# Test directly
zappa_cli = ZappaCLI()
# Via http://stackoverflow.com/questions/2617057/how-to-supply-stdin-files-and-environment-variable-inputs-to-python-unit-tests
inputs = ['dev', 'lmbda', 'test_settings', 'y', '']
def test_for(inputs):
input_generator = (i for i in inputs)
with mock.patch('__builtin__.raw_input', lambda prompt: next(input_generator)):
zappa_cli.init()
if os.path.isfile('zappa_settings.json'):
os.remove('zappa_settings.json')
test_for(inputs)
test_for(['dev', 'lmbda', 'test_settings', 'n', ''])
test_for(['dev', 'lmbda', 'test_settings', '', ''])
test_for(['dev', 'lmbda', 'test_settings', 'p', ''])
test_for(['dev', 'lmbda', 'test_settings', 'y', ''])
test_for(['dev', 'lmbda', 'test_settings', 'p', 'n'])
# Test via handle()
input_generator = (i for i in inputs)
with mock.patch('__builtin__.raw_input', lambda prompt: next(input_generator)):
zappa_cli = ZappaCLI()
argv = ['init']
zappa_cli.handle(argv)
if os.path.isfile('zappa_settings.json'):
os.remove('zappa_settings.json')
def test_domain_name_match(self):
# Simple sanity check
zone = Zappa.get_best_match_zone(all_zones={ 'HostedZones': [
{
'Name': 'example.com.au.',
'Id': 'zone-correct',
'Config': {
'PrivateZone': False
}
}
]},
domain='www.example.com.au')
assert zone == 'zone-correct'
# No match test
zone = Zappa.get_best_match_zone(all_zones={'HostedZones': [
{
'Name': 'example.com.au.',
'Id': 'zone-incorrect',
'Config': {
'PrivateZone': False
}
}
]},
domain='something-else.com.au')
assert zone is None
# More involved, better match should win.
zone = Zappa.get_best_match_zone(all_zones={'HostedZones': [
{
'Name': 'example.com.au.',
'Id': 'zone-incorrect',
'Config': {
'PrivateZone': False
}
},
{
'Name': 'subdomain.example.com.au.',
'Id': 'zone-correct',
'Config': {
'PrivateZone': False
}
}
]},
domain='www.subdomain.example.com.au')
assert zone == 'zone-correct'
# Check private zone is not matched
zone = Zappa.get_best_match_zone(all_zones={ 'HostedZones': [
{
'Name': 'example.com.au.',
'Id': 'zone-private',
'Config': {
'PrivateZone': True
}
}
]},
domain='www.example.com.au')
assert zone is None
# More involved, should ignore the private zone and match the public.
zone = Zappa.get_best_match_zone(all_zones={'HostedZones': [
{
'Name': 'subdomain.example.com.au.',
'Id': 'zone-private',
'Config': {
'PrivateZone': True
}
},
{
'Name': 'subdomain.example.com.au.',
'Id': 'zone-public',
'Config': {
'PrivateZone': False
}
}
]},
domain='www.subdomain.example.com.au')
assert zone == 'zone-public'
##
# Let's Encrypt / ACME
##
def test_lets_encrypt_sanity(self):
# We need a fake account key and crt
import subprocess
proc = subprocess.Popen(["openssl genrsa 2048 > /tmp/account.key"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
proc = subprocess.Popen(["openssl req -x509 -newkey rsa:2048 -subj '/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com' -passout pass:foo -keyout /tmp/key.key -out test_signed.crt -days 1 > /tmp/signed.crt"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
DEFAULT_CA = "https://acme-staging.api.letsencrypt.org"
CA = "https://acme-staging.api.letsencrypt.org"
try:
result = register_account()
except ValueError as e:
pass # that's fine.
create_domain_key()
create_domain_csr('herp.derp.wtf')
parse_account_key()
parse_csr()
create_chained_certificate()
try:
result = sign_certificate()
except ValueError as e:
pass # that's fine.
result = verify_challenge('http://echo.jsontest.com/status/valid')
try:
result = verify_challenge('http://echo.jsontest.com/status/fail')
except ValueError as e:
pass # that's fine.
try:
result = verify_challenge('http://bing.com')
except ValueError as e:
pass # that's fine.
encode_certificate(b'123')
# without domain testing..
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'ttt888'
zappa_cli.load_settings('test_settings.json')
get_cert_and_update_domain(zappa_cli, 'kerplah', 'zzzz', domain=None, clean_up=True)
os.remove('test_signed.crt')
cleanup()
def test_certify_sanity_checks(self):
"""
Make sure 'zappa certify':
* Writes a warning with the --no-cleanup flag.
* Errors out when a deployment hasn't taken place.
* Writes errors when certificate settings haven't been specified.
* Calls Zappa correctly for creates vs. updates.
"""
        old_stdout = sys.stdout
sys.stdout = OldStringIO() # print() barfs on io.* types.
try:
zappa_cli = ZappaCLI()
try:
zappa_cli.certify(no_cleanup=True)
except AttributeError:
                # Since zappa_cli.zappa isn't initialized, the certify() call
# fails when it tries to inspect what Zappa has deployed.
pass
log_output = sys.stdout.getvalue()
self.assertIn("You are calling certify with", log_output)
self.assertIn("--no-cleanup", log_output)
class ZappaMock(object):
def __init__(self):
self.function_versions = []
self.domain_names = {}
self.calls = []
def get_lambda_function_versions(self, function_name):
return self.function_versions
def get_domain_name(self, domain):
return self.domain_names.get(domain)
def create_domain_name(self, *args, **kw):
self.calls.append(("create_domain_name", args, kw))
def update_domain_name(self, *args, **kw):
self.calls.append(("update_domain_name", args, kw))
zappa_cli.zappa = ZappaMock()
self.assertRaises(ClickException, zappa_cli.certify)
# Make sure we get an error if we don't configure the domain.
zappa_cli.zappa.function_versions = ["$LATEST"]
zappa_cli.api_stage = "stage"
zappa_cli.zappa_settings = {"stage": {}}
try:
zappa_cli.certify()
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("domain", log_output)
# Without any LetsEncrypt settings, we should get a message about
# not having a lets_encrypt_key setting.
zappa_cli.zappa_settings["stage"]["domain"] = "test.example.com"
try:
zappa_cli.certify()
self.fail("Expected a ClickException")
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("lets_encrypt_key", log_output)
# With partial settings, we should get a message about not having
# certificate, certificate_key, and certificate_chain
zappa_cli.zappa_settings["stage"]["certificate"] = "foo"
try:
zappa_cli.certify()
self.fail("Expected a ClickException")
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("certificate_key", log_output)
self.assertIn("certificate_chain", log_output)
zappa_cli.zappa_settings["stage"]["certificate_key"] = "key"
try:
zappa_cli.certify()
self.fail("Expected a ClickException")
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("certificate_key", log_output)
self.assertIn("certificate_chain", log_output)
zappa_cli.zappa_settings["stage"]["certificate_chain"] = "chain"
del zappa_cli.zappa_settings["stage"]["certificate_key"]
try:
zappa_cli.certify()
self.fail("Expected a ClickException")
except ClickException as e:
log_output = str(e)
self.assertIn("Can't certify a domain without", log_output)
self.assertIn("certificate_key", log_output)
self.assertIn("certificate_chain", log_output)
# With all certificate settings, make sure Zappa's domain calls
# are executed.
cert_file = tempfile.NamedTemporaryFile()
cert_file.write("Hello world")
cert_file.flush()
zappa_cli.zappa_settings["stage"].update({
"certificate": cert_file.name,
"certificate_key": cert_file.name,
"certificate_chain": cert_file.name
})
sys.stdout.truncate(0)
zappa_cli.certify(no_cleanup=True)
self.assertEquals(len(zappa_cli.zappa.calls), 1)
self.assertTrue(zappa_cli.zappa.calls[0][0] == "create_domain_name")
log_output = sys.stdout.getvalue()
self.assertIn("Created a new domain name", log_output)
zappa_cli.zappa.calls = []
zappa_cli.zappa.domain_names["test.example.com"] = "*.example.com"
sys.stdout.truncate(0)
zappa_cli.certify(no_cleanup=True)
self.assertEquals(len(zappa_cli.zappa.calls), 1)
self.assertTrue(zappa_cli.zappa.calls[0][0] == "update_domain_name")
log_output = sys.stdout.getvalue()
self.assertNotIn("Created a new domain name", log_output)
finally:
sys.stdout = old_stdout
##
# Django
##
def test_detect_dj(self):
# Sanity
settings_modules = detect_django_settings()
def test_dj_wsgi(self):
# Sanity
settings_modules = detect_django_settings()
settings = """
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'alskdfjalsdkf=0*%do-ayvy*m2k=vss*$7)j8q!@u0+d^na7mi2(^!l!d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'blah.urls'
WSGI_APPLICATION = 'hackathon_starter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
"""
djts = open("dj_test_settings.py", "w")
djts.write(settings)
djts.close()
app = get_django_wsgi('dj_test_settings')
os.remove('dj_test_settings.py')
os.remove('dj_test_settings.pyc')
##
# Util / Misc
##
def test_human_units(self):
human_size(1)
human_size(9999999999999)
def test_string_to_timestamp(self):
boo = string_to_timestamp("asdf")
self.assertTrue(boo == 0)
yay = string_to_timestamp("1h")
self.assertTrue(type(yay) == int)
self.assertTrue(yay > 0)
yay = string_to_timestamp("4m")
self.assertTrue(type(yay) == int)
self.assertTrue(yay > 0)
yay = string_to_timestamp("1mm")
self.assertTrue(type(yay) == int)
self.assertTrue(yay > 0)
yay = string_to_timestamp("1mm1w1d1h1m1s1ms1us")
self.assertTrue(type(yay) == int)
self.assertTrue(yay > 0)
def test_event_name(self):
zappa = Zappa()
truncated = zappa.get_event_name("basldfkjalsdkfjalsdkfjaslkdfjalsdkfjadlsfkjasdlfkjasdlfkjasdflkjasdf-asdfasdfasdfasdfasdf", "this.is.my.dang.function.wassup.yeah.its.long")
self.assertTrue(len(truncated) <= 64)
self.assertTrue(truncated.endswith("this.is.my.dang.function.wassup.yeah.its.long"))
truncated = zappa.get_event_name("basldfkjalsdkfjalsdkfjaslkdfjalsdkfjadlsfkjasdlfkjasdlfkjasdflkjasdf-asdfasdfasdfasdfasdf", "thisidoasdfaljksdfalskdjfalsdkfjasldkfjalsdkfjalsdkfjalsdfkjalasdfasdfasdfasdklfjasldkfjalsdkjfaslkdfjasldkfjasdflkjdasfskdj")
self.assertTrue(len(truncated) <= 64)
truncated = zappa.get_event_name("a", "b")
self.assertTrue(len(truncated) <= 64)
self.assertEqual(truncated, "a-b")
def test_detect_dj(self):
# Sanity
settings_modules = detect_django_settings()
def test_detect_flask(self):
# Sanity
settings_modules = detect_flask_apps()
def test_shameless(self):
shamelessly_promote()
def test_s3_url_parser(self):
remote_bucket, remote_file = parse_s3_url('s3://my-project-config-files/filename.json')
self.assertEqual(remote_bucket, 'my-project-config-files')
self.assertEqual(remote_file, 'filename.json')
remote_bucket, remote_file = parse_s3_url('s3://your-bucket/account.key')
self.assertEqual(remote_bucket, 'your-bucket')
self.assertEqual(remote_file, 'account.key')
remote_bucket, remote_file = parse_s3_url('s3://my-config-bucket/super-secret-config.json')
self.assertEqual(remote_bucket, 'my-config-bucket')
self.assertEqual(remote_file, 'super-secret-config.json')
remote_bucket, remote_file = parse_s3_url('s3://your-secure-bucket/account.key')
self.assertEqual(remote_bucket, 'your-secure-bucket')
self.assertEqual(remote_file, 'account.key')
remote_bucket, remote_file = parse_s3_url('s3://your-bucket/subfolder/account.key')
self.assertEqual(remote_bucket, 'your-bucket')
self.assertEqual(remote_file, 'subfolder/account.key')
# Sad path
remote_bucket, remote_file = parse_s3_url('/dev/null')
self.assertEqual(remote_bucket, '')
def test_remote_env_package(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'depricated_remote_env'
zappa_cli.load_settings('test_settings.json')
self.assertEqual('lmbda-env', zappa_cli.stage_config['remote_env_bucket'])
self.assertEqual('dev/env.json', zappa_cli.stage_config['remote_env_file'])
zappa_cli.create_package()
with zipfile.ZipFile(zappa_cli.zip_path, 'r') as lambda_zip:
content = lambda_zip.read('zappa_settings.py')
zappa_cli.remove_local_zip()
m = re.search("REMOTE_ENV='(.*)'", content)
self.assertEqual(m.group(1), 's3://lmbda-env/dev/env.json')
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'remote_env'
zappa_cli.load_settings('test_settings.json')
self.assertEqual('s3://lmbda-env/prod/env.json', zappa_cli.stage_config['remote_env'])
zappa_cli.create_package()
with zipfile.ZipFile(zappa_cli.zip_path, 'r') as lambda_zip:
content = lambda_zip.read('zappa_settings.py')
zappa_cli.remove_local_zip()
m = re.search("REMOTE_ENV='(.*)'", content)
self.assertEqual(m.group(1), 's3://lmbda-env/prod/env.json')
def test_package_only(self):
for delete_local_zip in [True, False]:
zappa_cli = ZappaCLI()
if delete_local_zip:
zappa_cli.api_stage = 'build_package_only_delete_local_zip_true'
else:
zappa_cli.api_stage = 'build_package_only_delete_local_zip_false'
zappa_cli.load_settings('test_settings.json')
zappa_cli.package()
zappa_cli.on_exit() # simulate the command exits
# the zip should never be removed
self.assertEqual(os.path.isfile(zappa_cli.zip_path), True)
# cleanup
os.remove(zappa_cli.zip_path)
def test_flask_logging_bug(self):
"""
This checks whether Flask can write errors sanely.
https://github.com/Miserlou/Zappa/issues/283
"""
event = {
"body": {},
"headers": {},
"pathParameters": {},
"path": '/',
"httpMethod": "GET",
"queryStringParameters": {},
"requestContext": {}
}
old_stderr = sys.stderr
sys.stderr = BytesIO()
try:
environ = create_wsgi_request(event)
app = flask.Flask(__name__)
with app.request_context(environ):
app.logger.error(u"This is a test")
log_output = sys.stderr.getvalue()
self.assertNotIn(
"'str' object has no attribute 'write'", log_output)
self.assertNotIn(
"Logged from file tests.py", log_output)
finally:
sys.stderr = old_stderr
def test_slim_handler(self):
zappa_cli = ZappaCLI()
zappa_cli.api_stage = 'slim_handler'
zappa_cli.load_settings('test_settings.json')
zappa_cli.create_package()
self.assertTrue(os.path.isfile(zappa_cli.handler_path))
self.assertTrue(os.path.isfile(zappa_cli.zip_path))
zappa_cli.remove_local_zip()
if __name__ == '__main__':
unittest.main()
| mit |
yw374cornell/e-mission-server | emission/analysis/modelling/tour_model/prior_unused/cluster_pipeline.py | 1 | 11691 | # Standard imports
import logging
import os, sys
import math
import numpy as np
import matplotlib
# matplotlib.use('Agg')
import pygmaps
from sklearn.cluster import KMeans
from sklearn import manifold
import matplotlib.pyplot as plt
# Our imports
import emission.analysis.modelling.tour_model.prior_unused.route_matching as etmr
import emission.analysis.modelling.tour_model.kmedoid as emkm
import emission.core.get_database as edb
import emission.analysis.modelling.tour_model.trajectory_matching.route_matching as eart
import emission.analysis.modelling.tour_model.trajectory_matching.prior_unused.util as eaut
"""
Notes
Usage: python cluster_pipeline.py <username>
Username must be associated with UUID in user_uuid.secret
High level overview:
-This script provides a series of tools to help you evaluate your clustering algorithm across different methods of calculating distance.
-For a particular user that you pass in to this script, we will generate and plot clusters on a 2D plane using MDS; colors correspond
to the k-medoid-generated clusters.
-We also compare the k-medoid-generated clusters to ground-truth clusters and return an accuracy score.
"""
if not os.path.exists('mds_plots'):
os.makedirs('mds_plots')
def extract_features(model_name, user_id, method=None, is_ground_truth=False):
data = None
if model_name == 'kmeans':
data = generate_section_matrix(user_id)
return data
elif model_name == 'kmedoid':
data = get_user_disMat(user_id, method=method, is_ground_truth=is_ground_truth)
return data
def generate_clusters(model_name, data, user_id, method=None, is_ground_truth=False):
clusters = None
if model_name == 'kmeans':
clusters = kmeans(data)
elif model_name == 'kmedoid':
clusters = get_user_clusters(user_id, method=method, nClusters=-1, is_ground_truth=is_ground_truth)
return clusters
def evaluate_clusters():
pass
#########################################################################################################
# LOW LEVEL ABSTRACTION #
#########################################################################################################
def get_user_sections(user_id):
sections = list(edb.get_section_db().find({'$and':[{'user_id': user_id},{'type': 'move'}]}))
return sections
def get_user_disMat(user, method, is_ground_truth=False):
## update route clusters:
logging.debug("Generating route clusters for %s" % user)
if is_ground_truth:
cluster_section_ids = edb.get_ground_truth_sections(user)
routes_user = emkm.user_route_data2(cluster_section_ids)
user_disMat = etmr.update_user_routeDistanceMatrix(str(user) + '_ground_truth',routes_user,step1=100000,step2=100000,method=method)
else:
routes_user = user_route_data(user,edb.get_section_db())
#print(routes_user)
user_disMat = etmr.update_user_routeDistanceMatrix(user,routes_user,step1=100000,step2=100000,method=method)
logging.debug((type(user_disMat)))
return user_disMat
def get_user_clusters(user, method, nClusters, is_ground_truth=False):
if is_ground_truth:
routes_user = user_route_data2(user)
else:
routes_user = user_route_data(user,edb.get_section_db())
if nClusters == -1:
nClusters = int(math.ceil(len(routes_user)/8) + 1)
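        # Heuristic: when the caller passes nClusters == -1, use roughly one medoid per
        # eight routes (plus one) rather than a fixed cluster count.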
clusters_user = emkm.kmedoids(routes_user,nClusters,user,method=method)
#update_user_routeClusters(user,clusters_user[2],method=method)
return clusters_user
def get_user_list():
user_list = edb.get_section_db().distinct('user_id')
return user_list
def plot_cluster_trajectories():
for cluster_label in clusters:
sections = clusters[cluster_label]
section = sections[0]
start_point = section['track_points'][0]['track_location']['coordinates']
mymap = pygmaps.maps(start_point[1], start_point[0], 16)
#mymap = pygmaps.maps(37.428, -122.145, 16)
for section in sections:
path = []
for track_point in section['track_points']:
coordinates = track_point['track_location']['coordinates']
#path.append(coordinates)
path.append((coordinates[1], coordinates[0]))
#path = [(37.429, -122.145),(37.428, -122.145),(37.427, -122.145),(37.427, -122.146),(37.427, -122.146)]
mymap.addpath(path,"#00FF00")
mymap.draw(str(cluster_label) + '_cluster.html')
def plot_mds(clusters, user_disMat, method, user_id, is_ground_truth=False):
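    # Builds a pairwise route-distance matrix from user_disMat, embeds it in 2D with
    # metric MDS (dissimilarity="precomputed"), then scatter-plots the routes colored by
    # their k-medoid cluster assignment and saves the figure under mds_plots/.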
routes_dict = {}
c = 0
for key in user_disMat.keys():
routes_dict[key] = c
c += 1
num_routes = len(routes_dict.keys())
matrix_shape = (num_routes, num_routes)
similarity_matrix = np.zeros(matrix_shape)
for route1 in user_disMat.keys():
for route2 in user_disMat[route1]:
route1_index = routes_dict[route1]
route2_index = routes_dict[route2]
similarity_matrix[route1_index][route2_index] = user_disMat[route1][route2]
#similarity_matrix[route2_index][route1_index] = user_disMat[route1][route2]
seed = np.random.RandomState(seed=3)
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
reduced_coordinates = mds.fit_transform(similarity_matrix)
cluster_num = 0
cleaned_clusters = {}
for cluster in clusters[2]:
for route in clusters[2][cluster]:
#print(route)
route_index = routes_dict[route]
if cluster_num in cleaned_clusters:
cleaned_clusters[cluster_num].append(reduced_coordinates[route_index])
else:
cleaned_clusters[cluster_num] = [reduced_coordinates[route_index]]
cluster_num += 1
used_colors = []
cluster_colors = {}
for cluster_index in cleaned_clusters:
stop = False
while not stop:
random_color = np.random.rand(1)[0]
if random_color not in used_colors:
stop = True
cluster_colors[cluster_index] = random_color
used_colors.append(random_color)
plot_colors = []
x_coords = []
y_coords = []
for cluster_index in cleaned_clusters:
route_coordinates = cleaned_clusters[cluster_index]
for coord in route_coordinates:
plot_colors.append(cluster_colors[cluster_index])
x_coords.append(coord[0])
y_coords.append(coord[1])
plt.scatter(x_coords, y_coords, c=plot_colors)
x1 = np.mean(x_coords) - 10*np.std(x_coords)
x2 = np.mean(x_coords) + 10*np.std(x_coords)
y1 = np.mean(y_coords) - 10*np.std(y_coords)
y2 = np.mean(y_coords) + 10*np.std(y_coords)
plt.axis((x1, x2, y1, y2))
if is_ground_truth:
f_name = 'mds_plots/ground_truth_' + str(user_id) + '_' + method + '.png'
else:
f_name = 'mds_plots/' + str(user_id) + '_' + method + '.png'
plt.savefig(f_name)
#K MEANS HELPER FUNCTIONS
def generate_section_matrix(user_id):
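    # One row per section, with columns [start_lat, start_lng, end_lat, end_lng,
    # duration, distance] -- the simple feature set used by the KMeans experiment.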
sections = get_user_sections(user_id)
inv_feature_dict = {0: 'start_lat', 1: 'start_lng', 2: 'end_lat', 3: 'end_lng', 4: 'duration', 5: 'distance'}
feature_dict = {'start_lat': 0, 'start_lng': 1, 'end_lat': 2, 'end_lng': 3, 'duration': 4, 'distance': 5}
data = np.zeros((len(sections), len(feature_dict)))
c = 0
while c < len(sections):
section = sections[c]
start_point = section['track_points'][0]['track_location']['coordinates']
end_point = section['track_points'][-1]['track_location']['coordinates']
start_lat = start_point[1]
start_lng = start_point[0]
end_lat = end_point[1]
end_lng = end_point[0]
duration = section['duration']
distance = section['distance']
data[c][feature_dict['start_lat']] = start_lat
data[c][feature_dict['start_lng']] = start_lng
data[c][feature_dict['end_lat']] = end_lat
data[c][feature_dict['end_lng']] = end_lng
data[c][feature_dict['duration']] = duration
data[c][feature_dict['distance']] = distance
c += 1
return data
def kmeans(data):
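    # NOTE: relies on the module-level user_id (set in the script body below) to fetch
    # the sections being clustered; `data` only supplies the numeric feature matrix.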
sections = get_user_sections(user_id)
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
k_means.fit(data)
k_means_labels = k_means.labels_
c = 0
clusters = {}
while c < k_means_labels.shape[0]:
if k_means_labels[c] not in clusters:
clusters[k_means_labels[c]] = [sections[c]]
else:
clusters[k_means_labels[c]].append(sections[c])
c += 1
return clusters
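# --- Illustrative sketch (not part of the original pipeline) ---
# Minimal, self-contained example of the call pattern used by kmeans() above:
# each row of the feature matrix from generate_section_matrix() is one section
# with [start_lat, start_lng, end_lat, end_lng, duration, distance]. The toy
# data below is random and only demonstrates the API.
def _kmeans_feature_sketch():
    import numpy as np
    from sklearn.cluster import KMeans
    toy_data = np.random.rand(12, 6)  # 12 fake sections, 6 features each
    toy_model = KMeans(init='k-means++', n_clusters=3, n_init=10)
    toy_model.fit(toy_data)
    return toy_model.labels_  # cluster label per fake section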
def user_route_data2(section_ids):
data_feature = {}
# for section in database.find({'$and':[{'user_id': user_id},{'type': 'move'},{'confirmed_mode': {'$ne': ''}}]}):
for _id in section_ids:
try:
data_feature[_id] = eart.getRoute(_id)
except Exception as e:
pass
#print(data_feature.keys())
return data_feature
#########################################################################################################
# END OF LOW LEVEL ABSTRACTION #
#########################################################################################################
#user_list = get_user_list()
user_uuid = eaut.read_uuids()
if len(sys.argv) == 2:
user_id = user_uuid[sys.argv[1]]
logging.debug(user_id)
#PARAMETERS
methods = ['dtw', 'lcs', 'Frechet'] #what metrics for distance to use
#EXPERIMENT 1: KMeans with following features: start_lat, start_lng, end_lat, end_lng, duration, distance
"""
print("Working on KMeans with simple features...")
data = extract_features('kmeans', user_id)
clusters = generate_clusters('kmeans', data, user_id)
print("Finished.")
"""
#EXPERIMENT 2-4: KMedoids with various methods of calculating distance between route A and route B
for method in methods:
logging.debug("Working on KMedoid with %s as distance metric." % method)
#user_disMat, clusters_user = generate_route_clusters(user_id, method=method, nClusters=-1)
data = extract_features('kmedoid', user_id, method)
clusters = generate_clusters('kmedoid', data, user_id, method)
logging.debug(data)
plot_mds(clusters, data, method, user_id)
logging.debug("Finished %s." % method)
def get_ground_truth_sections(username, section_collection):
"""
Returns all of the routes associated with a username's ground truthed sections
"""
ground_cluster_collection = edb.get_groundClusters_db()
clusters = ground_cluster_collection.find_one({"clusters":{"$exists":True}})["clusters"]
ground_truth_sections = []
get_username = lambda x: x[0].split("_")[0]
clusters = filter(lambda x: username == get_username(x), clusters.items())
for key, section_ids in clusters:
ground_truth_sections.extend(section_ids)
ground_truth_section_data = {}
for section_id in ground_truth_sections:
section_data = section_collection.find_one({'_id' : section_id})
if section_data is not None:
ground_truth_section_data[section_data['_id']] = getRoute(section_data['_id'])
else:
logging.debug("%s not found" % section_id)
return ground_truth_section_data
"""
methods = ['dtw']
for method in methods:
print("test")
data = extract_features('kmedoid', 'jeff', method, is_ground_truth=True)
print(data)
clusters = generate_clusters('kmedoid', data, 'jeff', method, is_ground_truth=True)
plot_mds(clusters, data, method, 'jeff')
"""
| bsd-3-clause |
danielfrg/cyhdfs3 | cyhdfs3/tests/test_avro.py | 2 | 2559 | from __future__ import print_function, absolute_import
import sys
import posixpath
import subprocess
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import cyavro
from utils import *
avroschema = """ {"type": "record",
"name": "from_bytes_test",
"fields":[
{"name": "id", "type": "int"},
{"name": "name", "type": "string"}
]
}
"""
@pytest.mark.parametrize(("codec",), [("null", ), ("deflate", ), ("snappy", )])
def test_avro_move_read(hdfs, request, tmpdir, codec):
testname = request.node.name.replace('[', '_').replace(']', '_')
hdfs_path = posixpath.join(TEST_DIR, testname + '.avro')
local_path = tmpdir.join(testname + '.avro').realpath().strpath
# Create an avrofile
writer = cyavro.AvroWriter(local_path, codec, avroschema)
ids = np.random.randint(100, size=10)
ids = np.arange(10)
names = pdt.rands_array(10, 10)
df_write = pd.DataFrame({"id": ids, "name": names})
df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False)
writer.write(df_write)
writer.close()
# Move file to hdfs
out = subprocess.call("hadoop fs -put {} {}".format(local_path, hdfs_path), shell=True)
assert out == 0
# Read avro and compare data
with hdfs.open(hdfs_path, 'r') as f:
reader = f.read_avro()
reader.init_buffers()
df_read = pd.DataFrame(reader.read_chunk())
pdt.assert_frame_equal(df_write, df_read)
reader.close()
@pytest.mark.parametrize(("codec",), [("null", ), ("deflate", ), ("snappy", )])
def test_avro_write_read(hdfs, request, tmpdir, codec):
testname = request.node.name
hdfs_path = posixpath.join(TEST_DIR, testname + '.avro')
local_path = tmpdir.join(testname + '.avro').realpath().strpath
# Create an avrofile
writer = cyavro.AvroWriter(local_path, codec, avroschema)
ids = np.random.randint(100, size=10)
ids = np.arange(10)
names = pdt.rands_array(10, 10)
df_write = pd.DataFrame({"id": ids, "name": names})
df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False)
writer.write(df_write)
writer.close()
# Read avro file bytes from localfile and write them to hdfs
data = ''
with open(local_path, 'rb') as f:
data = f.read()
with hdfs.open(hdfs_path, 'w') as f:
f.write(data)
# Read avro file bytes from hdfs and compare
with hdfs.open(hdfs_path, 'r') as f:
read_data = f.read()
assert len(data) == len(read_data)
assert data == read_data
| apache-2.0 |
michigraber/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
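# --- Illustrative sketch (added for clarity; not part of the original example) ---
# Stand-alone illustration of the reduction step described in the docstring:
# with n_clusters=None, Birch only produces subclusters, whose centers can then
# be handed to any global clustering step.
def _birch_reduction_sketch():
    from sklearn.cluster import Birch
    from sklearn.datasets.samples_generator import make_blobs
    X_small, _ = make_blobs(n_samples=1000, centers=10, random_state=0)
    brc = Birch(threshold=1.7, n_clusters=None).fit(X_small)
    # The subcluster centers are the reduced representation of X_small.
    return brc.subcluster_centers_.shape[0]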
| bsd-3-clause |
hamish2014/optTune | examples/tMOPSO_simulated_annealing_f2py.py | 1 | 3155 | """
Tune the simulated annealing algorithm from the scipy package to the generalized Rosenbrock problem, for multiple objective function evaluation (OFE) budgets simultaneously.
Same as the other example, except a fortran version of fast sa is used.
"""
import numpy, os
from optTune import tMOPSO, get_F_vals_at_specified_OFE_budgets, linearFunction
print('Please note this example only works on Linux, and requires gfortran')
if not os.path.exists('anneal_fortran.so'):
os.system('f2py -c -m anneal_fortran anneal.f90')
from anneal_fortran import anneal_module
D = 5 #number of dimensions for Rosenbrock problem
def anneal(CPVs, OFE_budgets, randomSeed):
#fast_sa_run - Function signature:
# fast_sa_run(prob_id,x0,t0,dwell,m,n,quench,boltzmann,maxevals,lower,upper,random_seed,[d])
anneal_module.fast_sa_run(prob_id = 1 ,
x0 = -2.048 + 2*2.048*numpy.random.rand(D),
t0 = 500.0,
dwell = int(CPVs[0]),
m = CPVs[1],
n = 1.0,
quench = 1.0,
boltzmann = 1.0,
maxevals = max(OFE_budgets),
lower = -2.048*numpy.ones(D),
upper = 2.048*numpy.ones(D),
random_seed = randomSeed)
return get_F_vals_at_specified_OFE_budgets(F=anneal_module.fval_hist.copy(), E=anneal_module.eval_hist.copy(), E_desired=OFE_budgets)
def CPV_valid(CPVs, OFE_budget):
if CPVs[0] < 5:
return False,'dwell,CPVs[0] < 5'
if CPVs[1] < 0.0001:
return False,'CPVs[1] < 0.0001'
return True,''
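# --- Illustrative sketch (assumes the f2py-compiled anneal_fortran module above) ---
# A single evaluation of the target algorithm outside the tMOPSO loop, using
# arbitrary CPV values (dwell=20, m=1.0); handy for sanity-checking the wrapper.
def _single_anneal_run_sketch():
    budgets = numpy.array([10, 50, 100])
    return anneal(CPVs=[20, 1.0], OFE_budgets=budgets, randomSeed=1)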
tuningOpt = tMOPSO(
optAlg = anneal,
CPV_lb = numpy.array([10, 0.0]),
CPV_ub = numpy.array([50, 5.0]),
CPV_validity_checks = CPV_valid,
OFE_budgets=numpy.logspace(1,3,30).astype(int),
sampleSizes = [2,8,20], #resampling size of 30
resampling_interruption_confidence = 0.6,
gammaBudget = 30*1000*50, #increase to get a smoother result ...
OFE_assessment_overshoot_function = linearFunction(2, 100 ),
N = 10,
printLevel=1,
)
print(tuningOpt)
Fmin_values = [ d.fv[1] for d in tuningOpt.PFA.designs ]
OFE_budgets = [ d.fv[0] for d in tuningOpt.PFA.designs ]
dwell_values = [ int(d.xv[1]) for d in tuningOpt.PFA.designs ]
m_values = [ d.xv[2] for d in tuningOpt.PFA.designs ]
print('OFE budget Fmin dwell m ')
for a,b,c,d in zip(OFE_budgets, Fmin_values, dwell_values, m_values):
print(' %i %6.4f %i %4.2f' % (a,b,c,d))
from matplotlib import pyplot
p1 = pyplot.semilogx(OFE_budgets, dwell_values, 'g.')[0]
pyplot.ylabel('dwell')
pyplot.ylim( min(dwell_values) - 1, max( dwell_values) + 1)
pyplot.twinx()
p2 = pyplot.semilogx(OFE_budgets, m_values, 'bx')[0]
pyplot.ylim( 0, max(m_values)*1.1)
pyplot.ylabel('m (rate of cool)')
pyplot.legend([p1,p2],['dwell','m'], loc='best')
pyplot.xlabel('OFE budget')
pyplot.xlim(min(OFE_budgets)-1,max(OFE_budgets)+60)
pyplot.title('Optimal CPVs for different OFE budgets')
pyplot.show()
| gpl-3.0 |
DistrictDataLabs/03-censusables | censusables/stars1.py | 1 | 5966 | """MVP (Really week 1 progress)
This script assumes that geo joins have already been done by the
geojoin script and that there is a business/county join that's passed
in on the command line.
"""
import argparse
import json
import matplotlib.pyplot as plt
import pandas as pd
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("join", help="Business/county join file")
parser.add_argument("businesses", help="Yelp business file")
parser.add_argument("reviews", help="Yelp review file")
parser.add_argument("census2010", help="ACS1 county estimates for 2010")
parser.add_argument("census2011", help="ACS1 county estimates for 2010")
parser.add_argument("census2012", help="ACS1 county estimates for 2010")
parser.add_argument("census2013", help="ACS1 county estimates for 2010")
parser.add_argument("-V", "--no-vegas", action='store_true')
parser.add_argument("-f", "--image-format", default='png')
args = parser.parse_args()
oname = 'nolv_' if args.no_vegas else ''
wolv = ' (without Las Vegas)' if args.no_vegas else ''
imsuff = '.' + args.image_format
# Load reviews
reviews = pd.DataFrame(json.loads(l) for l in open(args.reviews))
reviews['YEAR'] = reviews.date.str.slice(0, 4).astype('int64')
# # Reduce reviews to business-year review averages
# reviews = (reviews[['stars']]
# .groupby([reviews.business_id, reviews.YEAR])
# .mean()
# .reset_index()
# )
# Load the geo join data and join with the reviews
join = pd.DataFrame(json.loads(l) for l in open(args.join))
if args.no_vegas:
join = join[join.GISJOIN.apply(lambda g: not g.startswith('G32'))]
bus_reviews = reviews[['business_id', 'YEAR', 'stars']].merge(join)
# Get review means by GISJOIN and year
reviews = (bus_reviews[['stars']]
.groupby([bus_reviews.GISJOIN, bus_reviews.YEAR])
.mean()
.reset_index()
)
# Load the one-year census data
census = (pd.read_csv(args.census2010),
pd.read_csv(args.census2011),
pd.read_csv(args.census2012),
pd.read_csv(args.census2013),
)
# Select the columns we want and concat. This is awkward, because
# 1) column names for demographic data are different across years, and
# 2) when I downloaded 2013, i didn't ask for unweighted totals. This is
# an easy mistake to make. But I know I want GISJOIN, YEAR and the last 49
# columns, so...
census = [c[['GISJOIN', 'YEAR'] + list(c.columns[-49:])] for c in census]
# Assign more useful column names:
for c in census:
c.columns = '''
GISJOIN YEAR TOTAL
M M_4 M5_9 M10_14 M15_17 M18_19 M20 M21 M22_24 M25_29 M30_34
M35_39 M40_44 M45_49 M50_54 M55_59 M60_61 M62_64 M65_66 M67_69
M70_74 M75_79 M80_84 M85_
F F_4 F5_9 F10_14 F15_17 F18_19 F20 F21 F22_24 F25_29 F30_34
F35_39 F40_44 F45_49 F50_54 F55_59 F60_61 F62_64 F65_66 F67_69
F70_74 F75_79 F80_84 F85_
'''.strip().split()
# Combine
census = pd.concat(census, ignore_index=True)
# Compute young and old columns:
age_groups = {}
for n in '''
    M18_19 M20 M21 M22_24 M25_29 M30_34 F18_19 F20 F21 F22_24 F25_29 F30_34
'''.strip().split():
age_groups[n] = 'young'
for n in '''
M35_39 M40_44 M45_49 M50_54 M55_59 M60_61 M62_64 M65_66 M67_69
M70_74 M75_79 M80_84 M85_
F35_39 F40_44 F45_49 F50_54 F55_59 F60_61 F62_64 F65_66 F67_69
F70_74 F75_79 F80_84 F85_
'''.strip().split():
age_groups[n] = 'old'
yo = census.groupby(age_groups, axis=1).sum()
census = pd.concat((census, yo), axis=1)
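# --- Illustrative aside (hypothetical toy frame, not part of the analysis) ---
# The axis=1 groupby above maps column names to group labels and sums the
# grouped columns; a minimal sketch of the same pattern:
def _column_group_sum_sketch():
    toy = pd.DataFrame({'M18_19': [1, 2], 'M20': [3, 4], 'F35_39': [5, 6]})
    mapping = {'M18_19': 'young', 'M20': 'young', 'F35_39': 'old'}
    return toy.groupby(mapping, axis=1).sum()  # columns: 'old' and 'young'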
# Normalize by total population
norm = census[census.columns[3:]].div(census.TOTAL, axis=0)
census = pd.concat((census[census.columns[:3]], norm), axis=1)
# Join with reviews
census = census.merge(reviews)
# Whew, now we're ready to explore relationships. Plot review means
# vs age-group fraction for young and old.
fig, ax = plt.subplots(2, 1)
ax[0].scatter(census.young, census.stars, c='r', label='young')
ax[0].set_title("Yelp review means by fraction young for multiple years"
+ wolv)
ax[1].scatter(census.old, census.stars, c='b', label='old')
ax[1].set_title("Yelp review means by fraction old for multiple years"
+ wolv)
plt.savefig(oname+'review_means_young_and_old_multiyear' + imsuff)
# Well, no obvious pattern there. Perhaps it would be clearer if we
# aggregate by year.
census4 = (census[census.columns[1:]]
.groupby(census.GISJOIN)
.mean()
)
c4 = census4.reset_index()
fig, ax = plt.subplots(2, 1)
ax[0].scatter(census4.young, census4.stars, c='r', label='young')
ax[1].scatter(census4.old, census4.stars, c='b', label='old')
ax[0].set_title("Yelp review mean by fraction young mean over 4 years"
+ wolv)
ax[1].set_title("Yelp review mean by fraction old mean over 4 years"
+ wolv)
plt.savefig(oname+'review_means_young_and_old_mean' + imsuff)
# Nope, wtf that weird peak in the middle. There must be some other
# effect. We only have 15 counties. Let's see how reviews are
# distributed among them:
ax = plt.figure().add_subplot(1,1,1)
census4.stars.plot(kind='bar')
ax.set_title("Review means by county" + wolv)
plt.subplots_adjust(bottom=.2)
plt.savefig(oname+'mean_reviews_by_county' + imsuff)
# The reviews are dominated by a single county, which is Clark County,
# NV, which includes Las Vegas. Hm. Yelp reviews are probably
# concentrated in just the sort of businesses that are prominent in
# Las Vegas. Let's look at yelp reviews by category. The category is
# in an array value.
cats = []
for d in (json.loads(l) for l in open(args.businesses)):
for c in d['categories']:
cats.append(dict(business_id = d['business_id'], category=c))
cats = pd.DataFrame(cats).merge(bus_reviews)
cats = cats[['stars']].groupby(cats.category).mean()
ax = plt.figure().add_subplot(1,1,1)
cats.plot(kind='bar')
ax.set_title("Review meanss by category")
plt.subplots_adjust(bottom=.4)
plt.savefig('review_means_by_category' + imsuff)
| apache-2.0 |
JuBra/cobrapy | cobra/flux_analysis/double_deletion.py | 2 | 23264 | from warnings import warn
from itertools import chain, product
from six import iteritems, string_types
import numpy
from ..solvers import get_solver_name, solver_dict
from ..manipulation.delete import find_gene_knockout_reactions, \
get_compiled_gene_reaction_rules
from .deletion_worker import CobraDeletionPool, CobraDeletionMockPool
try:
import scipy
except ImportError:
moma = None
else:
from . import moma
try:
from pandas import DataFrame
except:
DataFrame = None
# Utility functions
def generate_matrix_indexes(ids1, ids2):
"""map an identifier to an entry in the square result matrix"""
return {id: index for index, id in enumerate(set(chain(ids1, ids2)))}
def yield_upper_tria_indexes(ids1, ids2, id_to_index):
"""gives the necessary indexes in the upper triangle
ids1 and ids2 are lists of the identifiers i.e. gene id's or reaction
indexes to be knocked out. id_to_index maps each identifier to its index
in the result matrix.
Note that this does not return indexes for the diagonal. Those have
to be computed separately."""
    # sets to check for inclusion in O(1)
id_set1 = set(ids1)
id_set2 = set(ids2)
for id1, id2 in product(ids1, ids2):
# indexes in the result matrix
index1 = id_to_index[id1]
index2 = id_to_index[id2]
# upper triangle
if index2 > index1:
yield ((index1, index2), (id1, id2))
# lower triangle but would be skipped, so return in upper triangle
elif id2 not in id_set1 or id1 not in id_set2:
yield((index2, index1), (id2, id1)) # note that order flipped
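# Illustrative example (comments only, not executed): for ids1 = ["a", "b"],
# ids2 = ["b", "c"] and id_to_index = {"a": 0, "b": 1, "c": 2}, the generator
# yields ((0, 1), ("a", "b")), ((0, 2), ("a", "c")) and ((1, 2), ("b", "c"))
# exactly once each, and skips the diagonal pair ("b", "b").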
def _format_upper_triangular_matrix(row_indexes, column_indexes, matrix):
"""reformat the square upper-triangular result matrix
For example, results may look like this
[[ A B C D]
[ - - - -]
[ - - E F]
[ - - - G]]
In this case, the second row was skipped. This means we have
row_indexes [0, 2, 3] and column_indexes [0, 1, 2, 3]
First, it will reflect the upper triangle into the lower triangle
[[ A B C D]
[ B - - -]
[ C - E F]
[ D - F G]]
Finally, it will remove the missing rows and return
[[ A B C D]
[ C - E F]
[ D - F G]]
"""
results = matrix.copy()
    # These select the indexes for the upper triangle. However, switching
# the order selects the lower triangle.
triu1, triu2 = numpy.triu_indices(matrix.shape[0])
# This makes reflection pretty easy
results[triu2, triu1] = results[triu1, triu2]
# Remove the missing rows and return.
return results[row_indexes, :][:, column_indexes]
def format_results_frame(row_ids, column_ids, matrix, return_frame=False):
"""format results as a pandas.DataFrame if desired/possible
Otherwise returns a dict of
{"x": row_ids, "y": column_ids", "data": result_matrx}"""
if return_frame and DataFrame:
return DataFrame(data=matrix, index=row_ids, columns=column_ids)
elif return_frame and not DataFrame:
warn("could not import pandas.DataFrame")
return {"x": row_ids, "y": column_ids, "data": matrix}
def double_deletion(cobra_model, element_list_1=None, element_list_2=None,
element_type='gene', **kwargs):
"""Wrapper for double_gene_deletion and double_reaction_deletion
.. deprecated :: 0.4
Use double_reaction_deletion and double_gene_deletion
"""
warn("deprecated - use single_reaction_deletion and single_gene_deletion")
if element_type == "reaction":
return double_reaction_deletion(cobra_model, element_list_1,
element_list_2, **kwargs)
elif element_type == "gene":
return double_gene_deletion(cobra_model, element_list_1,
element_list_2, **kwargs)
else:
raise Exception("unknown element type")
def double_reaction_deletion(cobra_model,
reaction_list1=None, reaction_list2=None,
method="fba", return_frame=False,
solver=None, zero_cutoff=1e-12,
**kwargs):
"""sequentially knocks out pairs of reactions in a model
cobra_model : :class:`~cobra.core.Model.Model`
cobra model in which to perform deletions
reaction_list1 : [:class:`~cobra.core.Reaction.Reaction`:] (or their id's)
Reactions to be deleted. These will be the rows in the result.
If not provided, all reactions will be used.
reaction_list2 : [:class:`~cobra.core.Reaction`:] (or their id's)
        Reactions to be deleted. These will be the columns in the result.
If not provided, reaction_list1 will be used.
method: "fba" or "moma"
Procedure used to predict the growth rate
solver: str for solver name
This must be a QP-capable solver for MOMA. If left unspecified,
a suitable solver will be automatically chosen.
zero_cutoff: float
When checking to see if a value is 0, this threshold is used.
return_frame: bool
If true, formats the results as a pandas.Dataframe. Otherwise
returns a dict of the form:
{"x": row_labels, "y": column_labels", "data": 2D matrix}
"""
# handle arguments which need to be passed on
if solver is None:
solver = get_solver_name(qp=(method == "moma"))
kwargs["solver"] = solver
kwargs["zero_cutoff"] = zero_cutoff
# generate other arguments
# identifiers for reactions are their indexes
if reaction_list1 is None:
reaction_indexes1 = range(len(cobra_model.reactions))
else:
reaction_indexes1 = [cobra_model.reactions.index(r)
for r in reaction_list1]
if reaction_list2 is None:
reaction_indexes2 = reaction_indexes1
else:
reaction_indexes2 = [cobra_model.reactions.index(r)
for r in reaction_list2]
reaction_to_result = generate_matrix_indexes(reaction_indexes1,
reaction_indexes2)
# Determine 0 flux reactions. If an optimal solution passes no flux
# through the deleted reactions, then we know removing them will
# not change the solution.
wt_solution = solver_dict[solver].solve(cobra_model)
if wt_solution.status == "optimal":
kwargs["wt_growth_rate"] = wt_solution.f
kwargs["no_flux_reaction_indexes"] = \
{i for i, v in enumerate(wt_solution.x) if abs(v) < zero_cutoff}
else:
warn("wild-type solution status is '%s'" % wt_solution.status)
# call the computing functions
if method == "fba":
results = _double_reaction_deletion_fba(
cobra_model, reaction_indexes1, reaction_indexes2,
reaction_to_result, **kwargs)
elif method == "moma":
results = _double_reaction_deletion_moma(
cobra_model, reaction_indexes1, reaction_indexes2,
reaction_to_result, **kwargs)
else:
raise ValueError("Unknown deletion method '%s'" % method)
# convert upper triangular matrix to full matrix
full_result = _format_upper_triangular_matrix(
[reaction_to_result[i] for i in reaction_indexes1], # row indexes
[reaction_to_result[i] for i in reaction_indexes2], # col indexes
results)
# format appropriately with labels
row_ids = [cobra_model.reactions[i].id for i in reaction_indexes1]
column_ids = [cobra_model.reactions[i].id for i in reaction_indexes2]
return format_results_frame(row_ids, column_ids,
full_result, return_frame)
def double_gene_deletion(cobra_model,
gene_list1=None, gene_list2=None,
method="fba", return_frame=False,
solver=None, zero_cutoff=1e-12,
**kwargs):
"""sequentially knocks out pairs of genes in a model
cobra_model : :class:`~cobra.core.Model.Model`
cobra model in which to perform deletions
gene_list1 : [:class:`~cobra.core.Gene.Gene`:] (or their id's)
Genes to be deleted. These will be the rows in the result.
If not provided, all reactions will be used.
    gene_list2 : [:class:`~cobra.core.Gene.Gene`:] (or their id's)
        Genes to be deleted. These will be the columns in the result.
        If not provided, gene_list1 will be used.
method: "fba" or "moma"
Procedure used to predict the growth rate
solver: str for solver name
This must be a QP-capable solver for MOMA. If left unspecified,
a suitable solver will be automatically chosen.
zero_cutoff: float
When checking to see if a value is 0, this threshold is used.
number_of_processes: int for number of processes to use.
If unspecified, the number of parallel processes to use will be
        automatically determined. Setting this to 1 explicitly disables use
of the multiprocessing library.
.. note:: multiprocessing is not supported with method=moma
return_frame: bool
If true, formats the results as a pandas.Dataframe. Otherwise
returns a dict of the form:
{"x": row_labels, "y": column_labels", "data": 2D matrix}
"""
# handle arguments which need to be passed on
if solver is None:
solver = get_solver_name(qp=(method == "moma"))
kwargs["solver"] = solver
kwargs["zero_cutoff"] = zero_cutoff
# generate other arguments
# identifiers for genes
if gene_list1 is None:
gene_ids1 = cobra_model.genes.list_attr("id")
else:
gene_ids1 = [str(i) for i in gene_list1]
if gene_list2 is None:
gene_ids2 = gene_ids1
else:
gene_ids2 = [str(i) for i in gene_list2]
# The gene_id_to_result dict will map each gene id to the index
# in the result matrix.
gene_id_to_result = generate_matrix_indexes(gene_ids1, gene_ids2)
# Determine 0 flux reactions. If an optimal solution passes no flux
# through the deleted reactions, then we know removing them will
# not change the solution.
wt_solution = solver_dict[solver].solve(cobra_model)
if wt_solution.status == "optimal":
kwargs["wt_growth_rate"] = wt_solution.f
kwargs["no_flux_reaction_indexes"] = \
{i for i, v in enumerate(wt_solution.x) if abs(v) < zero_cutoff}
else:
warn("wild-type solution status is '%s'" % wt_solution.status)
if method == "fba":
result = _double_gene_deletion_fba(cobra_model, gene_ids1, gene_ids2,
gene_id_to_result, **kwargs)
elif method == "moma":
result = _double_gene_deletion_moma(cobra_model, gene_ids1, gene_ids2,
gene_id_to_result, **kwargs)
else:
raise ValueError("Unknown deletion method '%s'" % method)
# convert upper triangular matrix to full matrix
full_result = _format_upper_triangular_matrix(
[gene_id_to_result[id] for id in gene_ids1], # row indexes
[gene_id_to_result[id] for id in gene_ids2], # col indexes,
result)
# format as a Dataframe if required
return format_results_frame(gene_ids1, gene_ids2,
full_result, return_frame)
def _double_reaction_deletion_fba(cobra_model, reaction_indexes1,
reaction_indexes2, reaction_to_result,
solver, number_of_processes=None,
zero_cutoff=1e-15, wt_growth_rate=None,
no_flux_reaction_indexes=set(), **kwargs):
"""compute double reaction deletions using fba
cobra_model: model
reaction_indexes1, reaction_indexes2: reaction indexes (used as unique
identifiers)
reaction_to_result: maps each reaction identifier to the entry in
the result matrix
no_flux_reaction_indexes: set of indexes for reactions in the model
which carry no flux in an optimal solution. For deletions only in
    this set, the result will be set to wt_growth_rate.
returns an upper triangular square matrix
"""
if solver is None:
solver = get_solver_name()
# generate the square result matrix
n_results = len(reaction_to_result)
results = numpy.empty((n_results, n_results))
results.fill(numpy.nan)
PoolClass = CobraDeletionMockPool if number_of_processes == 1 \
else CobraDeletionPool # explicitly disable multiprocessing
with PoolClass(cobra_model, n_processes=number_of_processes,
solver=solver, **kwargs) as pool:
# precompute all single deletions in the pool and store them along
# the diagonal
for reaction_index, result_index in iteritems(reaction_to_result):
pool.submit((reaction_index, ), label=result_index)
for result_index, value in pool.receive_all():
# if singly lethal, set everything in row and column to 0
value = value if abs(value) > zero_cutoff else 0.
if value == 0.:
results[result_index, :] = 0.
results[:, result_index] = 0.
else: # only the diagonal needs to be set
results[result_index, result_index] = value
# Run double knockouts in the upper triangle
index_selector = yield_upper_tria_indexes(
reaction_indexes1, reaction_indexes2, reaction_to_result)
for result_index, (r1_index, r2_index) in index_selector:
# skip if the result was already computed to be lethal
if results[result_index] == 0:
continue
# reactions removed carry no flux
if r1_index in no_flux_reaction_indexes and \
r2_index in no_flux_reaction_indexes:
results[result_index] = wt_growth_rate
continue
pool.submit((r1_index, r2_index), label=result_index)
# get results
for result in pool.receive_all():
results[result[0]] = result[1]
return results
def _double_gene_deletion_fba(cobra_model, gene_ids1, gene_ids2,
gene_id_to_result, solver,
number_of_processes=None, zero_cutoff=1e-12,
wt_growth_rate=None,
no_flux_reaction_indexes=set(), **kwargs):
"""compute double gene deletions using fba
cobra_model: model
gene_ids1, gene_ids2: lists of id's to be knocked out
gene_id_to_result: maps each gene identifier to the entry in
the result matrix
no_flux_reaction_indexes: set of indexes for reactions in the model
which carry no flux in an optimal solution. For deletions only in
    this set, the result will be set to wt_growth_rate.
returns an upper triangular square matrix
"""
# Because each gene reaction rule will be evaluated multiple times
    # when the reaction has multiple associated genes being deleted, compiling
# the gene reaction rules ahead of time increases efficiency greatly.
compiled_rules = get_compiled_gene_reaction_rules(cobra_model)
n_results = len(gene_id_to_result)
results = numpy.empty((n_results, n_results))
results.fill(numpy.nan)
if number_of_processes == 1: # explicitly disable multiprocessing
PoolClass = CobraDeletionMockPool
else:
PoolClass = CobraDeletionPool
with PoolClass(cobra_model, n_processes=number_of_processes,
solver=solver, **kwargs) as pool:
# precompute all single deletions in the pool and store them along
# the diagonal
for gene_id, gene_result_index in iteritems(gene_id_to_result):
ko_reactions = find_gene_knockout_reactions(
cobra_model, (cobra_model.genes.get_by_id(gene_id),))
ko_indexes = [cobra_model.reactions.index(i) for i in ko_reactions]
pool.submit(ko_indexes, label=gene_result_index)
for result_index, value in pool.receive_all():
# if singly lethal, set everything in row and column to 0
value = value if abs(value) > zero_cutoff else 0.
if value == 0.:
results[result_index, :] = 0.
results[:, result_index] = 0.
else: # only the diagonal needs to be set
results[result_index, result_index] = value
# Run double knockouts in the upper triangle
index_selector = yield_upper_tria_indexes(gene_ids1, gene_ids2,
gene_id_to_result)
for result_index, (gene1, gene2) in index_selector:
# if singly lethal the results have already been set
if results[result_index] == 0:
continue
ko_reactions = find_gene_knockout_reactions(
cobra_model, (gene1, gene2), compiled_rules)
ko_indexes = [cobra_model.reactions.index(i)
for i in ko_reactions]
# if all removed gene indexes carry no flux
if len(set(ko_indexes) - no_flux_reaction_indexes) == 0:
results[result_index] = wt_growth_rate
continue
pool.submit(ko_indexes, label=result_index)
for result in pool.receive_all():
value = result[1]
if value < zero_cutoff:
value = 0
results[result[0]] = value
return results
def _double_reaction_deletion_moma(cobra_model, reaction_indexes1,
reaction_indexes2, reaction_to_result,
solver, number_of_processes=1,
zero_cutoff=1e-15, wt_growth_rate=None,
no_flux_reaction_indexes=set(), **kwargs):
"""compute double reaction deletions using moma
cobra_model: model
reaction_indexes1, reaction_indexes2: reaction indexes (used as unique
identifiers)
reaction_to_result: maps each reaction identifier to the entry in
the result matrix
no_flux_reaction_indexes: set of indexes for reactions in the model
which carry no flux in an optimal solution. For deletions only in
    this set, the result will be set to wt_growth_rate.
    number_of_processes: must be 1. Parallel MOMA not yet implemented
returns an upper triangular square matrix
"""
if number_of_processes > 1:
raise NotImplementedError("parallel MOMA not implemented")
if moma is None:
raise RuntimeError("scipy required for MOMA")
# generate the square result matrix
n_results = len(reaction_to_result)
results = numpy.empty((n_results, n_results))
results.fill(numpy.nan)
# function to compute reaction knockouts with moma
moma_model, moma_obj = moma.create_euclidian_moma_model(cobra_model)
def run(indexes):
# If all the reactions carry no flux, deletion will have no effect.
if no_flux_reaction_indexes.issuperset(indexes):
return wt_growth_rate
return moma.moma_knockout(moma_model, moma_obj, indexes,
solver=solver, **kwargs).f
# precompute all single deletions and store them along the diagonal
for reaction_index, result_index in iteritems(reaction_to_result):
value = run((reaction_index,))
value = value if abs(value) > zero_cutoff else 0.
results[result_index, result_index] = value
# if singly lethal, the entire row and column are set to 0
if value == 0.:
results[result_index, :] = 0.
results[:, result_index] = 0.
# Run double knockouts in the upper triangle
index_selector = yield_upper_tria_indexes(
reaction_indexes1, reaction_indexes2, reaction_to_result)
for result_index, (r1_index, r2_index) in index_selector:
# skip if the result was already computed to be lethal
if results[result_index] == 0:
continue
else:
results[result_index] = run((r1_index, r2_index))
return results
def _double_gene_deletion_moma(cobra_model, gene_ids1, gene_ids2,
gene_id_to_result, solver,
number_of_processes=1,
zero_cutoff=1e-12, wt_growth_rate=None,
no_flux_reaction_indexes=set(), **kwargs):
"""compute double gene deletions using moma
cobra_model: model
gene_ids1, gene_ids2: lists of id's to be knocked out
gene_id_to_result: maps each gene identifier to the entry in
the result matrix
number_of_processes: must be 1. Parallel MOMA not yet implemented
no_flux_reaction_indexes: set of indexes for reactions in the model
which carry no flux in an optimal solution. For deletions only in
    this set, the result will be set to wt_growth_rate.
returns an upper triangular square matrix
"""
if number_of_processes > 1:
raise NotImplementedError("parallel MOMA not implemented")
if moma is None:
raise RuntimeError("scipy required for MOMA")
# Because each gene reaction rule will be evaluated multiple times
    # when the reaction has multiple associated genes being deleted, compiling
# the gene reaction rules ahead of time increases efficiency greatly.
compiled_rules = get_compiled_gene_reaction_rules(cobra_model)
# function to compute reaction knockouts with moma
moma_model, moma_obj = moma.create_euclidian_moma_model(cobra_model)
def run(gene_ids):
ko_reactions = find_gene_knockout_reactions(cobra_model, gene_ids)
ko_indexes = map(cobra_model.reactions.index, ko_reactions)
# If all the reactions carry no flux, deletion will have no effect.
if no_flux_reaction_indexes.issuperset(gene_ids):
return wt_growth_rate
return moma.moma_knockout(moma_model, moma_obj, ko_indexes,
solver=solver, **kwargs).f
n_results = len(gene_id_to_result)
results = numpy.empty((n_results, n_results))
results.fill(numpy.nan)
# precompute all single deletions and store them along the diagonal
for gene_id, result_index in iteritems(gene_id_to_result):
value = run((gene_id,))
value = value if abs(value) > zero_cutoff else 0.
results[result_index, result_index] = value
# If singly lethal, the entire row and column are set to 0.
if value == 0.:
results[result_index, :] = 0.
results[:, result_index] = 0.
# Run double knockouts in the upper triangle
index_selector = yield_upper_tria_indexes(gene_ids1, gene_ids2,
gene_id_to_result)
for result_index, (gene1, gene2) in index_selector:
# if singly lethal the results have already been set
if results[result_index] == 0:
continue
results[result_index] = run((gene1, gene2))
return results
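if __name__ == "__main__":
    # Illustrative usage sketch only. It assumes cobra.test.create_test_model
    # is available (as in the cobrapy test suite); point it at whatever model
    # you actually have.
    from cobra.test import create_test_model
    example_model = create_test_model("textbook")
    example_genes = [g.id for g in example_model.genes[:5]]
    example_result = double_gene_deletion(example_model,
                                          gene_list1=example_genes,
                                          method="fba")
    print(example_result["data"])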
| lgpl-2.1 |
charanpald/wallhack | wallhack/modelselect/RealDataTreeProcess.py | 1 | 1243 | from sandbox.util.PathDefaults import PathDefaults
from sandbox.util import Util
from exp.modelselect.ModelSelectUtils import ModelSelectUtils
import matplotlib.pyplot as plt
import logging
import numpy
outputDir = PathDefaults.getOutputDir() + "modelPenalisation/regression/CART/"
datasets = ModelSelectUtils.getRegressionDatasets(True)
gammas = numpy.unique(numpy.array(numpy.round(2**numpy.arange(1, 7.25, 0.25)-1), dtype=numpy.int))
print(gammas)
#To use the betas in practice, pick the lowest value so far
for datasetName, numRealisations in datasets:
try:
A = numpy.load(outputDir + datasetName + "Beta.npz")["arr_0"]
inds = gammas>10
tempGamma = numpy.sqrt(gammas[inds])
tempA = A[inds, :]
tempA = numpy.clip(tempA, 0, 1)
plt.figure(0)
plt.plot(tempGamma, Util.cumMin(tempA[:, 0]), label="50")
plt.plot(tempGamma, Util.cumMin(tempA[:, 1]), label="100")
plt.plot(tempGamma, Util.cumMin(tempA[:, 2]), label="200")
plt.legend()
plt.title(datasetName)
plt.xlabel("gamma")
plt.ylabel("Beta")
plt.show()
except:
print("Dataset not found " + datasetName) | gpl-3.0 |
rhyolight/nupic.research | projects/sp_paper/run_sp_tm_model.py | 4 | 13849 | ## ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import importlib
import os
from optparse import OptionParser
import yaml
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.prediction_metrics_manager import MetricsManager
from nupic.frameworks.opf import metrics
from nupic.frameworks.opf.htm_prediction_model import HTMPredictionModel
import pandas as pd
from htmresearch.support.sequence_learning_utils import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
rcParams['pdf.fonttype'] = 42
plt.ion()
DATA_DIR = "../../htmresearch/data"
MODEL_PARAMS_DIR = "./model_params"
def getMetricSpecs(predictedField, stepsAhead=5):
_METRIC_SPECS = (
MetricSpec(field=predictedField, metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'negativeLogLikelihood',
'window': 1000, 'steps': stepsAhead}),
MetricSpec(field=predictedField, metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'nrmse', 'window': 1000,
'steps': stepsAhead}),
)
return _METRIC_SPECS
def createModel(modelParams):
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": predictedField})
return model
def getModelParamsFromName(dataSet):
# importName = "model_params.%s_model_params" % (
# dataSet.replace(" ", "_").replace("-", "_")
# )
# print "Importing model params from %s" % importName
try:
importedModelParams = yaml.safe_load(
open('model_params/nyc_taxi_model_params.yaml'))
# importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% dataSet)
return importedModelParams
def _getArgs():
parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
"\n\nCompare TM performance with trivial predictor using "
"model outputs in prediction directory "
"and outputting results to result directory.")
parser.add_option("-d",
"--dataSet",
type=str,
default='nyc_taxi',
dest="dataSet",
help="DataSet Name, choose from rec-center-hourly, nyc_taxi")
parser.add_option("-p",
"--plot",
default=False,
dest="plot",
help="Set to True to plot result")
parser.add_option("--stepsAhead",
help="How many steps ahead to predict. [default: %default]",
default=5,
type=int)
parser.add_option("--trainSP",
help="Whether to train SP",
default=True,
dest="trainSP",
type=int)
parser.add_option("--boostStrength",
help="strength of boosting",
default=1,
dest="boostStrength",
type=int)
parser.add_option("-c",
"--classifier",
type=str,
default='SDRClassifierRegion',
dest="classifier",
help="Classifier Type: SDRClassifierRegion or CLAClassifierRegion")
(options, remainder) = parser.parse_args()
print options
return options, remainder
def getInputRecord(df, predictedField, i):
inputRecord = {
predictedField: float(df[predictedField][i]),
"timeofday": float(df["timeofday"][i]),
"dayofweek": float(df["dayofweek"][i]),
}
return inputRecord
def printTPRegionParams(tpregion):
"""
Note: assumes we are using TemporalMemory/TPShim in the TPRegion
"""
tm = tpregion.getSelf()._tfdr
print "------------PY TemporalMemory Parameters ------------------"
print "numberOfCols =", tm.getColumnDimensions()
print "cellsPerColumn =", tm.getCellsPerColumn()
print "minThreshold =", tm.getMinThreshold()
print "activationThreshold =", tm.getActivationThreshold()
print "newSynapseCount =", tm.getMaxNewSynapseCount()
print "initialPerm =", tm.getInitialPermanence()
print "connectedPerm =", tm.getConnectedPermanence()
print "permanenceInc =", tm.getPermanenceIncrement()
print "permanenceDec =", tm.getPermanenceDecrement()
print "predictedSegmentDecrement=", tm.getPredictedSegmentDecrement()
print
def runMultiplePass(df, model, nMultiplePass, nTrain):
"""
  run the CLA model through data records 0:nTrain for nMultiplePass passes
"""
predictedField = model.getInferenceArgs()['predictedField']
print "run TM through the train data multiple times"
for nPass in xrange(nMultiplePass):
for j in xrange(nTrain):
inputRecord = getInputRecord(df, predictedField, j)
result = model.run(inputRecord)
if j % 100 == 0:
print " pass %i, record %i" % (nPass, j)
# reset temporal memory
model._getTPRegion().getSelf()._tfdr.reset()
return model
def runMultiplePassSPonly(df, model, nMultiplePass, nTrain):
"""
run CLA model SP through data record 0:nTrain nMultiplePass passes
"""
predictedField = model.getInferenceArgs()['predictedField']
print "run TM through the train data multiple times"
for nPass in xrange(nMultiplePass):
for j in xrange(nTrain):
inputRecord = getInputRecord(df, predictedField, j)
model._sensorCompute(inputRecord)
model._spCompute()
if j % 400 == 0:
print " pass %i, record %i" % (nPass, j)
return model
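# Illustrative usage (comments only): once df, model and nTrain are set up in
# the __main__ block below, a multi-pass warm-up could be run with
#   model = runMultiplePass(df, model, nMultiplePass=3, nTrain=5000)
# or, to train only the spatial pooler,
#   model = runMultiplePassSPonly(df, model, nMultiplePass=3, nTrain=5000)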
if __name__ == "__main__":
(_options, _args) = _getArgs()
dataSet = _options.dataSet
plot = _options.plot
classifierType = _options.classifier
trainSP = bool(_options.trainSP)
boostStrength = _options.boostStrength
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
predictedField = "passenger_count"
modelParams = getModelParamsFromName("nyc_taxi")
modelParams['modelParams']['clParams']['steps'] = str(_options.stepsAhead)
modelParams['modelParams']['clParams']['regionName'] = classifierType
modelParams['modelParams']['spParams']['boostStrength'] = boostStrength
print "Creating model from %s..." % dataSet
# use customized CLA model
model = HTMPredictionModel(**modelParams['modelParams'])
model.enableInference({"predictedField": predictedField})
model.enableLearning()
model._spLearningEnabled = bool(trainSP)
model._tpLearningEnabled = True
print model._spLearningEnabled
printTPRegionParams(model._getTPRegion())
inputData = "%s/%s.csv" % (DATA_DIR, dataSet.replace(" ", "_"))
sensor = model._getSensorRegion()
encoderList = sensor.getSelf().encoder.getEncoderList()
if sensor.getSelf().disabledEncoder is not None:
classifier_encoder = sensor.getSelf().disabledEncoder.getEncoderList()
classifier_encoder = classifier_encoder[0]
else:
classifier_encoder = None
_METRIC_SPECS = getMetricSpecs(predictedField, stepsAhead=_options.stepsAhead)
metric = metrics.getModule(_METRIC_SPECS[0])
metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
model.getInferenceType())
if plot:
plotCount = 1
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
plt.title(predictedField)
plt.ylabel('Data')
    plt.xlabel('Time')
plt.tight_layout()
plt.ion()
print "Load dataset: ", dataSet
df = pd.read_csv(inputData, header=0, skiprows=[1, 2])
nTrain = 5000
maxBucket = classifier_encoder.n - classifier_encoder.w + 1
likelihoodsVecAll = np.zeros((maxBucket, len(df)))
prediction_nstep = None
time_step = []
actual_data = []
patternNZ_track = []
predict_data = np.zeros((_options.stepsAhead, 0))
predict_data_ML = []
negLL_track = []
activeCellNum = []
trueBucketIndex = []
sp = model._getSPRegion().getSelf()._sfdr
spActiveCellsCount = np.zeros(sp.getColumnDimensions())
for i in xrange(len(df)):
inputRecord = getInputRecord(df, predictedField, i)
result = model.run(inputRecord)
trueBucketIndex.append(model._getClassifierInputRecord(inputRecord).bucketIndex)
# inspect SP
sp = model._getSPRegion().getSelf()._sfdr
spOutput = model._getSPRegion().getOutputData('bottomUpOut')
spActiveCellsCount[spOutput.nonzero()[0]] += 1
tp = model._getTPRegion()
tm = tp.getSelf()._tfdr
activeColumn = tm.getActiveCells()
activeCellNum.append(len(activeColumn))
result.metrics = metricsManager.update(result)
negLL = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
if i % 100 == 0 and i>0:
negLL = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
nrmse = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='nrmse':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
numActiveCell = np.mean(activeCellNum[-100:])
print "After %i records, %d-step negLL=%f nrmse=%f ActiveCell %f " % \
(i, _options.stepsAhead, negLL, nrmse, numActiveCell)
last_prediction = prediction_nstep
prediction_nstep = \
result.inferences["multiStepBestPredictions"][_options.stepsAhead]
bucketLL = \
result.inferences['multiStepBucketLikelihoods'][_options.stepsAhead]
likelihoodsVec = np.zeros((maxBucket,))
if bucketLL is not None:
for (k, v) in bucketLL.items():
likelihoodsVec[k] = v
time_step.append(i)
actual_data.append(inputRecord[predictedField])
predict_data_ML.append(
result.inferences['multiStepBestPredictions'][_options.stepsAhead])
negLL_track.append(negLL)
likelihoodsVecAll[0:len(likelihoodsVec), i] = likelihoodsVec
predData_TM_n_step = np.roll(np.array(predict_data_ML), _options.stepsAhead)
nTest = len(actual_data) - nTrain - _options.stepsAhead
NRMSE_TM = NRMSE(actual_data[nTrain:nTrain+nTest], predData_TM_n_step[nTrain:nTrain+nTest])
print "NRMSE on test data: ", NRMSE_TM
# calculate neg-likelihood
predictions = np.transpose(likelihoodsVecAll)
  truth = np.roll(actual_data, -5)  # shift by the prediction horizon (assumes stepsAhead=5)
from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder
encoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True)
bucketIndex2 = []
negLL = []
minProb = 0.0001
for i in xrange(len(truth)):
bucketIndex2.append(np.where(encoder.encode(truth[i]))[0])
outOfBucketProb = 1 - sum(predictions[i,:])
prob = predictions[i, bucketIndex2[i]]
if prob == 0:
prob = outOfBucketProb
if prob < minProb:
prob = minProb
negLL.append( -np.log(prob))
negLL = computeLikelihood(predictions, truth, encoder)
negLL[:5000] = np.nan
x = range(len(negLL))
if not os.path.exists("./results/nyc_taxi/"):
os.makedirs("./results/nyc_taxi/")
np.savez('./results/nyc_taxi/{}{}TMprediction_SPLearning_{}_boost_{}'.format(
dataSet, classifierType, trainSP, boostStrength),
predictions, predict_data_ML, truth)
activeDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
sp.getActiveDutyCycles(activeDutyCycle)
overlapDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
sp.getOverlapDutyCycles(overlapDutyCycle)
if not os.path.exists("./figures/nyc_taxi/"):
os.makedirs("./figures/nyc_taxi/")
plt.figure()
plt.clf()
plt.subplot(2, 2, 1)
plt.hist(overlapDutyCycle)
plt.xlabel('overlapDutyCycle')
plt.subplot(2, 2, 2)
plt.hist(activeDutyCycle)
plt.xlim([0, .1])
plt.xlabel('activeDutyCycle-1000')
plt.subplot(2, 2, 3)
totalActiveDutyCycle = spActiveCellsCount.astype('float32') / len(df)
dutyCycleDist, binEdge = np.histogram(totalActiveDutyCycle,
bins=20, range=[-0.0025, 0.0975])
dutyCycleDist = dutyCycleDist.astype('float32')/np.sum(dutyCycleDist)
binWidth = np.mean(binEdge[1:]-binEdge[:-1])
binCenter = binEdge[:-1] + binWidth/2
plt.bar(binCenter, dutyCycleDist, width=0.005)
plt.xlim([-0.0025, .1])
plt.ylim([0, .7])
plt.xlabel('activeDutyCycle-Total')
plt.savefig('figures/nyc_taxi/DutyCycle_SPLearning_{}_boost_{}.pdf'.format(
trainSP, boostStrength))
| gpl-3.0 |
prheenan/Research | Perkins/AnalysisUtil/ForceExtensionAnalysis/DataCorrection/CorrectionByFFT.py | 1 | 7635 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
from scipy.fftpack import rfft,irfft
from scipy.interpolate import interp1d
from FitUtil.FitUtils.Python.FitUtil import GenFit
import copy
from Research.Perkins.AnalysisUtil.ForceExtensionAnalysis import FEC_Util
class CorrectionObject:
def __init__(self,MaxInvolsSizeMeters = 10e-9,FractionForOffset = 0.2,
SpatialGridUpSample = 5,MaxFourierSpaceComponent=10e-9):
"""
Creates a sklearn-style object for correcting data
Args
MaxInvolsSizeMeters: Maximum possible decay constant (in separation)
from trigger point to zero.
FractionForOffset: how much of the approach/retract curve is used for
offsetting
SpatialGridUpSample: how much to up-sample the separation grid, to
get a uniform fourier series
MaxFourierSpaceComponent: the maximum spatial component to the
Fourier series. This is 1/(2*f_nyquist), where f_nyquist is the
        Nyquist 'frequency' (inverse spatial dimension)
"""
self.MaxInvolsSizeMeters = MaxInvolsSizeMeters
self.FractionForOffset = FractionForOffset
self.SpatialGridUpSample = SpatialGridUpSample
self.MaxFourierSpaceComponent = MaxFourierSpaceComponent
def ZeroForceAndSeparation(self,Obj,IsApproach):
"""
See FEC_Util.ZeroForceAndSeparation
"""
return FEC_Util.ZeroForceAndSeparation(Obj,IsApproach,
self.FractionForOffset)
def FitInvols(self,Obj):
"""
Fit to the invols on the (approach!) portion of Obj
Args:
Obj: TimeSepForceObject. We get just the approach from it and
fit to that
Returns:
Nothing, but sets the object for future predicts
"""
Approach,Retract = FEC_Util.GetApproachRetract(Obj)
# get the zeroed force and separation
SeparationZeroed,ForceZeroed = self.ZeroForceAndSeparation(Approach,
True)
ArbOffset = max(np.abs(ForceZeroed))
A = max(ForceZeroed)
# adding in the arbitrary offset actually helps quite a bit.
# we fit versus time, which also helps.
FittingFunction = lambda t,tau : np.log(A * np.exp(-t/tau)+ArbOffset)
# for fitting, flip time around
MaxTau = self.MaxInvolsSizeMeters
params,_,_ = GenFit(SeparationZeroed,np.log(ForceZeroed+ArbOffset),
model=FittingFunction,
bounds=(0,MaxTau))
# tau is the first (only) parameter
self.Lambda= params[0]
self.MaxForceForDecay = max(ForceZeroed)
def PredictInvols(self,Obj,IsApproach):
"""
Given an object, predicts the invols portion of its curve. *must* call
after a fit
Args:
Obj: see FitInvols, except this is *either* the approach
or retract
IsApproach: see FitInvols
Returns:
Predicted, Zero-offset invols decay for Obj
"""
SeparationZeroed,_ = self.ZeroForceAndSeparation(Obj,IsApproach)
return self.MaxForceForDecay * np.exp(-SeparationZeroed/self.Lambda)
def FitInterference(self,Obj):
"""
Given a TimeSepForce Object, fits to the interference artifact
Args:
Obj: TImeSepForceObject
Returns:
Nothing, but sets internal state for future predict
"""
Approach,_ = FEC_Util.GetApproachRetract(Obj)
# get the zeroed force and separation
SeparationZeroed,ForceZeroed = self.ZeroForceAndSeparation(Approach,
True)
# get the residuals (essentially, no 'invols') part
FourierComponents = max(SeparationZeroed)/self.MaxFourierSpaceComponent
NumFourierTerms = np.ceil(FourierComponents/self.SpatialGridUpSample)
        # down-sample the number of terms to match the grid
# get the fourier transform in *space*. Need to interpolate onto
# uniform gridding
N = SeparationZeroed.size
self.linear_grid = np.linspace(0,max(SeparationZeroed),
N*self.SpatialGridUpSample)
# how many actual terms does that translate into?
ForceInterp =interp1d(x=SeparationZeroed,
y=Approach.Force,kind='linear')
self.fft_coeffs = rfft(ForceInterp(self.linear_grid))
# remove all the high-frequecy stuff
NumTotalTermsPlusDC = int(2*NumFourierTerms+1)
self.fft_coeffs[NumTotalTermsPlusDC:] = 0
def PredictInterference(self,Obj,IsApproach):
"""
Given a previous PredictIntereference, returns the prediction of the
fft (ie: at each spatial point in Obj.Force, returns the prediction)
Args:
Obj: See FitInterference
IsApproach: True if we are predicting the approach
Returns:
prediction of fft coefficients
"""
# interpolate back to the original grid
SeparationZeroed,_ = self.ZeroForceAndSeparation(Obj,
IsApproach)
N = SeparationZeroed.size
fft_representation = irfft(self.fft_coeffs)
MaxGrid = np.max(self.linear_grid)
# should only interpolate (correct) out to however much approach
# data we have
GoodInterpolationIndices = np.where(SeparationZeroed <= MaxGrid)
BadIdx = np.where(SeparationZeroed > MaxGrid)
fft_pred = np.zeros(SeparationZeroed.size)
# determine the interpolator -- should be able to use everywhere
# we are within range
GoodInterpolator = interp1d(x=self.linear_grid,y=fft_representation)
fft_pred[GoodInterpolationIndices] =\
GoodInterpolator(SeparationZeroed[GoodInterpolationIndices])
# everything else just gets the DC offset, which is the 0-th component
fft_pred[BadIdx] = fft_representation[0]
return fft_pred
def CorrectApproachAndRetract(self,Obj):
"""
Given an object, corrects and returns the approach and retract
portions of the curve (dwell excepted)
Args:
Obj: see FitInterference
Returns:
Tuple of two TimeSepForce Objects, one for approach, one for
Retract. Throws out the dwell portion
"""
Approach,Retract = FEC_Util.GetApproachRetract(Obj)
SeparationZeroed,ForceZeroed = self.\
ZeroForceAndSeparation(Approach,IsApproach=True)
# fit the interference artifact
self.FitInterference(Approach)
fft_pred = self.PredictInterference(Approach,
IsApproach=True)
# make a prediction without the wiggles
Approach.Force -= fft_pred
# just for clarities sake, the approach has now been corrected
ApproachCorrected = Approach
RetractNoInvols = Retract
fft_pred_retract = self.PredictInterference(RetractNoInvols,
IsApproach=False)
RetractCorrected = copy.deepcopy(RetractNoInvols)
RetractCorrected.Force -= fft_pred_retract
return ApproachCorrected,RetractCorrected
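# Illustrative usage sketch (comments only; "time_sep_force_obj" is a
# hypothetical TimeSepForce object loaded elsewhere with the Research/FEC_Util
# helpers):
#
#   corrector = CorrectionObject()
#   corrector.FitInvols(time_sep_force_obj)          # exponential invols decay fit
#   approach_corr, retract_corr = \
#       corrector.CorrectApproachAndRetract(time_sep_force_obj)
#
# CorrectApproachAndRetract fits the low-frequency interference artifact on the
# approach and subtracts the prediction from both the approach and the retract.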
| gpl-3.0 |
zigahertz/2013-Sep-HR-ML-sprint | py/titanic.py | 1 | 3415 | import os
import csv as csv
import numpy as np
import matplotlib.pyplot as plt
path = os.getcwd()
csv_file_object = csv.reader(open(path + '/train.csv', 'rb'))
header = csv_file_object.next()
data = []
for row in csv_file_object:
data.append(row)
data = np.array(data)
number_passengers = np.size(data[0::, 0].astype(np.float))
number_survived = np.sum(data[0::, 0].astype(np.float))
prop_surv = number_survived / number_passengers # proportion of survivors
print '# passengers: ' + str(number_passengers)
print '# survived: ' + str(number_survived)
print '# % survive: ' + str(prop_surv)
print
women_only_stats = data[0::, 3] == 'female'
men_only_stats = data[0::, 3] != 'female'
women_onboard = data[women_only_stats, 0].astype(np.float)
men_onboard = data[men_only_stats, 0].astype(np.float)
print 'wos: ' + str(len(women_only_stats))
proportion_women_survived = np.sum(women_onboard) / np.size(women_onboard)
proportion_men_survived = np.sum(men_onboard) / np.size(men_onboard)
print 'proportion women survived: %s' % proportion_women_survived
print 'proportion men survived: %s' % proportion_men_survived
test_file_object = csv.reader(open(path + '/test.csv', 'rb'))
header = test_file_object.next()
open_file_object = csv.writer(open(path + '/genderbasedmodelpy.csv', 'wb'))
for row in test_file_object:
if row[2] == 'female':
row.insert(0, '1')
open_file_object.writerow(row)
else:
row.insert(0, '0')
open_file_object.writerow(row)
### Multivariate Prediction ###
fare_ceiling = 40
data[data[0::,8].astype(np.float) >= fare_ceiling, 8] = fare_ceiling - 1.0
fare_bracket_size = 10
num_price_brackets = fare_ceiling / fare_bracket_size
num_classes = 3
survival_table = np.zeros((2, num_classes, num_price_brackets))
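# survival_table layout: axis 0 = gender (0 = female, 1 = male),
# axis 1 = passenger class index (Pclass - 1), axis 2 = fare bracket;
# each cell will hold the mean survival rate observed for that group.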
for i in xrange(num_classes):
for j in xrange(num_price_brackets):
women_only_stats = data[(data[0::, 3] == 'female') & \
(data[0::, 1].astype(np.float) == i+1) & \
(data[0:, 8].astype(np.float) >= j * fare_bracket_size) & \
(data[0:, 8].astype(np.float) < (j+1) * fare_bracket_size), 0]
men_only_stats = data[(data[0::, 3] != 'female') & \
(data[0::, 1].astype(np.float) == i+1) & \
(data[0:, 8].astype(np.float) >= j * fare_bracket_size) & \
(data[0:, 8].astype(np.float) < (j+1) * fare_bracket_size), 0]
survival_table[0, i, j] = np.mean(women_only_stats.astype(np.float))
survival_table[1, i, j] = np.mean(men_only_stats.astype(np.float))
survival_table[survival_table != survival_table] = 0
survival_table[survival_table < 0.5] = 0
survival_table[survival_table >= 0.5] = 1
print survival_table
test_file_object = csv.reader(open(path + '/test.csv', 'rb'))
new_open_file_object = csv.writer(open(path + '/genderclasspricebasedmodelpy.csv', 'wb'))
header = test_file_object.next()
for row in test_file_object:
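    # Determine the fare bracket for this passenger: if the fare field cannot be
    # parsed, fall back to a bracket derived from the passenger class; fares
    # above the ceiling go into the top bracket.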
for j in xrange(num_price_brackets):
try:
            row[7] = float(row[7])
except:
bin_fare = 3 - float(row[0])
break
if row[7] > fare_ceiling:
bin_fare = num_price_brackets - 1
break
if row[7] >= j*fare_bracket_size and row[7] < (j+1)*fare_bracket_size:
bin_fare = j
break
if row[2] == 'female':
row.insert(0, int(survival_table[0, float(row[0])-1, bin_fare]))
new_open_file_object.writerow(row)
else:
row.insert(0, int(survival_table[1, float(row[0])-1, bin_fare]))
new_open_file_object.writerow(row)
| mit |
Jean13/CVE_Compare | python/v1.2/setup.py | 1 | 1092 | '''
CVE_Compare.py Dependencies
Run: python setup.py
'''
import subprocess, sys
def check_path():
try:
# Find where PIP.exe is
p = subprocess.Popen(["where.exe", "pip.exe"], stdout = subprocess.PIPE)
path = str(p.stdout.read())
# Clean up the path found before adding to system path
path = path[:path.find("\\pip")]
path = path[2:-1]
path = path.replace("\\\\", "\\")
# Check whether PIP is in the PATH, and if not, add it
sys_path = str(sys.path)
        if path in sys_path:
print("[*] PIP in PATH. \n")
else:
sys.path.append(path)
print("[*] PIP added to PATH. \n")
except Exception as e:
print(e)
def setup(package):
try:
p = subprocess.Popen(["pip.exe", "install", package], stdout = sys.stdout)
# Print output
p.communicate()
except Exception as e:
print(e)
check_path()
setup("pathlib")
setup("requests")
setup("numpy")
setup("xlrd")
setup("pandas")
| apache-2.0 |
AtsushiSakai/PythonRobotics | PathTracking/pure_pursuit/pure_pursuit.py | 1 | 6184 | """
Path tracking simulation with pure pursuit steering and PID speed control.
author: Atsushi Sakai (@Atsushi_twi)
Guillaume Jacquenot (@Gjacquenot)
"""
import numpy as np
import math
import matplotlib.pyplot as plt
# Parameters
k = 0.1 # look forward gain
Lfc = 2.0 # [m] look-ahead distance
Kp = 1.0 # speed proportional gain
dt = 0.1 # [s] time tick
WB = 2.9 # [m] wheel base of vehicle
show_animation = True
class State:
def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):
self.x = x
self.y = y
self.yaw = yaw
self.v = v
self.rear_x = self.x - ((WB / 2) * math.cos(self.yaw))
self.rear_y = self.y - ((WB / 2) * math.sin(self.yaw))
def update(self, a, delta):
self.x += self.v * math.cos(self.yaw) * dt
self.y += self.v * math.sin(self.yaw) * dt
self.yaw += self.v / WB * math.tan(delta) * dt
self.v += a * dt
self.rear_x = self.x - ((WB / 2) * math.cos(self.yaw))
self.rear_y = self.y - ((WB / 2) * math.sin(self.yaw))
def calc_distance(self, point_x, point_y):
dx = self.rear_x - point_x
dy = self.rear_y - point_y
return math.hypot(dx, dy)
class States:
def __init__(self):
self.x = []
self.y = []
self.yaw = []
self.v = []
self.t = []
def append(self, t, state):
self.x.append(state.x)
self.y.append(state.y)
self.yaw.append(state.yaw)
self.v.append(state.v)
self.t.append(t)
def proportional_control(target, current):
a = Kp * (target - current)
return a
class TargetCourse:
def __init__(self, cx, cy):
self.cx = cx
self.cy = cy
self.old_nearest_point_index = None
def search_target_index(self, state):
        # To speed up the nearest point search, do it only on the first call.
if self.old_nearest_point_index is None:
# search nearest point index
dx = [state.rear_x - icx for icx in self.cx]
dy = [state.rear_y - icy for icy in self.cy]
d = np.hypot(dx, dy)
ind = np.argmin(d)
self.old_nearest_point_index = ind
else:
ind = self.old_nearest_point_index
distance_this_index = state.calc_distance(self.cx[ind],
self.cy[ind])
while True:
distance_next_index = state.calc_distance(self.cx[ind + 1],
self.cy[ind + 1])
if distance_this_index < distance_next_index:
break
ind = ind + 1 if (ind + 1) < len(self.cx) else ind
distance_this_index = distance_next_index
self.old_nearest_point_index = ind
Lf = k * state.v + Lfc # update look ahead distance
# search look ahead target point index
while Lf > state.calc_distance(self.cx[ind], self.cy[ind]):
if (ind + 1) >= len(self.cx):
                break  # do not exceed the goal index
ind += 1
return ind, Lf
def pure_pursuit_steer_control(state, trajectory, pind):
ind, Lf = trajectory.search_target_index(state)
if pind >= ind:
ind = pind
if ind < len(trajectory.cx):
tx = trajectory.cx[ind]
ty = trajectory.cy[ind]
else: # toward goal
tx = trajectory.cx[-1]
ty = trajectory.cy[-1]
ind = len(trajectory.cx) - 1
alpha = math.atan2(ty - state.rear_y, tx - state.rear_x) - state.yaw
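    # Pure pursuit steering law for a bicycle model: alpha is the heading error
    # towards the look-ahead point, and the steering angle that drives the rear
    # axle along a circular arc through that point is
    #   delta = atan(2 * WB * sin(alpha) / Lf)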
delta = math.atan2(2.0 * WB * math.sin(alpha) / Lf, 1.0)
return delta, ind
def plot_arrow(x, y, yaw, length=1.0, width=0.5, fc="r", ec="k"):
"""
Plot arrow
"""
if not isinstance(x, float):
for ix, iy, iyaw in zip(x, y, yaw):
plot_arrow(ix, iy, iyaw)
else:
plt.arrow(x, y, length * math.cos(yaw), length * math.sin(yaw),
fc=fc, ec=ec, head_width=width, head_length=width)
plt.plot(x, y)
def main():
# target course
cx = np.arange(0, 50, 0.5)
cy = [math.sin(ix / 5.0) * ix / 2.0 for ix in cx]
target_speed = 10.0 / 3.6 # [m/s]
T = 100.0 # max simulation time
# initial state
state = State(x=-0.0, y=-3.0, yaw=0.0, v=0.0)
lastIndex = len(cx) - 1
time = 0.0
states = States()
states.append(time, state)
target_course = TargetCourse(cx, cy)
target_ind, _ = target_course.search_target_index(state)
while T >= time and lastIndex > target_ind:
# Calc control input
ai = proportional_control(target_speed, state.v)
di, target_ind = pure_pursuit_steer_control(
state, target_course, target_ind)
state.update(ai, di) # Control vehicle
time += dt
states.append(time, state)
if show_animation: # pragma: no cover
plt.cla()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plot_arrow(state.x, state.y, state.yaw)
plt.plot(cx, cy, "-r", label="course")
plt.plot(states.x, states.y, "-b", label="trajectory")
plt.plot(cx[target_ind], cy[target_ind], "xg", label="target")
plt.axis("equal")
plt.grid(True)
plt.title("Speed[km/h]:" + str(state.v * 3.6)[:4])
plt.pause(0.001)
# Test
assert lastIndex >= target_ind, "Cannot goal"
if show_animation: # pragma: no cover
plt.cla()
plt.plot(cx, cy, ".r", label="course")
plt.plot(states.x, states.y, "-b", label="trajectory")
plt.legend()
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.axis("equal")
plt.grid(True)
plt.subplots(1)
plt.plot(states.t, [iv * 3.6 for iv in states.v], "-r")
plt.xlabel("Time[s]")
plt.ylabel("Speed[km/h]")
plt.grid(True)
plt.show()
if __name__ == '__main__':
print("Pure pursuit path tracking simulation start")
main()
| mit |
dfridovi/path_planning | src/python/filter/map.py | 1 | 6696 | """
Copyright (c) 2015, The Regents of the University of California (Regents).
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Please contact the author(s) of this library if you have any questions.
Author: David Fridovich-Keil ( [email protected] )
"""
###########################################################################
#
# Map class to test the filtering approach to mapping.
#
###########################################################################
import numpy as np
from numpy import matlib
import matplotlib.pyplot as plt
from landmark import Landmark
class Map:
# Constructor.
def __init__(self):
self.size_ = 0
self.registry_ = {}
# Add a landmark.
def AddLandmark(self, p):
if p.GetID() in self.registry_:
print "This landmark is already in the map. Did not add."
return
        # Add to state vector and assign identity covariance.
position = p.GetLocation()
if self.size_ == 0:
self.point_size_ = len(position)
self.state_ = position
self.covariance_ = np.matlib.eye(self.point_size_)
elif len(position) != self.point_size_:
print "Point size does not match. Did not add."
return
else:
self.state_ = np.vstack([self.state_, position])
old_covariance = self.covariance_
self.covariance_ = np.matlib.eye(old_covariance.shape[0] +
self.point_size_)
self.covariance_[:-self.point_size_,
:-self.point_size_] = old_covariance
# Update the registry.
self.registry_[p.GetID()] = self.size_
self.size_ += 1
# Update a landmark. This is a pure Kalman update.
def UpdateLandmark(self, p):
if p.GetID() not in self.registry_:
print "This landmark is not in the registry. Did not update."
return
# Extract index and position.
index = self.registry_[p.GetID()]
position = p.GetLocation()
# Generate observation vector z.
z = np.matlib.zeros(self.state_.shape)
z[index*self.point_size_:(index + 1)*self.point_size_] = position
# Generate measurement matrix H.
H = np.matlib.zeros(self.covariance_.shape)
H[index*self.point_size_:(index + 1)*self.point_size_,
index*self.point_size_:(index + 1)*self.point_size_] = \
np.matlib.eye(self.point_size_)
# Generate measurement covariance R.
R = np.matlib.zeros(self.covariance_.shape)
np.fill_diagonal(R, float("inf"))
R[index*self.point_size_:(index + 1)*self.point_size_,
index*self.point_size_:(index + 1)*self.point_size_] = \
np.matlib.eye(self.point_size_)
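        # Components not observed by this landmark keep an infinite measurement
        # variance, so their Kalman gain is (effectively) zero and their
        # estimates are left unchanged by the update.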
# Calculate innovation residual y and covariance S.
y = z - H * self.state_
S = H * self.covariance_ * H.T + R
# Calculate Kalman gain and posteriors.
K = self.covariance_ * H.T * np.linalg.inv(S)
self.state_ = self.state_ + K * y
self.covariance_ = (np.matlib.eye(len(z)) - K * H) * self.covariance_
# Visualize as a scatterplot.
def Visualize2D(self):
if self.point_size_ != 2:
print "Points must be in 2D."
return
x_coordinates = np.zeros(len(self.state_) / 2)
x_coordinates[:] = self.state_[0:len(self.state_):2].flatten()
y_coordinates = np.zeros(len(self.state_) / 2)
y_coordinates[:] = self.state_[1:len(self.state_):2].flatten()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(x_coordinates, y_coordinates, color="green")
return fig
# Visualize as a scatterplot.
def VisualizeLines2D(self, true_positions):
if self.point_size_ != 2:
print "Points must be in 2D."
return
if len(true_positions) != self.size_:
print "Incorrect number of true positions."
return
# Extract estimated coordinates.
x_coordinates = np.zeros(len(self.state_) / 2)
x_coordinates[:] = self.state_[0:len(self.state_):2].flatten()
y_coordinates = np.zeros(len(self.state_) / 2)
y_coordinates[:] = self.state_[1:len(self.state_):2].flatten()
# Extract true coordinates.
true_x = np.zeros(len(self.state_) / 2)
true_y = np.zeros(len(self.state_) / 2)
for ii, position in enumerate(true_positions):
true_x[ii] = position[0]
true_y[ii] = position[1]
# Plot.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(x_coordinates, y_coordinates, color="green")
ax.scatter(true_x, true_y, color="red")
for ii in range(self.size_):
ax.plot([true_x[ii], x_coordinates[ii]],
[true_y[ii], y_coordinates[ii]], 'b-', lw=2)
return fig
# Getters.
def Size(self):
return self.size_
def PointSize(self):
return self.point_size_
def State(self):
return self.state_
def Covariance(self):
return self.covariance_
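# Minimal usage sketch (hedged; the Landmark construction details are assumed):
#
#   m = Map()
#   m.AddLandmark(first_observation)    # first (noisy) observation of a landmark
#   m.UpdateLandmark(next_observation)  # later observations refine its estimate
#   fig = m.Visualize2D()               # scatter plot of the current estimates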
| bsd-3-clause |
aylward/ITKTubeTK | examples/archive/TubeGraphKernels/expdrive.py | 7 | 7820 | ##############################################################################
#
# Library: TubeTK
#
# Copyright 2010 Kitware Inc. 28 Corporate Drive,
# Clifton Park, NY, 12065, USA.
#
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
"""Data generation driver.
"""
__license__ = "Apache License, Version 2.0"
__author__ = "Roland Kwitt, Kitware Inc., 2013"
__email__ = "E-Mail: [email protected]"
__status__ = "Development"
import sys
import os
import json
import logging
import numpy as np
from itertools import *
from optparse import OptionParser
from sklearn.cross_validation import KFold
from sklearn.cross_validation import ShuffleSplit
import exputils as Utils
LOGGING_LEVELS = {
'critical': logging.CRITICAL,
'error': logging.ERROR,
'warning': logging.WARNING,
'info': logging.INFO,
'debug': logging.DEBUG}
def main(argv=None):
if argv is None:
argv = sys.argv
parser = OptionParser()
parser.add_option("", "--stage", help="Processing stage (0 = Run all)" , type="int" )
parser.add_option("", "--dest", help="Destination base directory", default="/tmp/")
parser.add_option("", "--data", help="Data file in JSON format (see README for format)")
parser.add_option("", "--cvruns", help="Number of cross-validation runs (1 == single split)", type="int", default=1)
parser.add_option("", "--config", help="Config file with relative executable paths")
parser.add_option("", "--cells", help="Number of CVT cells to use for ATLAS building", type="int", default=1000)
parser.add_option("", "--logat", help="Log at the specified logging level")
parser.add_option("", "--graphKernelType", help="Graph kernel type (see TubeGraphKernel)", type="int", default=0)
parser.add_option("", "--subtreeHeight", help="Subtree height of the WL subtree kernel (see TubeGraphKernel)", type="int", default=1)
parser.add_option("", "--defaultLabelType", help="Specify default labeling of graph nodes (see TubeGraphKernel)", type="int", default=0)
parser.add_option("", "--globalLabelFile", help="Specify a global label file to use")
parser.add_option("", "--segmentationImage", help="Image with brain segmentations (e.g., provided with SPL phantom)")
parser.add_option("", "--logto", help="Log to the specified file")
parser.add_option("", "--phantom", help="Phantom file to use")
    parser.add_option("", "--phantomType", help="Specify the phantom type that is used (Supported phantoms are: SPL, BrainWeb)")
(options, args) = parser.parse_args()
# Logger configuration
logging.basicConfig(level=LOGGING_LEVELS.get(options.logat, logging.NOTSET), filename=options.logto,
format='%(asctime)s [%(funcName)s] %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if (not os.path.exists(options.dest) or not os.path.isdir(options.dest)):
print "Error: Destination directory invalid!"
return -1
if (not Utils.check_file(options.data)):
print "Error: Data file not given or invalid!"
return -1
if (not Utils.check_file(options.config)):
print "Error: Config file is missing!"
return -1
if (options.phantom is None):
print "Error: No phantom given!"
return -1
if (options.phantomType is None):
print "Error: No phantom type given!"
return -1
config_fid = open(options.config).read()
config = json.loads(config_fid)
json_fid = open(options.data).read()
json_dat = json.loads( json_fid )
subject_dir_list = [] # Directories with subject data
subject_lab_list = [] # The group label for each subject, e.g., 'Male', 'Female'
for e in json_dat["Data"]:
subject_dir_list.append(e["Source"])
subject_lab_list.append(e["Group"].rstrip())
logger = logging.getLogger()
N = len(json_dat["Data"])
cv = ShuffleSplit(N, n_iter=options.cvruns, test_size=0.3, random_state=0)
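    # One random 70/30 train/test split per cross-validation run; the fixed
    # random_state keeps the splits reproducible across invocations.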
Utils.ensure_dir(options.dest)
logger.debug("Destination directory = %s" % options.dest)
logger.debug("Phantom file = %s", options.phantom)
logger.debug("#CVT cells = %d", options.cells)
logger.debug("Cross-validation runs = %d" % options.cvruns)
logger.debug("#Subjects = %d", N)
try:
stage_opt = dict()
stage_opt["groupLabel"] = subject_lab_list
stage_opt["subjects"] = subject_dir_list
stage_opt["dest"] = options.dest
stage_opt["phantom"] = options.phantom
stage_opt["phantomType"] = options.phantomType
stage_opt["cells"] = options.cells
stage_opt["graphKernelType"] = options.graphKernelType
stage_opt["subtreeHeight"] = options.subtreeHeight
stage_opt["defaultLabelType"] = options.defaultLabelType
stage_opt["globalLabelFile"] = options.globalLabelFile
stage_opt["segmentationImage"] = options.segmentationImage
stage_opt["randomSeed"] = 1234 # Random seed for repeatability
if (options.stage == 1):
# MRA ToF images (includes skull)
stage_opt["mra_wSkull_glob"] = "*MRA.mha"
# MRI T1 images (includes skull)
stage_opt["mri_wSkull_glob"] = "*T1-Flash.mha"
# MRA Tof images (skull-stripped)
stage_opt["mri_nSkull_glob"] = "*SkullStripped*.mha"
Utils.compute_registrations(config, stage_opt)
elif (options.stage == 2):
Utils.transform_tubes_to_phantom(config, stage_opt)
elif (options.stage == 3):
Utils.compute_ind_atlas_edm(config, stage_opt)
elif (options.stage > 3 and options.stage < 24):
for cv_id,(train,test) in enumerate(cv):
stage_opt["id"] = cv_id + 1 # Cross-validation ID
stage_opt["trn"] = train # Trn indices
stage_opt["tst"] = test # Tst indices
res = {
4 : Utils.compute_grp_atlas_sum,
5 : Utils.compute_grp_atlas_cvt,
6 : Utils.compute_ind_graph_grp,
7 : Utils.compute_grp_graph_grp,
8 : Utils.compute_glo_atlas_edm,
9 : Utils.compute_glo_atlas_cvt,
10: Utils.compute_ind_graph_common,
11: Utils.compute_grp_graph_common,
12: Utils.compute_ind_graph_grp_testing,
13: Utils.compute_ind_graph_common_testing,
14: Utils.compute_ind_tube_prob_testing,
15: Utils.compute_ind_graph_prob_testing,
16: Utils.compute_glo_label_map,
17: Utils.compute_trn_gk,
18: Utils.compute_tst_gk,
19: Utils.compute_full_gk,
20: Utils.trn_classifier,
21: Utils.tst_classifier,
22: Utils.evaluate_classifier_from_full_gk,
23: Utils.compute_distance_signatures
}[options.stage](config, stage_opt)
else:
print "Error: Stage %d not available!" % options.stage
except Exception as e:
print e
return -1
if __name__ == "__main__":
sys.exit( main() )
| apache-2.0 |
sandeepkrjha/pgmpy | pgmpy/estimators/base.py | 5 | 16290 | #!/usr/bin/env python
from warnings import warn
import numpy as np
import pandas as pd
from scipy.stats import chisquare
class BaseEstimator(object):
def __init__(self, data, state_names=None, complete_samples_only=True):
"""
Base class for estimators in pgmpy; `ParameterEstimator`,
`StructureEstimator` and `StructureScore` derive from this class.
Parameters
----------
data: pandas DataFrame object
            dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
"""
self.data = data
self.complete_samples_only = complete_samples_only
variables = list(data.columns.values)
if not isinstance(state_names, dict):
self.state_names = {var: self._collect_state_names(var) for var in variables}
else:
self.state_names = dict()
for var in variables:
if var in state_names:
if not set(self._collect_state_names(var)) <= set(state_names[var]):
raise ValueError("Data contains unexpected states for variable '{0}'.".format(str(var)))
self.state_names[var] = sorted(state_names[var])
else:
self.state_names[var] = self._collect_state_names(var)
def _collect_state_names(self, variable):
"Return a list of states that the variable takes in the data"
states = sorted(list(self.data.ix[:, variable].dropna().unique()))
return states
def state_counts(self, variable, parents=[], complete_samples_only=None):
"""
        Return counts of how often each state of 'variable' occurred in the data.
If a list of parents is provided, counting is done conditionally
for each state configuration of the parents.
Parameters
----------
variable: string
Name of the variable for which the state count is to be done.
parents: list
Optional list of variable parents, if conditional counting is desired.
Order of parents in list is reflected in the returned DataFrame
complete_samples_only: bool
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.NaN` somewhere are ignored. If `False` then
every row where neither the variable nor its parents are `np.NaN` is used.
Desired default behavior can be passed to the class constructor.
Returns
-------
state_counts: pandas.DataFrame
Table with state counts for 'variable'
Examples
--------
>>> import pandas as pd
>>> from pgmpy.estimators import BaseEstimator
>>> data = pd.DataFrame(data={'A': ['a1', 'a1', 'a2'],
'B': ['b1', 'b2', 'b1'],
'C': ['c1', 'c1', 'c2']})
>>> estimator = BaseEstimator(data)
>>> estimator.state_counts('A')
A
a1 2
a2 1
>>> estimator.state_counts('C', parents=['A', 'B'])
A a1 a2
B b1 b2 b1 b2
C
c1 1 1 0 0
c2 0 0 1 0
"""
# default for how to deal with missing data can be set in class constructor
if complete_samples_only is None:
complete_samples_only = self.complete_samples_only
# ignores either any row containing NaN, or only those where the variable or its parents is NaN
data = self.data.dropna() if complete_samples_only else self.data.dropna(subset=[variable] + parents)
if not parents:
            # count how often each state of 'variable' occurred
state_count_data = data.ix[:, variable].value_counts()
state_counts = state_count_data.reindex(self.state_names[variable]).fillna(0).to_frame()
else:
parents_states = [self.state_names[parent] for parent in parents]
            # count how often each state of 'variable' occurred, conditional on parents' states
state_count_data = data.groupby([variable] + parents).size().unstack(parents)
# reindex rows & columns to sort them and to add missing ones
# missing row = some state of 'variable' did not occur in data
# missing column = some state configuration of current 'variable's parents
# did not occur in data
row_index = self.state_names[variable]
column_index = pd.MultiIndex.from_product(parents_states, names=parents)
state_counts = state_count_data.reindex(index=row_index, columns=column_index).fillna(0)
return state_counts
def test_conditional_independence(self, X, Y, Zs=[]):
"""Chi-square conditional independence test.
Tests the null hypothesis that X is independent from Y given Zs.
This is done by comparing the observed frequencies with the expected
frequencies if X,Y were conditionally independent, using a chisquare
deviance statistic. The expected frequencies given independence are
`P(X,Y,Zs) = P(X|Zs)*P(Y|Zs)*P(Zs)`. The latter term can be computed
        as `P(X,Zs)*P(Y,Zs)/P(Zs)`.
Parameters
----------
X: int, string, hashable object
A variable name contained in the data set
Y: int, string, hashable object
A variable name contained in the data set, different from X
Zs: list of variable names
A list of variable names contained in the data set, different from X and Y.
This is the separating set that (potentially) makes X and Y independent.
Default: []
Returns
-------
chi2: float
The chi2 test statistic.
p_value: float
The p_value, i.e. the probability of observing the computed chi2
statistic (or an even higher value), given the null hypothesis
that X _|_ Y | Zs.
sufficient_data: bool
A flag that indicates if the sample size is considered sufficient.
As in [4], require at least 5 samples per parameter (on average).
That is, the size of the data set must be greater than
`5 * (c(X) - 1) * (c(Y) - 1) * prod([c(Z) for Z in Zs])`
(c() denotes the variable cardinality).
References
----------
[1] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 18.2.2.3 (page 789)
[2] Neapolitan, Learning Bayesian Networks, Section 10.3 (page 600ff)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[3] Chi-square test https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test#Test_of_independence
[4] Tsamardinos et al., The max-min hill-climbing BN structure learning algorithm, 2005, Section 4
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(50000, 4)), columns=list('ABCD'))
>>> data['E'] = data['A'] + data['B'] + data['C']
>>> c = ConstraintBasedEstimator(data)
>>> print(c.test_conditional_independence('A', 'C')) # independent
(0.95035644482050263, 0.8132617142699442, True)
>>> print(c.test_conditional_independence('A', 'B', 'D')) # independent
(5.5227461320130899, 0.59644169242588885, True)
>>> print(c.test_conditional_independence('A', 'B', ['D', 'E'])) # dependent
(9192.5172226063387, 0.0, True)
"""
if isinstance(Zs, (frozenset, list, set, tuple,)):
Zs = list(Zs)
else:
Zs = [Zs]
num_params = ((len(self.state_names[X])-1) *
(len(self.state_names[Y])-1) *
np.prod([len(self.state_names[Z]) for Z in Zs]))
sufficient_data = len(self.data) >= num_params * 5
if not sufficient_data:
warn("Insufficient data for testing {0} _|_ {1} | {2}. ".format(X, Y, Zs) +
"At least {0} samples recommended, {1} present.".format(5 * num_params, len(self.data)))
# compute actual frequency/state_count table:
# = P(X,Y,Zs)
XYZ_state_counts = pd.crosstab(index=self.data[X],
columns=[self.data[Y]] + [self.data[Z] for Z in Zs])
# reindex to add missing rows & columns (if some values don't appear in data)
row_index = self.state_names[X]
column_index = pd.MultiIndex.from_product(
[self.state_names[Y]] + [self.state_names[Z] for Z in Zs], names=[Y]+Zs)
XYZ_state_counts = XYZ_state_counts.reindex(index=row_index, columns=column_index).fillna(0)
# compute the expected frequency/state_count table if X _|_ Y | Zs:
# = P(X|Zs)*P(Y|Zs)*P(Zs) = P(X,Zs)*P(Y,Zs)/P(Zs)
if Zs:
XZ_state_counts = XYZ_state_counts.sum(axis=1, level=Zs) # marginalize out Y
YZ_state_counts = XYZ_state_counts.sum().unstack(Zs) # marginalize out X
else:
XZ_state_counts = XYZ_state_counts.sum(axis=1)
YZ_state_counts = XYZ_state_counts.sum()
Z_state_counts = YZ_state_counts.sum() # marginalize out both
XYZ_expected = pd.DataFrame(index=XYZ_state_counts.index, columns=XYZ_state_counts.columns)
for X_val in XYZ_expected.index:
if Zs:
for Y_val in XYZ_expected.columns.levels[0]:
XYZ_expected.loc[X_val, Y_val] = (XZ_state_counts.loc[X_val] *
YZ_state_counts.loc[Y_val] /
Z_state_counts).values
else:
for Y_val in XYZ_expected.columns:
XYZ_expected.loc[X_val, Y_val] = (XZ_state_counts.loc[X_val] *
YZ_state_counts.loc[Y_val] /
float(Z_state_counts))
observed = XYZ_state_counts.values.flatten()
expected = XYZ_expected.fillna(0).values.flatten()
# remove elements where the expected value is 0;
# this also corrects the degrees of freedom for chisquare
observed, expected = zip(*((o, e) for o, e in zip(observed, expected) if not e == 0))
chi2, significance_level = chisquare(observed, expected)
return (chi2, significance_level, sufficient_data)
class ParameterEstimator(BaseEstimator):
def __init__(self, model, data, **kwargs):
"""
Base class for parameter estimators in pgmpy.
Parameters
----------
model: pgmpy.models.BayesianModel or pgmpy.models.MarkovModel or pgmpy.models.NoisyOrModel
model for which parameter estimation is to be done
data: pandas DataFrame object
            dataframe object with column names identical to the variable names of the model.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
"""
if not set(model.nodes()) <= set(data.columns.values):
raise ValueError("variable names of the model must be identical to column names in data")
self.model = model
super(ParameterEstimator, self).__init__(data, **kwargs)
def state_counts(self, variable, **kwargs):
"""
        Return counts of how often each state of 'variable' occurred in the data.
If the variable has parents, counting is done conditionally
for each state configuration of the parents.
Parameters
----------
variable: string
Name of the variable for which the state count is to be done.
complete_samples_only: bool
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.NaN` somewhere are ignored. If `False` then
every row where neither the variable nor its parents are `np.NaN` is used.
Desired default behavior can be passed to the class constructor.
Returns
-------
state_counts: pandas.DataFrame
Table with state counts for 'variable'
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import ParameterEstimator
>>> model = BayesianModel([('A', 'C'), ('B', 'C')])
>>> data = pd.DataFrame(data={'A': ['a1', 'a1', 'a2'],
'B': ['b1', 'b2', 'b1'],
'C': ['c1', 'c1', 'c2']})
>>> estimator = ParameterEstimator(model, data)
>>> estimator.state_counts('A')
A
a1 2
a2 1
>>> estimator.state_counts('C')
A a1 a2
B b1 b2 b1 b2
C
c1 1 1 0 0
c2 0 0 1 0
"""
parents = sorted(self.model.get_parents(variable))
return super(ParameterEstimator, self).state_counts(variable, parents=parents, **kwargs)
def get_parameters(self):
pass
class StructureEstimator(BaseEstimator):
def __init__(self, data, **kwargs):
"""
Base class for structure estimators in pgmpy.
Parameters
----------
data: pandas DataFrame object
            dataframe object where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
state_names: dict (optional)
A dict indicating, for each variable, the discrete set of states (or values)
that the variable can take. If unspecified, the observed values in the data set
are taken to be the only possible states.
complete_samples_only: bool (optional, default `True`)
Specifies how to deal with missing data, if present. If set to `True` all rows
that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
every row where neither the variable nor its parents are `np.NaN` is used.
This sets the behavior of the `state_count`-method.
"""
super(StructureEstimator, self).__init__(data, **kwargs)
def estimate(self):
pass
| mit |
magnunor/hyperspy | hyperspy/misc/holography/tools.py | 4 | 3063 | # -*- coding: utf-8 -*-
# Copyright 2007-2017 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft2, fftshift
import logging
_logger = logging.getLogger(__name__)
def calculate_carrier_frequency(holo_data, sb_position, scale):
"""
Calculates fringe carrier frequency of a hologram
Parameters
----------
holo_data: ndarray
The data of the hologram.
sb_position: tuple
        Position of the sideband with respect to the non-shifted FFT
scale: tuple
Scale of the axes that will be used for the calculation.
Returns
-------
Carrier frequency
"""
shape = holo_data.shape
origins = [np.array((0, 0)),
np.array((0, shape[1])),
np.array((shape[0], shape[1])),
np.array((shape[0], 0))]
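    # In a non-shifted FFT the zero-frequency component sits at the array
    # corners, so the carrier frequency is the distance from the sideband to
    # the nearest corner, converted to physical units via the axis scales.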
origin_index = np.argmin(
[np.linalg.norm(origin - sb_position) for origin in origins])
return np.linalg.norm(np.multiply(
origins[origin_index] - sb_position, scale))
def estimate_fringe_contrast_fourier(
holo_data, sb_position, apodization='hanning'):
"""
Estimates average fringe contrast of a hologram by dividing amplitude
of maximum pixel of sideband by amplitude of FFT's origin.
Parameters
----------
holo_data: ndarray
The data of the hologram.
sb_position: tuple
        Position of the sideband with respect to the non-shifted FFT
apodization: string, None
        Use 'hanning', 'hamming' or None to apply an apodization window in real space before the FFT.
        Apodization is typically needed to suppress streaking caused by the sharp edges
        of the image, which otherwise leads to underestimation of the fringe contrast. (Default: 'hanning')
Returns
-------
Fringe contrast as a float
"""
holo_shape = holo_data.shape
if apodization:
if apodization == 'hanning':
window_x = np.hanning(holo_shape[0])
window_y = np.hanning(holo_shape[1])
elif apodization == 'hamming':
window_x = np.hamming(holo_shape[0])
window_y = np.hamming(holo_shape[1])
window_2d = np.sqrt(np.outer(window_x, window_y))
data = holo_data * window_2d
else:
data = holo_data
fft_exp = fft2(data)
return 2 * np.abs(fft_exp[tuple(sb_position)]) / np.abs(fft_exp[0, 0])
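# Minimal usage sketch (hedged; `holo` and `sb_pos` are assumed to be a 2-D
# hologram array and a sideband position in non-shifted FFT coordinates):
#
#   contrast = estimate_fringe_contrast_fourier(holo, sb_pos, apodization='hanning')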
| gpl-3.0 |
thientu/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
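    # Mean L2-regularized logistic loss:
    #   mean(log(1 + exp(-y * (X w + b)))) + ||w||^2 / (2 * C * n_samples)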
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
ztultrebor/BARKEVIOUS | oddsmaker.py | 1 | 2120 | # coding: utf-8
#read in libraries
import pandas as pd
import datetime
def read_odds(filenm, today_schedule):
odds = pd.read_csv(filenm)
if any([date != str(datetime.date.today()) for date in odds['Date']]):
        raise ValueError('Check that Odds.csv has been updated for today')
conversion = { 'ATL':'Atlanta Hawks',
'BOS':'Boston Celtics',
'BRK':'Brooklyn Nets',
'CHA':'Charlotte Hornets',
'CHI':'Chicago Bulls',
'CLE':'Cleveland Cavaliers',
'DAL':'Dallas Mavericks',
'DEN':'Denver Nuggets',
'DET':'Detroit Pistons',
'GS':'Golden State Warriors',
'HOU':'Houston Rockets',
'IND':'Indiana Pacers',
'LAC':'Los Angeles Clippers',
'LAL':'Los Angeles Lakers',
'MEM':'Memphis Grizzlies',
'MIA':'Miami Heat',
'MIL':'Milwaukee Bucks',
'MIN':'Minnesota Timberwolves',
'NO':'New Orleans Pelicans',
'NY':'New York Knicks',
'OKC':'Oklahoma City Thunder',
'ORL':'Orlando Magic',
'PHI':'Philadelphia 76ers',
'PHX':'Phoenix Suns',
'POR':'Portland Trail Blazers',
'SAC':'Sacramento Kings',
'SA':'San Antonio Spurs',
'TOR':'Toronto Raptors',
'UTA':'Utah Jazz',
'WSH':'Washington Wizards'}
teams = [conversion[team] for team in odds.Team]
odds.Team = teams
prob = []
for team in odds.Team:
if not today_schedule[today_schedule.Home==team].empty:
prob.append(today_schedule.Prob[today_schedule.Home==team].iloc[0])
elif not today_schedule[today_schedule.Away==team].empty:
prob.append(1 - today_schedule.Prob[today_schedule.Away==team].iloc[0])
odds['Prob'] = prob
return odds
| mit |
QuantumElephant/horton | horton/scripts/atomdb.py | 4 | 22763 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
"""Code used by ``horton-atomdb.py``"""
from glob import glob
import os
import re
import stat
from string import Template as BaseTemplate
import numpy as np
import matplotlib.pyplot as pt
from horton.io.iodata import IOData
from horton.log import log
from horton.periodic import periodic
from horton.scripts.common import iter_elements
from horton.units import angstrom
__all__ = [
'iter_mults', 'iter_states', 'plot_atoms',
'Template', 'EnergyTable', 'atom_programs',
]
# Presets for spin multiplicities. The first element is according to Hund's rule.
# Following elements are reasonable.
mult_presets = {
1: [2],
2: [1, 3],
3: [2, 4],
4: [1, 3],
5: [2, 4],
6: [3, 5, 1],
7: [4, 2],
8: [3, 1],
9: [2],
10: [1],
11: [2],
12: [1, 3],
13: [2, 4],
14: [3, 5, 1],
15: [4, 2],
16: [3, 1],
17: [2],
18: [1],
19: [2],
20: [1, 3],
21: [2, 4],
22: [3, 5, 1],
23: [4, 6, 2],
24: [7, 5, 3, 1],
25: [6, 4, 2],
26: [5, 3, 1],
27: [4, 2],
28: [3, 1],
29: [2],
30: [1],
31: [2, 4, 6],
32: [3, 1, 5],
33: [4, 2],
34: [3, 1],
35: [2],
36: [1],
37: [2],
38: [1, 3],
39: [2, 4],
40: [3, 1, 5],
41: [6, 4, 2],
42: [7, 5, 3, 1],
43: [6, 4, 2],
44: [5, 3, 1],
45: [4, 2],
46: [1, 3],
47: [2],
48: [1],
49: [2, 4],
50: [3, 1, 5],
51: [4, 2],
52: [3, 1],
53: [2],
54: [1],
55: [2],
56: [1, 3],
57: [2, 4],
58: [3, 1, 5],
59: [4, 2, 6],
60: [5, 1, 3, 7],
61: [6, 2, 4, 8],
62: [7, 1, 3, 5, 9],
63: [8, 2, 4, 6, 10],
64: [9, 1, 3, 5, 7, 11],
65: [6, 2, 4, 8, 10, 12],
66: [5, 1, 3, 7, 9, 11],
67: [4, 2, 6, 8, 10],
68: [3, 1, 5, 7, 9],
69: [2, 4, 6, 8],
70: [1, 2, 5, 7],
71: [2, 4, 6],
72: [3, 5, 1],
73: [4, 2, 6],
74: [5, 1, 3, 7],
75: [6, 2, 4, 8],
76: [5, 1, 3],
77: [4, 2],
78: [3, 1],
79: [2],
80: [1],
81: [2, 4],
82: [3, 1, 5],
83: [4, 2],
84: [3, 1],
85: [2],
86: [1],
}
def iter_mults(nel, hund):
    """Iterate over atomic spin multiplicities for the given number of electrons
**Arguments:**
nel
The number of electrons (1-56)
hund
When set to True, only one spin multiplicity is returned. Otherwise
several reasonable spin multiplicities are given.
"""
if hund:
yield mult_presets[nel][0]
else:
for mult in mult_presets[nel]:
yield mult
def iter_states(elements, max_cation, max_anion, hund):
"""Iterate over all requested atomic states
**Arguments:**
elements
A string that is suitable for ``iter_elements``
max_cation
The limit for the most positive cation
max_anion
The limit for the most negative anion
hund
Flag to adhere to hund's rule for the spin multiplicities.
"""
for number in iter_elements(elements):
# Loop over all charge states for this element
for charge in xrange(-max_anion, max_cation+1):
nel = number - charge
if nel <= 0:
continue
# loop over multiplicities
for mult in iter_mults(nel, hund):
yield number, charge, mult
def plot_atoms(proatomdb, dn='.'):
"""Make PNG figures for all atoms in a pro-atom database.
Warning: this script writes a bunch of PNG files!
Parameters
----------
proatomdb : horton.part.proatomdb.ProAtomDB
A database of pro-atoms.
dn : str
Directory where the PNG files will be written. Local directory if not given.
"""
def get_color(index):
"""Return a nice color for a given index."""
colors = ["#FF0000", "#FFAA00", "#00AA00", "#00AAFF", "#0000FF", "#FF00FF", "#777777"]
return colors[index % len(colors)]
lss = {True: '-', False: ':'}
for number in proatomdb.get_numbers():
r = proatomdb.get_rgrid(number).radii
symbol = periodic[number].symbol
charges = proatomdb.get_charges(number)
suffix = '%03i_%s' % (number, symbol.lower().rjust(2, '_'))
# The density (rho)
pt.clf()
for i, charge in enumerate(charges):
record = proatomdb.get_record(number, charge)
y = record.rho
ls = lss[record.safe]
color = get_color(i)
label = 'q=%+i' % charge
pt.semilogy(r/angstrom, y, lw=2, ls=ls, label=label, color=color)
pt.xlim(0, 3)
pt.ylim(ymin=1e-5)
pt.xlabel('Distance from the nucleus [A]')
pt.ylabel('Spherically averaged density [Bohr**-3]')
pt.title('Proatoms for element %s (%i)' % (symbol, number))
pt.legend(loc=0)
fn_png = '%s/dens_%s.png' % (dn, suffix)
pt.savefig(fn_png)
if log.do_medium:
log('Written', fn_png)
# 4*pi*r**2*rho
pt.clf()
for i, charge in enumerate(charges):
record = proatomdb.get_record(number, charge)
y = record.rho
ls = lss[record.safe]
color = get_color(i)
label = 'q=%+i' % charge
pt.plot(r/angstrom, 4*np.pi*r**2*y, lw=2, ls=ls, label=label, color=color)
pt.xlim(0, 3)
pt.ylim(ymin=0.0)
pt.xlabel('Distance from the nucleus [A]')
pt.ylabel('4*pi*r**2*density [Bohr**-1]')
pt.title('Proatoms for element %s (%i)' % (symbol, number))
pt.legend(loc=0)
fn_png = '%s/rdens_%s.png' % (dn, suffix)
pt.savefig(fn_png)
if log.do_medium:
log('Written', fn_png)
fukui_data = []
if number - charges[0] == 1:
record0 = proatomdb.get_record(number, charges[0])
fukui_data.append((record0.rho, record0.safe, '%+i' % charges[0]))
for i, charge in enumerate(charges[1:]):
record0 = proatomdb.get_record(number, charge)
record1 = proatomdb.get_record(number, charges[i])
fukui_data.append((
record0.rho - record1.rho,
record0.safe and record1.safe,
'%+i-%+i' % (charge, charges[i])
))
# The Fukui functions
pt.clf()
for i, (f, safe, label) in enumerate(fukui_data):
ls = lss[safe]
color = get_color(i)
pt.semilogy(r/angstrom, f, lw=2, ls=ls, label=label, color=color, alpha=1.0)
pt.semilogy(r/angstrom, -f, lw=2, ls=ls, color=color, alpha=0.2)
pt.xlim(0, 3)
pt.ylim(ymin=1e-5)
pt.xlabel('Distance from the nucleus [A]')
pt.ylabel('Fukui function [Bohr**-3]')
pt.title('Proatoms for element %s (%i)' % (symbol, number))
pt.legend(loc=0)
fn_png = '%s/fukui_%s.png' % (dn, suffix)
pt.savefig(fn_png)
if log.do_medium:
log('Written', fn_png)
# 4*pi*r**2*Fukui
pt.clf()
for i, (f, safe, label) in enumerate(fukui_data):
ls = lss[safe]
color = get_color(i)
pt.plot(r/angstrom, 4*np.pi*r**2*f, lw=2, ls=ls, label=label, color=color)
pt.xlim(0, 3)
pt.xlabel('Distance from the nucleus [A]')
pt.ylabel('4*pi*r**2*Fukui [Bohr**-1]')
pt.title('Proatoms for element %s (%i)' % (symbol, number))
pt.legend(loc=0)
fn_png = '%s/rfukui_%s.png' % (dn, suffix)
pt.savefig(fn_png)
if log.do_medium:
log('Written', fn_png)
class Template(BaseTemplate):
"""A template with modifications to support inclusion of other files."""
idpattern = r'[_a-z0-9.:-]+'
def __init__(self, *args, **kwargs):
BaseTemplate.__init__(self, *args, **kwargs)
self._init_include_names()
self._load_includes()
def _init_include_names(self):
"""Return a list of include variables
The include variables in the template are variables of the form
${file:name} or ${line:name}. This routine lists all the names
encountered. Duplicates are eliminated.
"""
pattern = '%s{(?P<braced>%s)}' % (re.escape(self.delimiter), self.idpattern)
file_names = set([])
line_names = set([])
for mo in re.finditer(pattern, self.template):
braced = mo.group('braced')
if braced is not None and braced.startswith('file:'):
file_names.add(braced[5:])
if braced is not None and braced.startswith('line:'):
line_names.add(braced[5:])
self.file_names = list(file_names)
self.line_names = list(line_names)
    def _load_includes(self):
        """Load the contents of all included files and line tables."""
self.includes = []
# Load files
for name in self.file_names:
records = []
for fn in sorted(glob('%s.[0-9][0-9][0-9]_[0-9][0-9][0-9]_[0-9][0-9]' % name)):
with open(fn) as f:
s = f.read()
# chop of one final newline if present (mostly the case)
if s[-1] == '\n':
s = s[:-1]
number = int(fn[-10:-7])
pop = int(fn[-6:-3])
mult = int(fn[-2:])
records.append((number, pop, mult, s))
self.includes.append((name, 'file', records))
# Load lines
for name in self.line_names:
with open(name) as f:
records = []
for line in f:
# ignore empty lines
if len(line.strip()) == 0:
continue
number = int(line[:3])
assert line[3] == '_'
pop = int(line[4:7])
assert line[7] == '_'
mult = int(line[8:10])
assert line[10] == ' '
s = line[11:-1]
records.append((number, pop, mult, s))
self.includes.append((name, 'line', records))
def _log_includes(self):
# log the include names
if len(self.file_names) + len(self.line_names) > 0 and log.do_medium:
log('The following includes were detected in the template:')
for name, kind, records in self.includes:
log(' %s (%s)' % (name, kind))
                for n, p, m, s in records:
log(' %03i_%03i_%02i' % (n, p, m))
def get_subs(self, number, pop, mult):
subs = {}
for name, kind, records in self.includes:
found_s = None
for n, p, m, s in records:
if ((n==0) or (number==n)) and ((p==0) or (pop==p)) and ((m==0) or (mult==m)):
# match
found_s = s
break
if found_s is None:
raise KeyError('No matching include found for \'%s\' (%03i_%03i_%02i)' % (name, number, pop, mult))
            subs['%s:%s' % (kind, name)] = found_s
return subs
class EnergyTable(object):
def __init__(self):
self.all = {}
def add(self, number, pop, energy):
cases = self.all.setdefault(number, {})
cases[pop] = energy
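    # Convention used in log(): ionization energy IP = E(N-1) - E(N) and
    # electron affinity EA = E(N) - E(N+1), where N is the electron population.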
def log(self):
log(' Nr Pop Chg Energy Ionization Affinity')
log.hline()
for number, cases in sorted(self.all.iteritems()):
for pop, energy in sorted(cases.iteritems()):
energy_prev = cases.get(pop-1)
if energy_prev is None:
ip_str = ''
else:
ip_str = '% 18.10f' % (energy_prev - energy)
energy_next = cases.get(pop+1)
if energy_next is None:
ea_str = ''
else:
ea_str = '% 18.10f' % (energy - energy_next)
log('%3i %3i %+3i % 18.10f %18s %18s' % (
number, pop, number-pop, energy, ip_str, ea_str
))
log.blank()
class AtomProgram(object):
name = None
run_script = None
def write_input(self, number, charge, mult, template, do_overwrite):
# Directory stuff
nel = number - charge
dn_mult = '%03i_%s_%03i_q%+03i/mult%02i' % (
number, periodic[number].symbol.lower().rjust(2, '_'), nel, charge, mult)
# Figure out if we want to write
fn_inp = '%s/atom.in' % dn_mult
exists = os.path.isfile(fn_inp)
do_write = not exists or do_overwrite
if do_write:
try:
subs = template.get_subs(number, nel, mult)
except KeyError:
if log.do_warning:
log.warn('Could not find all subs for %03i.%03i.%03i. Skipping.' % (number, nel, mult))
return dn_mult, False
if not os.path.isdir(dn_mult):
os.makedirs(dn_mult)
with open(fn_inp, 'w') as f:
f.write(template.substitute(
subs,
charge=str(charge),
mult=str(mult),
number=str(number),
element=periodic[number].symbol,
))
if log.do_medium:
if exists:
log('Overwritten: ', fn_inp)
else:
log('Written new: ', fn_inp)
elif log.do_medium:
log('Not overwriting: ', fn_inp)
return dn_mult, do_write
def write_run_script(self):
# write the script
fn_script = 'run_%s.sh' % self.name
exists = os.path.isfile(fn_script)
if not exists:
with open(fn_script, 'w') as f:
print >> f, self.run_script
log('Written new: ', fn_script)
else:
log('Not overwriting: ', fn_script)
# make the script executable
os.chmod(fn_script, stat.S_IXUSR | os.stat(fn_script).st_mode)
def _get_energy(self, mol, dn_mult):
return mol.energy
def load_atom(self, dn_mult, ext):
fn = '%s/atom.%s' % (dn_mult, ext)
if not os.path.isfile(fn):
return None, None
try:
mol = IOData.from_file(fn)
except:
return None, None
mol.energy = self._get_energy(mol, dn_mult)
return mol, mol.energy
run_gaussian_script = """\
#!/bin/bash
# make sure %(name)s and formchk are available before running this script.
MISSING=0
if ! which %(name)s &>/dev/null; then echo "%(name)s binary not found."; MISSING=1; fi
if ! which formchk &>/dev/null; then echo "formchk binary not found."; MISSING=1; fi
if [ $MISSING -eq 1 ]; then echo "The required programs are not present on your system. Giving up."; exit -1; fi
function do_atom {
echo "Computing in ${1}"
cd ${1}
if [ -e atom.out ]; then
echo "Output file present in ${1}, not recomputing."
else
%(name)s atom.in > atom.out
RETCODE=$?
if [ $RETCODE == 0 ]; then
formchk atom.chk atom.fchk
rm -f atom.out.failed
else
# Rename the output of the failed job such that it gets recomputed
# when the run script is executed again.
mv atom.out atom.out.failed
fi
rm atom.chk
fi
cd -
}
for ATOMDIR in [01][0-9][0-9]_*_[01][0-9][0-9]_q[-+][0-9][0-9]/mult[0-9][0-9]; do
do_atom ${ATOMDIR}
done
"""
class G09AtomProgram(AtomProgram):
name = 'g09'
run_script = run_gaussian_script % {'name': 'g09'}
def write_input(self, number, charge, mult, template, do_overwrite):
if '%chk=atom.chk\n' not in template.template:
raise ValueError('The template must contain a line \'%chk=atom.chk\'')
return AtomProgram.write_input(self, number, charge, mult, template, do_overwrite)
def load_atom(self, dn_mult):
return AtomProgram.load_atom(self, dn_mult, 'fchk')
class G03AtomProgram(G09AtomProgram):
name = 'g03'
run_script = run_gaussian_script % {'name': 'g03'}
run_orca_script = """\
#!/bin/bash
# make sure orca and orca2mkl are available before running this script.
MISSING=0
if ! which orca &>/dev/null; then echo "orca binary not found."; MISSING=1; fi
if ! which orca_2mkl &>/dev/null; then echo "orca_2mkl binary not found."; MISSING=1; fi
if [ $MISSING -eq 1 ]; then echo "The required programs are not present on your system. Giving up."; exit -1; fi
function do_atom {
echo "Computing in ${1}"
cd ${1}
if [ -e atom.out ]; then
echo "Output file present in ${1}, not recomputing."
else
orca atom.in > atom.out
RETCODE=$?
if [ $RETCODE == 0 ]; then
orca_2mkl atom -molden
rm -f atom.out.failed
else
# Rename the output of the failed job such that it gets recomputed
# when the run script is executed again.
mv atom.out atom.out.failed
fi
fi
cd -
}
for ATOMDIR in [01][0-9][0-9]_*_[01][0-9][0-9]_q[-+][0-9][0-9]/mult[0-9][0-9]; do
do_atom ${ATOMDIR}
done
"""
class OrcaAtomProgram(AtomProgram):
name = 'orca'
run_script = run_orca_script
def _get_energy(self, mol, dn_mult):
with open('%s/atom.out' % dn_mult) as f:
for line in f:
if line.startswith('Total Energy :'):
return float(line[25:43])
def load_atom(self, dn_mult):
return AtomProgram.load_atom(self, dn_mult, 'molden.input')
run_cp2k_script = """\
#!/bin/bash
# Note: if you want to use an mpi-parallel CP2K binary, uncomment the following
# line and fill in the right binary and mpirun script:
#CP2K_BIN="mpirun -n4 cp2k.popt"
# Find a non-mpi CP2K binary if needed.
if [ -z "$CP2K_BIN" ]; then
# Find all potential non-mpi CP2K binaries in the $PATH
CP2K_BINS=$(find ${PATH//:/ } -name "cp2k.s*")
# Check for any known non-mpi cp2k binary name in order of preference:
for KNOWN_CP2K in cp2k.ssmp cp2k.sopt cp2k.sdbg; do
for TMP in ${CP2K_BINS}; do
if [ $(basename $TMP) == ${KNOWN_CP2K} ]; then
CP2K_BIN=$TMP
break
fi
done
        if [ -n "$CP2K_BIN" ]; then break; fi
done
MISSING=0
if [ -z $CP2K_BIN ]; then echo "No non-mpi CP2K binary found."; MISSING=1; fi
if [ $MISSING -eq 1 ]; then echo "The required programs are not present on your system. Giving up."; exit -1; fi
fi
echo "Using the following CP2K binary: $CP2K_BIN"
function do_atom {
echo "Computing in ${1}"
cd ${1}
if [ -e atom.cp2k.out ]; then
echo "Output file present in ${1}, not recomputing."
else
$CP2K_BIN atom.in > atom.cp2k.out
RETCODE=$?
if [ $RETCODE == 0 ]; then
rm -f atom.cp2k.out.failed
else
# Rename the output of the failed job such that it gets recomputed
# when the run script is executed again.
mv atom.cp2k.out atom.cp2k.out.failed
fi
fi
cd -
}
for ATOMDIR in [01][0-9][0-9]_*_[01][0-9][0-9]_q[-+][0-9][0-9]/mult[0-9][0-9]; do
do_atom ${ATOMDIR}
done
"""
class CP2KAtomProgram(AtomProgram):
name = 'cp2k'
run_script = run_cp2k_script
def write_input(self, number, charge, mult, template, do_overwrite):
if '&ATOM' not in template.template:
raise ValueError('The template must be a CP2K atom input. (\'&ATOM\' not found.)')
return AtomProgram.write_input(self, number, charge, mult, template, do_overwrite)
def load_atom(self, dn_mult):
return AtomProgram.load_atom(self, dn_mult, 'cp2k.out')
run_psi4_script = """\
#!/bin/bash
# make sure psi4 is available before running this script.
MISSING=0
if ! which psi4 &>/dev/null; then echo "psi4 binary not found."; MISSING=1; fi
if [ $MISSING -eq 1 ]; then echo "The required programs are not present on your system. Giving up."; exit -1; fi
function do_atom {
echo "Computing in ${1}"
cd ${1}
if [ -e atom.out ]; then
echo "Output file present in ${1}, not recomputing."
else
psi4 atom.in
RETCODE=$?
if [ $RETCODE == 0 ]; then
rm -f atom.out.failed
else
# Rename the output of the failed job such that it gets recomputed
# when the run script is executed again.
mv atom.out atom.out.failed
fi
fi
cd -
}
for ATOMDIR in [01][0-9][0-9]_*_[01][0-9][0-9]_q[-+][0-9][0-9]/mult[0-9][0-9]; do
do_atom ${ATOMDIR}
done
"""
class Psi4AtomProgram(AtomProgram):
name = 'psi4'
run_script = run_psi4_script
def _get_energy(self, mol, dn_mult):
with open('%s/atom.out' % dn_mult) as f:
for line in f:
if 'Final Energy' in line:
return float(line.split()[-1])
def write_input(self, number, charge, mult, template, do_overwrite):
found = False
for line in template.template.split('\n'):
words = line.lower().split()
if 'molden_write' in words and 'true' in words:
found = True
break
if not found:
raise ValueError('The template must contain a line with \'molden_write true\'.')
return AtomProgram.write_input(self, number, charge, mult, template, do_overwrite)
def load_atom(self, dn_mult):
return AtomProgram.load_atom(self, dn_mult, 'default.molden')
atom_programs = {}
for APC in globals().values():
    if isinstance(APC, type) and issubclass(APC, AtomProgram) and APC is not AtomProgram:
atom_programs[APC.name] = APC()
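# Example lookup (illustration only): a driver script would typically select a
# backend by name, e.g.
#   program = atom_programs['orca']   # an OrcaAtomProgram instance
#   program.write_run_script()        # writes run_orca.sh and makes it executable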
| gpl-3.0 |
trichter/sito | colormap.py | 1 | 3124 | #!/usr/bin/python
# by TR
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.colors
import os
import glob
CM_DATA = '/home/richter/Data/cm/'
def combine(cmaps, name, splitters=None, get_cdict=False):
if not splitters:
N = len(cmaps)
splitters = np.linspace(0, 1, N + 1)
cdict = {}
for i, m in enumerate(cmaps):
m = plt.get_cmap(m)
if hasattr(m, '_segmentdata'):
m = m._segmentdata
for color in m:
m[color] = np.array(m[color])
m[color][:, 0] = (splitters[i] + (m[color][:, 0] - m[color][0, 0]) /
(m[color][-1, 0] - m[color][0, 0]) *
(splitters[i + 1] - splitters[i]))
try:
cdict[color] = np.concatenate((cdict[color], m[color]))
except KeyError:
cdict[color] = m[color]
if get_cdict:
return cdict
else:
return matplotlib.colors.LinearSegmentedColormap(name, cdict)
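# Usage sketch (assumes the named colormaps are LinearSegmentedColormaps that
# carry a _segmentdata dict, as e.g. 'Blues' and 'Reds' do in matplotlib):
#   cm_blue_red = combine(['Blues_r', 'Reds'], 'blue_red', splitters=[0, 0.5, 1])
#   plt.imshow(np.random.rand(20, 20), cmap=cm_blue_red)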
def show_colormaps(mode='mpl', path=CM_DATA + '*.gpf', cmaps=None):
plt.rc('text', usetex=False)
a = np.outer(np.ones(10), np.arange(0, 1, 0.01))
plt.figure(figsize=(10, 5))
plt.subplots_adjust(top=0.99, bottom=0.01, left=0.01, right=0.8)
if mode == 'mpl':
cmaps = [m for m in plt.cm.datad if not m.endswith("_r")]
elif mode == 'local':
cmaps = [createColormapFromGPF(f) for f in glob.glob(path)]
elif cmaps is None:
raise ValueError("Mode has to be 'mpl' or 'local' or cmaps=list of cmaps")
cmaps.sort()
l = len(cmaps) + 1
for i, m in enumerate(cmaps):
plt.subplot(l, 1, i + 1)
plt.axis("off")
plt.imshow(a, aspect='auto', cmap=plt.get_cmap(m), origin="lower")
plt.annotate(m.name if hasattr(m, 'name') else m, (1, 0.5), xycoords='axes fraction',
fontsize=10, ha='left', va='center')
return cmaps
def createColormapFromGPF(file_, get_dict=False):
data = sp.loadtxt(file_)
cdict = {'red': np.take(data, (0, 1, 1), axis=1),
'green': np.take(data, (0, 2, 2), axis=1),
'blue': np.take(data, (0, 3, 3), axis=1)}
name = os.path.splitext(os.path.basename(file_))[0]
if get_dict:
return cdict
else:
return matplotlib.colors.LinearSegmentedColormap(name, cdict)
def getXcorrColormap(name='xcorr', get_dict=False):
cdict = {'red': ((0.00, 0, 0),
(0.35, 0, 0),
(0.50, 1, 1),
(0.65, 1, 1),
(1.00, 1, 1)),
'green': ((0.00, 0, 0),
(0.35, 1, 1),
(0.50, 1, 1),
(0.65, 1, 1),
(1.00, 0, 0)),
'blue': ((0.00, 1, 1),
(0.35, 1, 1),
(0.50, 1, 1),
(0.65, 0, 0),
(1.00, 0, 0))}
if get_dict:
return cdict
else:
return matplotlib.colors.LinearSegmentedColormap(name, cdict)
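# The map runs from blue through white to red (via cyan and yellow), which
# suits signed cross-correlation values; a usage sketch (hypothetical data):
#   cc = np.random.uniform(-1, 1, (50, 50))
#   plt.imshow(cc, cmap=getXcorrColormap(), vmin=-1, vmax=1)
#   plt.colorbar()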
if __name__ == '__main__':
pass
| mit |
MartinDelzant/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
SiLab-Bonn/monopix_daq | monopix_daq/analysis/plotting_base.py | 1 | 17517 | import numpy as np
import math
import logging
import shutil
import os,sys
import matplotlib
import random
import datetime
import tables
import matplotlib.pyplot as plt
from collections import OrderedDict
from scipy.optimize import curve_fit
from scipy.stats import norm
from matplotlib.figure import Figure
from matplotlib.artist import setp
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import colors, cm
from matplotlib import gridspec
from matplotlib.backends.backend_pdf import PdfPages
from decimal import Decimal
import matplotlib.ticker as ticker
COL_SIZE = 36 ##TODO change hard coded values
ROW_SIZE = 129
TITLE_COLOR = '#07529a'
OVERTEXT_COLOR = '#07529a'
import monopix_daq.analysis.utils
class PlottingBase(object):
def __init__(self, fout, save_png=False ,save_single_pdf=False):
self.logger = logging.getLogger()
#self.logger.setLevel(loglevel)
self.plot_cnt = 0
self.save_png = save_png
self.save_single_pdf = save_single_pdf
self.filename = fout
self.out_file = PdfPages(self.filename)
def _save_plots(self, fig, suffix=None, tight=True):
increase_count = False
bbox_inches = 'tight' if tight else ''
fig.tight_layout()
if suffix is None:
suffix = str(self.plot_cnt)
self.out_file.savefig(fig, bbox_inches=bbox_inches)
if self.save_png:
fig.savefig(self.filename[:-4] + '_' +
suffix + '.png') #, bbox_inches=bbox_inches)
increase_count = True
if self.save_single_pdf:
fig.savefig(self.filename[:-4] + '_' +
suffix + '.pdf') #, bbox_inches=bbox_inches)
increase_count = True
if increase_count:
self.plot_cnt += 1
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.out_file is not None and isinstance(self.out_file, PdfPages):
self.logger.info('Closing output PDF file: %s', str(self.out_file._file.fh.name))
self.out_file.close()
shutil.copyfile(self.filename, os.path.join(os.path.split(self.filename)[0], 'last_scan.pdf'))
def _add_title(self,text,fig):
#fig.subplots_adjust(top=0.85)
#y_coord = 0.92
#fig.text(0.1, y_coord, text, fontsize=12, color=OVERTEXT_COLOR, transform=fig.transFigure)
fig.suptitle(text, fontsize=12,color=OVERTEXT_COLOR)
def table_1value(self,dat,n_row=30,n_col=3,
page_title="Chip configurations"):
keys=np.sort(np.array(dat.keys()))
##fill table
cellText=[["" for i in range(n_col*2)] for j in range(n_row)]
for i,k in enumerate(keys):
cellText[i%n_row][i/n_row*2]=k
cellText[i%n_row][i/n_row*2+1]=dat[k]
colLabels=[]
colWidths=[]
for i in range(n_col):
colLabels.append("Parameter")
colWidths.append(0.2) ## width for param name
colLabels.append("Value")
colWidths.append(0.15) ## width for value
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
fig.patch.set_visible(False)
ax.set_adjustable('box')
ax.axis('off')
ax.axis('tight')
tab=ax.table(cellText=cellText,
colLabels=colLabels,
colWidths = colWidths,
loc='upper center')
tab.auto_set_font_size(False)
tab.set_fontsize(4)
for key, cell in tab.get_celld().items():
cell.set_linewidth(0.1)
if page_title is not None and len(page_title)>0:
self._add_title(page_title,fig)
tab.scale(1,0.7)
self.out_file.savefig(fig)
#self._save_plots(fig, suffix=None, tight=True)
#fig = Figure()
#FigureCanvas(fig)
#ax = fig.add_subplot(111)
#ax.set_adjustable('box')
def plot_2d_pixel_4(self, dat, page_title="Pixel configurations",
title=["Preamp","Inj","Mon","TDAC"],
x_axis_title="Column", y_axis_title="Row", z_axis_title="",
z_min=[0,0,0,0], z_max=[1,1,1,15]):
fig = Figure()
FigureCanvas(fig)
for i in range(4):
ax = fig.add_subplot(221+i)
cmap = cm.get_cmap('plasma')
cmap.set_bad('w')
cmap.set_over('r') # Make noisy pixels red
# if z_max[i]+2-z_min[i] < 20:
# bounds = np.linspace(start=z_min[i], stop=z_max[i] + 1,
# num=z_max[i]+2-z_min[i],
# endpoint=True)
# norm = colors.BoundaryNorm(bounds, cmap.N)
# else:
# norm = colors.BoundaryNorm()
im=ax.imshow(np.transpose(dat[i]),origin='lower',aspect="auto",
vmax=z_max[i]+1,vmin=z_min[i], interpolation='none',
cmap=cmap #, norm=norm
)
ax.set_title(title[i])
ax.set_ylim((-0.5, ROW_SIZE-0.5))
ax.set_xlim((-0.5, COL_SIZE-0.5))
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cb = fig.colorbar(im, cax=cax)
cb.set_label(z_axis_title)
if page_title is not None and len(page_title)>0:
fig.suptitle(page_title, fontsize=12,color=OVERTEXT_COLOR, y=1.05)
self._save_plots(fig)
def plot_1d_pixel_hists(self,hist2d_array, mask=None, bins=30,
top_axis_factor=None,
top_axis_title="Threshold [e]",
x_axis_title="Test pulse injection [V]",
y_axis_title="# of pixel",
dat_title=["TH=0.81V"],
page_title=None,
title="Threshold dispersion"):
if mask is None:
mask=np.ones([COL_SIZE, ROW_SIZE],dtype=int)
elif isinstance(mask,list):
mask=np.array(mask)
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.set_adjustable('box')
for hist2d in hist2d_array:
hist2d=hist2d[mask==1]
hist=ax.hist(hist2d.reshape([-1]),
bins=bins, histtype="step")
ax.set_xbound(hist[1][0],hist[1][-1])
ax.set_xlabel(x_axis_title)
ax.set_ylabel(y_axis_title)
if top_axis_factor is None:
ax.set_title(title,color=TITLE_COLOR)
else:
ax2=ax.twiny()
ax2.set_xbound(hist[1][0]*top_axis_factor,hist[1][-1]*top_axis_factor)
ax2.set_xlabel(top_axis_title)
pad=40
ax.set_title(title,pad=40,color=TITLE_COLOR)
if page_title is not None and len(page_title)>0:
self._add_title(page_title,fig)
self._save_plots(fig)
def plot_2d_pixel_hist(self, hist2d, page_title=None,
title="Hit Occupancy",
z_axis_title=None,
z_min=0, z_max=None):
if z_max == 'median':
z_max = 2.0 * np.ma.median(hist2d[hist2d>0])
elif z_max == 'maximum':
z_max = np.ma.max(hist2d)
elif z_max is None:
z_max = np.percentile(hist2d, q=90)
if np.any(hist2d > z_max):
z_max = 1.1 * z_max
if hist2d.all() is np.ma.masked:
z_max = 1.0
if z_min is None:
z_min = np.ma.min(hist2d)
if z_min == z_max or hist2d.all() is np.ma.masked:
z_min = 0
x_axis_title="Column"
y_axis_title="Row"
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.set_adjustable('box')
#extent = [0.5, 400.5, 192.5, 0.5]
bounds = np.linspace(start=z_min, stop=z_max, num=255, endpoint=True)
cmap = cm.get_cmap('viridis')
cmap.set_bad('k')
cmap.set_over('r') # Make noisy pixels red
cmap.set_under('w')
#norm = colors.BoundaryNorm(bounds, cmap.N)
im = ax.imshow(np.transpose(hist2d), interpolation='none', aspect='auto',
vmax=z_max,vmin=z_min,
cmap=cmap, # norm=norm,
origin='lower') # TODO: use pcolor or pcolormesh
ax.set_ylim((-0.5, ROW_SIZE-0.5))
ax.set_xlim((-0.5, COL_SIZE-0.5))
ax.set_title(title + r' ($\Sigma$ = {0})'.format((0 if hist2d.all() is np.ma.masked else np.ma.sum(hist2d))), color=TITLE_COLOR)
ax.set_xlabel(x_axis_title)
ax.set_ylabel(y_axis_title)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
cb = fig.colorbar(im, cax=cax)
cb.set_label(z_axis_title)
if page_title is not None and len(page_title)>0:
self._add_title(page_title,fig)
self._save_plots(fig)
def plot_2d_hist(self, hist2d, bins=None,
page_title=None,
title="Hit Occupancy",
x_axis_title="Test pulse injection [V]",
y_axis_title="Counts",
z_axis_title=None, z_min=1, z_max=None, z_scale="lin"):
if z_max == 'median':
z_max = 2 * np.ma.median(hist2d)
elif z_max == 'maximum':
z_max = np.ma.max(hist2d)*1.1
elif z_max is None:
z_max = np.percentile(hist2d, q=90)
if np.any(hist2d > z_max):
z_max = 1.1 * z_max
if z_max < 1 or hist2d.all() is np.ma.masked:
z_max = 1.0
if z_min is None:
z_min = np.ma.min(hist2d)
if z_min == z_max or hist2d.all() is np.ma.masked:
z_min = 0
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.set_adjustable('box')
bounds = np.linspace(start=z_min, stop=z_max + 1, num=255, endpoint=True)
cmap = cm.get_cmap('viridis')
cmap.set_over('r')
cmap.set_under('w')
if z_scale=="log":
norm = colors.LogNorm()
cmap.set_bad('w')
else:
norm = None
cmap.set_bad('k')
im = ax.imshow(np.transpose(hist2d), interpolation='none', aspect='auto',
vmax=z_max+1,vmin=z_min,
cmap=cmap,norm=norm,
extent=[bins[0][0],bins[0][-1],bins[1][0],bins[1][-1]],
origin='lower')
ax.set_title(title + r' ($\Sigma$ = {0})'.format((0 if hist2d.all() is np.ma.masked else np.ma.sum(hist2d))), color=TITLE_COLOR)
ax.set_xlabel(x_axis_title)
ax.set_ylabel(y_axis_title)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
cb = fig.colorbar(im, cax=cax)
cb.set_label(z_axis_title)
if page_title is not None and len(page_title)>0:
self._add_title(page_title,fig)
self._save_plots(fig)
def plot_2d_hist_4(self, dat, page_title="Pixel configurations",
bins=None,
title=["Preamp","Inj","Mon","TDAC"],
x_axis_title="Column",
y_axis_title="Row",
z_axis_title="",
z_min=[0,0,0,0], z_max=[1,1,1,15]):
fig = Figure()
FigureCanvas(fig)
for i in range(4):
ax = fig.add_subplot(221+i)
if z_max[i]=='maximum':
z_max[i]=np.max(dat[i])
cmap = cm.get_cmap('viridis')
cmap.set_bad('w')
cmap.set_over('r') # Make noisy pixels red
im=ax.imshow(np.transpose(dat[i]),origin='lower',aspect="auto",
vmax=z_max[i]+1,vmin=z_min[i], interpolation='none',
extent=[bins[0][0],bins[0][-1],bins[1][0],bins[1][-1]],
cmap=cmap #, norm=norm
)
ax.set_title(title[i])
#ax.set_ylim((-0.5, ROW_SIZE-0.5))
#ax.set_xlim((-0.5, COL_SIZE-0.5))
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cb = fig.colorbar(im, cax=cax)
cb.set_label(z_axis_title)
if page_title is not None and len(page_title)>0:
fig.suptitle(page_title, fontsize=12,color=OVERTEXT_COLOR, y=1.05)
self._save_plots(fig)
def plot_scurve(self,dat,
top_axis_factor=None,
top_axis_title="Threshold [e]",
x_axis_title="Test pulse injection [V]",
y_axis_title="# of pixel",
y_max=200,
y_min=None,
x_min=None,
x_max=None,
reverse=True,
dat_title=["TH=0.81V"],
page_title=None,
title="Pixel xx-xx"):
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.set_adjustable('box')
for i, d in enumerate(dat):
color = next(ax._get_lines.prop_cycler)['color']
ax.plot(d["x"], d["y"],linestyle="", marker="o",color=color,label=dat_title[i])
if np.isnan(d["A"]):
continue
x,y=monopix_daq.analysis.utils.scurve_from_fit(d["x"], d["A"],d["mu"],d["sigma"],reverse=reverse,n=500)
ax.plot(x,y,linestyle="-", marker="",color=color)
if x_min is None:
x_min=np.min(d["x"])
if x_max is None:
x_max=np.max(d["x"])
if y_min is None:
y_min=np.min(d["y"])
if y_max is None:
y_max=np.max(d["y"])
ax.set_xbound(x_min,x_max)
ax.set_ybound(y_min,y_max)
ax.set_xlabel(x_axis_title)
ax.set_ylabel(y_axis_title)
if top_axis_factor is None:
ax.set_title(title,color=TITLE_COLOR)
else:
ax2=ax.twiny()
ax2.set_xbound(x_min*top_axis_factor,x_max*top_axis_factor)
ax2.set_xlabel(top_axis_title)
pad=40
ax.set_title(title,pad=40,color=TITLE_COLOR)
ax.legend()
if page_title is not None and len(page_title)>0:
self._add_title(page_title,fig)
self._save_plots(fig)
def plot_1d_pixel_hists_gauss(self,hist2d_array, mask=None, bins=100,
top_axis_factor=None,
top_axis_title="Threshold [e]",
x_axis_title="Test pulse injection [V]",
y_axis_title="# of pixel",
dat_title=["TH=0.81V"],
page_title=None,
title="Threshold dispersion"):
if mask is None:
mask=np.ones([COL_SIZE, ROW_SIZE],dtype=int)
elif isinstance(mask,list):
mask=np.array(mask)
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.set_adjustable('box')
for hist2d in hist2d_array:
hist2d=hist2d[mask==1]
hist_median=np.median(hist2d)
if np.isnan(hist_median)==True:
hist_median=0
d = np.abs(hist2d - np.median(hist2d))
mdev = np.median(d)
if np.isnan(mdev)==True:
mdev=0
hist_std=np.std(hist2d)
hist_min=hist_median-10*mdev
hist_max=hist_median+10*mdev
hist=ax.hist(hist2d.reshape([-1]), range=(hist_min,hist_max),
bins=bins, histtype="step")
bin_center = (hist[1][1:] + hist[1][:-1]) / 2.0 #####
gauss_func=monopix_daq.analysis.utils.gauss_func #####
signal_params=monopix_daq.analysis.utils.fit_gauss(bin_center, hist[0]) #####
ax.set_xbound(hist[1][0],hist[1][-1])
ax.set_xlabel(x_axis_title)
ax.set_ylabel(y_axis_title)
if top_axis_factor is None:
ax.set_title(title,color=TITLE_COLOR)
str_fit="Amp= "+ str('%.2E' %Decimal(signal_params[0]))+"\nMean= "+ str("%.4f" %signal_params[1])+"\nSigma= "+ str("%.4f" %signal_params[2])+")"
else:
ax2=ax.twiny()
ax2.set_xbound(hist[1][0]*top_axis_factor,hist[1][-1]*top_axis_factor)
ax2.set_xlabel(top_axis_title)
pad=40
ax.set_title(title,pad=40,color=TITLE_COLOR)
str_fit="Amp= "+ str('%.2E' %Decimal(signal_params[0]))+"\nMean= "+ str("%.4f" %signal_params[1])+ str(' (%.2E' %Decimal(signal_params[1]*top_axis_factor))+")\nSigma= "+ str("%.4f" %signal_params[2])+ str(' (%.2E' %Decimal(signal_params[2]*top_axis_factor))+")"
ax.plot(bin_center, gauss_func(bin_center, *signal_params[0:3]), 'g-', label=str_fit)
ax.legend()
if page_title is not None and len(page_title)>0:
self._add_title(page_title,fig)
self._save_plots(fig)
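# Usage sketch (hypothetical file name and occupancy array; in practice this
# class is driven by the analysis scripts of this package):
#   with PlottingBase('output.pdf', save_png=False) as p:
#       p.plot_2d_pixel_hist(occupancy, page_title='Source scan',
#                            title='Hit Occupancy', z_axis_title='# of hits')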
| gpl-2.0 |
meteorcloudy/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 41 | 8716 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
PMende/Ecclesia | src/output/shapes.py | 1 | 3455 | from __future__ import absolute_import, division, print_function
from builtins import (
ascii, bytes, chr, dict, filter, hex, input, int, map,
next, oct, open, pow, range, round, str, super, zip)
# Standard library imports
import os
from itertools import cycle
import json
import numpy as np
# Imports for working with shapefiles
from shapely.geometry import (
shape,
mapping
)
from descartes import PolygonPatch
import fiona
from fiona.crs import from_epsg
# matplotlib imports
import matplotlib.pyplot as plt
from matplotlib.colors import (
to_rgb,
to_hex
)
def generate_colors(values, cmap, reference=1.):
'''Generate colors from a matplotlib colormap
Parameters
----------
Values : numpy array
Values to map to RGBA tuples according to
the provided colormap
cmap: matplotlib colormap object
reference: float, default: 1
A reference to use for the values provided
Returns
-------
_colors: list, RGBA tuples
'''
_colors = [cmap(value/reference) for value in values]
return _colors
def plot_shapes(
shapelist, shape_colors, alpha=0.85, fig_file=None,
center_of_mass_arr=None, patch_lw = 1.5,
cutout = None):
'''Function for plotting generated districts
'''
_patches = [
PolygonPatch(shape['shape']) if cutout is None
else PolygonPatch(shape['shape'].intersection(cutout))
for shape in shapelist
]
for patch, color in zip(_patches, cycle(shape_colors)):
patch.set_facecolor(color)
patch.set_linewidth(patch_lw)
patch.set_alpha(alpha)
fig, ax = plt.subplots()
fig.patch.set_alpha(0.0)
for patch in _patches:
ax.add_patch(patch)
if center_of_mass_arr is not None:
ax.plot(center_of_mass_arr[:,0], center_of_mass_arr[:,1])
ax.relim()
ax.autoscale_view()
ax.axis('off')
ymin, ymax = ax.get_ylim()
xmin, xmax = ax.get_xlim()
aspect_ratio = (ymax - ymin)/(xmax - xmin)
x_size = 20
fig.set_size_inches((x_size, x_size*aspect_ratio))
if fig_file:
try:
fig.savefig(fig_file, bbox_inches='tight')
except IOError as e:
raise(e)
return None
def generate_shapefiles(districts, location,
schema, all_schema_values,
epsg_spec):
crs = from_epsg(epsg_spec)
with fiona.open(location, 'w', 'ESRI Shapefile',
schema, crs=crs) as c:
for district, schema_values in zip(districts, all_schema_values):
c.write(schema_values)
def geojson_from_shapefile(source, target, simp_factor):
with fiona.collection(source, "r") as shapefile:
features = [feature for feature in shapefile]
crs = " ".join(
"+{}={}".format(key,value)
for key, value in shapefile.crs.items()
)
for feature in features:
feature['geometry'] = mapping(
shape(feature['geometry']).simplify(simp_factor)
)
my_layer = {
"type": "FeatureCollection",
"features": features,
"crs": {
"type": "link",
"properties": {"href": "kmeans_districts.crs", "type": "proj4"}
}
}
crs_target = os.path.splitext(target)[0]+'.crs'
with open(target, "w") as f:
f.write(unicode(json.dumps(my_layer)))
with open(crs_target, "w") as f:
f.write(unicode(crs))
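# Usage sketch (hypothetical paths; simp_factor is expressed in the units of
# the source CRS and controls how aggressively geometries are simplified):
#   geojson_from_shapefile('districts/districts.shp',
#                          'districts/districts.geojson', simp_factor=0.001)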
| gpl-3.0 |
pythonvietnam/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        Whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find the weights u, v that maximize
    corr(Xk u, Yk v), subject to ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/join/jaccard_join_py.py | 1 | 10448 | # jaccard join
from joblib import delayed, Parallel
import pandas as pd
from py_stringsimjoin.join.set_sim_join import set_sim_join
from py_stringsimjoin.utils.generic_helper import convert_dataframe_to_array, \
get_attrs_to_project, get_num_processes_to_launch, remove_redundant_attrs, \
split_table
from py_stringsimjoin.utils.missing_value_handler import \
get_pairs_with_missing_value
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_comp_op_for_sim_measure, validate_key_attr, \
validate_input_table, validate_threshold, validate_tokenizer, \
validate_output_attrs
def jaccard_join_py(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op='>=',
allow_empty=True, allow_missing=False,
l_out_attrs=None, r_out_attrs=None,
l_out_prefix='l_', r_out_prefix='r_',
out_sim_score=True, n_jobs=1, show_progress=True):
"""Join two tables using Jaccard similarity measure.
For two sets X and Y, the Jaccard similarity score between them is given by:
:math:`jaccard(X, Y) = \\frac{|X \\cap Y|}{|X \\cup Y|}`
In the case where both X and Y are empty sets, we define their Jaccard
score to be 1.
Finds tuple pairs from left table and right table such that the Jaccard
similarity between the join attributes satisfies the condition on input
threshold. For example, if the comparison operator is '>=', finds tuple
pairs whose Jaccard similarity between the strings that are the values of
the join attributes is greater than or equal to the input threshold, as
specified in "threshold".
Args:
ltable (DataFrame): left input table.
rtable (DataFrame): right input table.
l_key_attr (string): key attribute in left table.
r_key_attr (string): key attribute in right table.
l_join_attr (string): join attribute in left table.
r_join_attr (string): join attribute in right table.
tokenizer (Tokenizer): tokenizer to be used to tokenize join
attributes.
threshold (float): Jaccard similarity threshold to be satisfied.
comp_op (string): comparison operator. Supported values are '>=', '>'
and '=' (defaults to '>=').
allow_empty (boolean): flag to indicate whether tuple pairs with empty
set of tokens in both the join attributes should be included in the
output (defaults to True).
allow_missing (boolean): flag to indicate whether tuple pairs with
missing value in at least one of the join attributes should be
included in the output (defaults to False). If this flag is set to
True, a tuple in ltable with missing value in the join attribute
will be matched with every tuple in rtable and vice versa.
l_out_attrs (list): list of attribute names from the left table to be
included in the output table (defaults to None).
r_out_attrs (list): list of attribute names from the right table to be
included in the output table (defaults to None).
l_out_prefix (string): prefix to be used for the attribute names coming
from the left table, in the output table (defaults to 'l\_').
r_out_prefix (string): prefix to be used for the attribute names coming
from the right table, in the output table (defaults to 'r\_').
out_sim_score (boolean): flag to indicate whether similarity score
should be included in the output table (defaults to True). Setting
this flag to True will add a column named '_sim_score' in the
output table. This column will contain the similarity scores for the
tuple pairs in the output.
n_jobs (int): number of parallel jobs to use for the computation
(defaults to 1). If -1 is given, all CPUs are used. If 1 is given,
no parallel computing code is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used
(where n_cpus is the total number of CPUs in the machine). Thus for
n_jobs = -2, all CPUs but one are used. If (n_cpus + 1 + n_jobs)
becomes less than 1, then no parallel computing code will be used
(i.e., equivalent to the default).
show_progress (boolean): flag to indicate whether task progress should
be displayed to the user (defaults to True).
Returns:
An output table containing tuple pairs that satisfy the join
condition (DataFrame).
"""
# check if the input tables are dataframes
validate_input_table(ltable, 'left table')
validate_input_table(rtable, 'right table')
# check if the key attributes and join attributes exist
validate_attr(l_key_attr, ltable.columns,
'key attribute', 'left table')
validate_attr(r_key_attr, rtable.columns,
'key attribute', 'right table')
validate_attr(l_join_attr, ltable.columns,
'join attribute', 'left table')
validate_attr(r_join_attr, rtable.columns,
'join attribute', 'right table')
# check if the join attributes are not of numeric type
validate_attr_type(l_join_attr, ltable[l_join_attr].dtype,
'join attribute', 'left table')
validate_attr_type(r_join_attr, rtable[r_join_attr].dtype,
'join attribute', 'right table')
# check if the input tokenizer is valid
validate_tokenizer(tokenizer)
# check if the input threshold is valid
validate_threshold(threshold, 'JACCARD')
# check if the comparison operator is valid
validate_comp_op_for_sim_measure(comp_op, 'JACCARD')
# check if the output attributes exist
validate_output_attrs(l_out_attrs, ltable.columns,
r_out_attrs, rtable.columns)
# check if the key attributes are unique and do not contain missing values
validate_key_attr(l_key_attr, ltable, 'left table')
validate_key_attr(r_key_attr, rtable, 'right table')
# set return_set flag of tokenizer to be True, in case it is set to False
revert_tokenizer_return_set_flag = False
if not tokenizer.get_return_set():
tokenizer.set_return_set(True)
revert_tokenizer_return_set_flag = True
# remove redundant attrs from output attrs.
l_out_attrs = remove_redundant_attrs(l_out_attrs, l_key_attr)
r_out_attrs = remove_redundant_attrs(r_out_attrs, r_key_attr)
# get attributes to project.
l_proj_attrs = get_attrs_to_project(l_out_attrs, l_key_attr, l_join_attr)
r_proj_attrs = get_attrs_to_project(r_out_attrs, r_key_attr, r_join_attr)
# Do a projection on the input dataframes to keep only the required
# attributes. Then, remove rows with missing value in join attribute from
# the input dataframes. Then, convert the resulting dataframes into ndarray.
ltable_array = convert_dataframe_to_array(ltable, l_proj_attrs, l_join_attr)
rtable_array = convert_dataframe_to_array(rtable, r_proj_attrs, r_join_attr)
# computes the actual number of jobs to launch.
n_jobs = min(get_num_processes_to_launch(n_jobs), len(rtable_array))
if n_jobs <= 1:
# if n_jobs is 1, do not use any parallel code.
output_table = set_sim_join(ltable_array, rtable_array,
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, 'JACCARD',
threshold, comp_op, allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress)
else:
# if n_jobs is above 1, split the right table into n_jobs splits and
# join each right table split with the whole of left table in a separate
# process.
r_splits = split_table(rtable_array, n_jobs)
results = Parallel(n_jobs=n_jobs)(delayed(set_sim_join)(
ltable_array, r_splits[job_index],
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, 'JACCARD',
threshold, comp_op, allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score,
(show_progress and (job_index==n_jobs-1)))
for job_index in range(n_jobs))
output_table = pd.concat(results)
# If allow_missing flag is set, then compute all pairs with missing value in
# at least one of the join attributes and then add it to the output
# obtained from the join.
if allow_missing:
missing_pairs = get_pairs_with_missing_value(
ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress)
output_table = pd.concat([output_table, missing_pairs])
# add an id column named '_id' to the output table.
output_table.insert(0, '_id', range(0, len(output_table)))
# revert the return_set flag of tokenizer, in case it was modified.
if revert_tokenizer_return_set_flag:
tokenizer.set_return_set(False)
return output_table
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/scipy/signal/signaltools.py | 4 | 88095 | # Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
from . import sigtools
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
prod, product, r_, ravel, real_if_close, reshape,
roots, sort, sum, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'detrend', 'lfilter_zi', 'sosfilt_zi',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
        if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as:
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
# numpy is significantly faster for 1d
if in1.ndim == 1 and in2.ndim == 1:
return np.correlate(in1, in2, mode)
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
# numpy is significantly faster for 1d
if in1.ndim == 1 and in2.ndim == 1 and (in1.size >= in2.size):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward
swapped_inputs = (mode == 'full') and (in2.size > in1.size)
if swapped_inputs:
in1, in2 = in2, in1
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
# Reverse and conjugate to undo the effect of swapping inputs
if swapped_inputs:
slice_obj = [slice(None, None, -1)] * len(z.shape)
z = z[slice_obj].conj()
return z
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
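# Illustration (not from the original source): _centered(np.arange(10), [4])
# keeps the middle four samples, i.e. array([3, 4, 5, 6]).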
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target-1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2**((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2**(len(bin(quotient - 1)) - 2)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
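# Illustrative values (not from the original source): _next_regular(97) -> 100,
# _next_regular(509) -> 512 and _next_regular(513) -> 540; fftconvolve below
# pads its FFTs to such 5-smooth lengths, which FFTPACK handles much faster
# than prime-sized transforms.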
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse. (This is at least 100 times
as fast as `convolve`.)
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, complex) or
np.issubdtype(in2.dtype, complex))
shape = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [_next_regular(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
ret = (np.fft.irfftn(np.fft.rfftn(in1, fshape) *
np.fft.rfftn(in2, fshape), fshape)[fslice].
copy())
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
ret = fftpack.ifftn(fftpack.fftn(in1, fshape) *
fftpack.fftn(in2, fshape))[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
# fastpath to faster numpy 1d convolve
if volume.ndim == 1 and kernel.ndim == 1 and volume.size >= kernel.size:
return np.convolve(volume, kernel, mode)
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
    Perform an order filter on the array `a`.  The `domain` argument acts as a
    mask centered over each pixel.  The non-zero elements of `domain` are
    used to select elements surrounding each input pixel which are placed
    in a list.  The list is sorted, and the output for that pixel is the
    element corresponding to `rank` in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
    domain : array_like
        A mask array with the same number of dimensions as `a`.
        Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
    out : ndarray
        The results of the order filter in an array with the same
        shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
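
    Examples
    --------
    A minimal sketch with arbitrary values: suppress an isolated spike in a
    short 1-D signal using a length-3 window (boundaries are zero-padded).

    >>> import numpy as np
    >>> from scipy import signal
    >>> x = np.array([1., 2., 100., 3., 4.])
    >>> y = signal.medfilt(x, kernel_size=3)  # the spike at index 2 is suppressed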
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
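
    Examples
    --------
    A minimal sketch with synthetic data (the noise level is arbitrary),
    using a 3x3 window:

    >>> import numpy as np
    >>> from scipy import signal
    >>> rng = np.random.RandomState(0)
    >>> img = np.zeros((8, 8))
    >>> img[2:6, 2:6] = 1.0
    >>> noisy = img + 0.1 * rng.randn(8, 8)
    >>> cleaned = signal.wiener(noisy, mysize=3)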
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(face, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(1, 3)
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(1, 3)
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
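
    Examples
    --------
    A minimal sketch with arbitrary data: remove an isolated outlier from a
    small 2-D array using the default 3 x 3 window.

    >>> import numpy as np
    >>> from scipy import signal
    >>> img = np.zeros((6, 6))
    >>> img[2, 3] = 50.0                   # an isolated outlier
    >>> cleaned = signal.medfilt2d(img)    # the outlier is replaced by 0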
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a),len(b))-1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
                b[0] + b[1] z**-1 + ... + b[nb] z**-nb
        Y(z) = ---------------------------------------- X(z)
                a[0] + a[1] z**-1 + ... + a[na] z**-na
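
    Examples
    --------
    A minimal sketch (the coefficients below are illustrative, not from any
    particular design routine): apply a first-order IIR smoother
    ``y[n] = alpha*x[n] + (1 - alpha)*y[n-1]`` to a step input.

    >>> import numpy as np
    >>> from scipy import signal
    >>> x = np.concatenate([np.zeros(20), np.ones(20)])
    >>> alpha = 0.2                      # smoothing factor, chosen arbitrarily
    >>> b, a = [alpha], [1.0, alpha - 1.0]
    >>> y = signal.lfilter(b, a, x)      # exponentially smoothed step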
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape, strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi``.
``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.
See Also
--------
lfilter
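
    Examples
    --------
    A minimal sketch with an illustrative first-order filter: filter a signal
    in two chunks, using `lfiltic` to compute the state after the first chunk
    so that the second chunk continues where the first one left off.

    >>> import numpy as np
    >>> from scipy import signal
    >>> b, a = [0.5, 0.5], [1.0, -0.2]
    >>> x = np.arange(8.0)
    >>> y_full = signal.lfilter(b, a, x)
    >>> y1 = signal.lfilter(b, a, x[:4])
    >>> zi = signal.lfiltic(b, a, y=y1[::-1], x=x[3::-1])
    >>> y2, _ = signal.lfilter(b, a, x[4:], zi=zi)
    >>> np.allclose(np.concatenate([y1, y2]), y_full)
    True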
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal``.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
---------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
    We create a chirp whose frequency increases from 20 Hz to 100 Hz and
    apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
    The amplitude envelope is given by the magnitude of the analytic signal.
    The instantaneous frequency can be obtained by differentiating the
    instantaneous phase with respect to time.  The instantaneous phase
    corresponds to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = np.diff(instantaneous_phase) / (2.0*np.pi) * fs
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal Processing,
Third Edition, 2009. Chapter 12. ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fftpack.fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = fftpack.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
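
    Examples
    --------
    A minimal sketch on arbitrary random data; the result is a complex array
    with the same shape as the input:

    >>> import numpy as np
    >>> from scipy import signal
    >>> rng = np.random.RandomState(0)
    >>> x = rng.randn(16, 16)
    >>> xa = signal.hilbert2(x)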
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fftpack.fft2(x, N, axes=(0, 1))
    h1 = zeros(N[0], 'd')
    h2 = zeros(N[1], 'd')
    # Build the one-sided spectral weights for each axis; h1 and h2 are
    # modified in place.
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
h = h1[:, newaxis] * h2[newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = fftpack.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
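
    Examples
    --------
    A minimal sketch with arbitrary complex roots, sorted by magnitude:

    >>> from scipy import signal
    >>> roots = [3 + 0j, -2 + 0.5j, 0.1j, -1 + 0j]
    >>> p_sorted, indx = signal.cmplx_sort(roots)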
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
    sequence of values for which uniqueness and multiplicity have to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
              b(s)     b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
     H(s) = ------ = ----------------------------------------------
              a(s)     a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]

              r[0]       r[1]             r[-1]
          = -------- + -------- + ... + --------- + k(s)
            (s-p[0])   (s-p[1])         (s-p[-1])

    If there are any repeated roots (closer than `tol`), then the partial
    fraction expansion has terms like::

           r[i]        r[i+1]             r[i+n-1]
        -------- + ------------- + ... + -------------
        (s-p[i])   (s-p[i])**2           (s-p[i])**n
Parameters
----------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
'max': pick the maximum of those roots.
'min': pick the minimum of those roots.
'avg': take the average of those roots.
See Also
--------
residue, unique_roots
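
    Examples
    --------
    A minimal sketch with illustrative residues and poles: rebuild the
    rational function ``1/(s + 1) - 1/(s + 2)`` from its expansion.

    >>> from scipy import signal
    >>> r, p, k = [1.0, -1.0], [-1.0, -2.0], []
    >>> b, a = signal.invres(r, p, k)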
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
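
    Examples
    --------
    A minimal sketch with illustrative coefficients: expand
    ``H(s) = (s + 1) / (s**2 + 5*s + 6)``, which has simple poles at
    ``s = -2`` and ``s = -3``.

    >>> from scipy import signal
    >>> b, a = [1.0, 1.0], [1.0, 5.0, 6.0]
    >>> r, p, k = signal.residue(b, a)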
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n])
/ factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
invresz, unique_roots
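
    Examples
    --------
    A minimal sketch with illustrative coefficients: expand the transfer
    function of a two-pole IIR filter with poles at ``z = 0.5`` and
    ``z = 0.25``.

    >>> from scipy import signal
    >>> b, a = [1.0], [1.0, -0.75, 0.125]
    >>> r, p, k = signal.residuez(b, a)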
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1)...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See Also
--------
residuez, unique_roots, invres
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
    The argument `window` controls a Fourier-domain window that tapers
    the Fourier spectrum before zero-padding, which alleviates ringing in
    the resampled values for signals that are not strictly band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fftpack.fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftpack.fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = fftpack.ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
doi: 10.1063/1.3670512
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. doi: 10.1007/s00422-013-0561-7.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
doi: 10.1007/s00422-013-0560-8
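
    Examples
    --------
    A minimal sketch with hypothetical event times (in the same units as the
    period): events that are nearly periodic with period 1.0 give a strength
    close to 1.

    >>> import numpy as np
    >>> from scipy import signal
    >>> events = np.array([0.0, 1.01, 1.99, 3.02, 4.0])
    >>> strength, phase = signal.vectorstrength(events, 1.0)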
'''
events = asarray(events)
period = asarray(period)
    if events.ndim > 1:
        raise ValueError('events cannot have more than one dimension')
    if period.ndim > 1:
        raise ValueError('period cannot have more than one dimension')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
    .. [1] F. Gustafsson. Determining the initial states in forward-backward
           filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
    if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
    This function applies a linear filter twice, once forward and once
    backwards.  The combined filter has zero phase and a filter order twice
    that of the original.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi, lfilter
Notes
-----
    The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
.. [1] F. Gustaffson, "Determining the initial states in forward-backward
filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# `method` is "pad"...
ntaps = max(len(a), len(b))
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = np.zeros(700)
>>> x[0] = 1.
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
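    A minimal sketch of block-wise (chunked) filtering: the ``zi`` argument
    carries the filter state between calls, so filtering a signal in two
    chunks gives the same result as filtering it in a single call.
    >>> x = np.random.randn(100)
    >>> zi = np.zeros((sos.shape[0], 2))
    >>> y1, zi = signal.sosfilt(sos, x[:50], zi=zi)
    >>> y2, zi = signal.sosfilt(sos, x[50:], zi=zi)
    >>> np.allclose(np.concatenate([y1, y2]), signal.sosfilt(sos, x))
    True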
"""
x = np.asarray(x)
sos = atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""
Downsample the signal by using a filter.
    By default, an order 8 Chebyshev type I filter is used. An order 30 FIR
    filter with a Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
Returns
-------
y : ndarray
The down-sampled signal.
See also
--------
resample
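    Examples
    --------
    A minimal usage sketch: decimate a noisy sine wave by a factor of 4
    using the default Chebyshev IIR filter.
    >>> import numpy as np
    >>> from scipy import signal
    >>> t = np.linspace(0, 1, 400, endpoint=False)
    >>> x = np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(t.size)
    >>> y = signal.decimate(x, 4)
    >>> y.shape
    (100,)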
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
return y[sl]
| mit |
JohnGriffiths/dipy | scratch/very_scratch/diffusion_sphere_stats.py | 20 | 18082 | import nibabel
import os
import numpy as np
import dipy as dp
#import dipy.core.generalized_q_sampling as dgqs
import dipy.reconst.gqi as dgqs
import dipy.reconst.dti as ddti
import dipy.reconst.recspeed as rp
import dipy.io.pickles as pkl
import scipy as sp
from matplotlib.mlab import find
#import dipy.core.sphere_plots as splots
import dipy.core.sphere_stats as sphats
import dipy.core.geometry as geometry
import get_vertices as gv
#old SimData files
'''
results_SNR030_1fibre
results_SNR030_1fibre+iso
results_SNR030_2fibres_15deg
results_SNR030_2fibres_30deg
results_SNR030_2fibres_60deg
results_SNR030_2fibres_90deg
results_SNR030_2fibres+iso_15deg
results_SNR030_2fibres+iso_30deg
results_SNR030_2fibres+iso_60deg
results_SNR030_2fibres+iso_90deg
results_SNR030_isotropic
'''
#fname='/home/ian/Data/SimData/results_SNR030_1fibre'
''' The file has one row for every voxel; every voxel is repeated 1000
times with the same noise level, and then there are 100 different
directions. 1000 * 100 is the number of all rows.
The 100 conditions are given by 10 polar angles (in degrees) 0, 20, 40, 60, 80,
80, 60, 40, 20 and 0, and each of these with longitude angle 0, 40, 80,
120, 160, 200, 240, 280, 320, 360.
'''
#new complete SimVoxels files
simdata = ['fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
simdir = '/home/ian/Data/SimVoxels/'
def gq_tn_calc_save():
for simfile in simdata:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
gq = dgqs.GeneralizedQSampling(sim_data,bvals,gradients)
gqfile = simdir+'gq/'+dataname+'.pkl'
pkl.save_pickle(gqfile,gq)
'''
gq.IN gq.__doc__ gq.glob_norm_param
gq.QA gq.__init__ gq.odf
gq.__class__ gq.__module__ gq.q2odf_params
'''
tn = ddti.Tensor(sim_data,bvals,gradients)
tnfile = simdir+'tn/'+dataname+'.pkl'
pkl.save_pickle(tnfile,tn)
'''
tn.ADC tn.__init__ tn._getevals
tn.B tn.__module__ tn._getevecs
tn.D tn.__new__ tn._getndim
tn.FA tn.__reduce__ tn._getshape
tn.IN tn.__reduce_ex__ tn._setevals
tn.MD tn.__repr__ tn._setevecs
tn.__class__ tn.__setattr__ tn.adc
tn.__delattr__ tn.__sizeof__ tn.evals
tn.__dict__ tn.__str__ tn.evecs
tn.__doc__ tn.__subclasshook__ tn.fa
tn.__format__ tn.__weakref__ tn.md
tn.__getattribute__ tn._evals tn.ndim
tn.__getitem__ tn._evecs tn.shape
tn.__hash__ tn._getD
'''
''' The file has one row for every voxel; every voxel is repeated 1000
times with the same noise level, and then there are 100 different
directions. 100 * 1000 is the number of all rows.
At the moment this module is hardwired to the use of the EDS362
spherical mesh. I am assuming (needs testing) that directions 181 to 361
are the antipodal partners of directions 0 to 180. So when counting the
number of different vertices that occur as maximal directions we will map
the indices modulo 181.
'''
def analyze_maxima(indices, max_dirs, subsets):
'''This calculates the eigenstats for each of the replicated batches
of the simulation data
'''
results = []
for direction in subsets:
batch = max_dirs[direction,:,:]
index_variety = np.array([len(set(np.remainder(indices[direction,:],181)))])
#normed_centroid, polar_centroid, centre, b1 = sphats.eigenstats(batch)
centre, b1 = sphats.eigenstats(batch)
# make azimuth be in range (0,360) rather than (-180,180)
centre[1] += 360*(centre[1] < 0)
#results.append(np.concatenate((normed_centroid, polar_centroid, centre, b1, index_variety)))
results.append(np.concatenate((centre, b1, index_variety)))
return results
#dt_first_directions = tn.evecs[:,:,0].reshape((100,1000,3))
# these are the principal directions for the full set of simulations
#gq_tn_calc_save()
#eds=np.load(os.path.join(os.path.dirname(dp.__file__),'core','matrices','evenly_distributed_sphere_362.npz'))
from dipy.data import get_sphere
odf_vertices,odf_faces=get_sphere('symmetric362')
#odf_vertices=eds['vertices']
def run_comparisons(sample_data=35):
for simfile in [simdata[sample_data]]:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
gqfile = simdir+'gq/'+dataname+'.pkl'
gq = pkl.load_pickle(gqfile)
tnfile = simdir+'tn/'+dataname+'.pkl'
tn = pkl.load_pickle(tnfile)
dt_first_directions_in=odf_vertices[tn.IN]
dt_indices = tn.IN.reshape((100,1000))
dt_results = analyze_maxima(dt_indices, dt_first_directions_in.reshape((100,1000,3)),range(10,90))
gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(10,90))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
np.set_printoptions(precision=3, suppress=True, linewidth=200, threshold=5000)
out = open('/home/ian/Data/SimVoxels/Out/'+'***_'+dataname,'w')
#print np.vstack(dt_results).shape, np.vstack(gq_results).shape
results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#print results.shape
#results = np.vstack(dt_results)
print >> out, results[:,:]
out.close()
#up = dt_batch[:,2]>= 0
#splots.plot_sphere(dt_batch[up], 'batch '+str(direction))
#splots.plot_lambert(dt_batch[up],'batch '+str(direction), centre)
#spread = gq.q2odf_params e,v = np.linalg.eigh(np.dot(spread,spread.transpose())) effective_dimension = len(find(np.cumsum(e) > 0.05*np.sum(e))) #95%
#rotated = np.dot(dt_batch,evecs)
#rot_evals, rot_evecs = np.linalg.eig(np.dot(rotated.T,rotated)/rotated.shape[0])
#eval_order = np.argsort(rot_evals)
#rotated = rotated[:,eval_order]
#up = rotated[:,2]>= 0
#splot.plot_sphere(rotated[up],'first1000')
#splot.plot_lambert(rotated[up],'batch '+str(direction))
def run_gq_sims(sample_data=[35,23,46,39,40,10,37,27,21,20]):
results = []
out = open('/home/ian/Data/SimVoxels/Out/'+'npa+fa','w')
for j in range(len(sample_data)):
sample = sample_data[j]
simfile = simdata[sample]
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
for j in np.vstack((np.arange(100)*1000,np.arange(100)*1000+1)).T.ravel():
# 0,1,1000,1001,2000,2001,...
s = sim_data[j,:]
gqs = dp.GeneralizedQSampling(s.reshape((1,102)),bvals,gradients,Lambda=3.5)
tn = dp.Tensor(s.reshape((1,102)),bvals,gradients,fit_method='LS')
t0, t1, t2, npa = gqs.npa(s, width = 5)
print >> out, dataname, j, npa, tn.fa()[0]
'''
for (i,o) in enumerate(gqs.odf(s)):
print i,o
for (i,o) in enumerate(gqs.odf_vertices):
print i,o
'''
#o = gqs.odf(s)
#v = gqs.odf_vertices
#pole = v[t0[0]]
#eqv = dgqs.equatorial_zone_vertices(v, pole, 5)
#print 'Number of equatorial vertices: ', len(eqv)
#print np.max(o[eqv]),np.min(o[eqv])
#cos_e_pole = [np.dot(pole.T, v[i]) for i in eqv]
#print np.min(cos1), np.max(cos1)
#print 'equatorial max in equatorial vertices:', t1[0] in eqv
#x = np.cross(v[t0[0]],v[t1[0]])
#x = x/np.sqrt(np.sum(x**2))
#print x
#ptchv = dgqs.patch_vertices(v, x, 5)
#print len(ptchv)
#eqp = eqv[np.argmin([np.abs(np.dot(v[t1[0]].T,v[p])) for p in eqv])]
#print (eqp, o[eqp])
#print t2[0] in ptchv, t2[0] in eqv
#print np.dot(pole.T, v[t1[0]]), np.dot(pole.T, v[t2[0]])
#print ptchv[np.argmin([o[v] for v in ptchv])]
#gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
#gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
#gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
#np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000)
#out = open('/home/ian/Data/SimVoxels/Out/'+'+++_'+dataname,'w')
#results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#results = np.vstack(dt_results)
#print >> out, results[:,:]
out.close()
def run_small_data():
#smalldir = '/home/ian/Devel/dipy/dipy/data/'
smalldir = '/home/eg309/Devel/dipy/dipy/data/'
# from os.path import join as opj
# bvals=np.load(opj(os.path.dirname(__file__), \
# 'data','small_64D.bvals.npy'))
bvals=np.load(smalldir+'small_64D.bvals.npy')
# gradients=np.load(opj(os.path.dirname(__file__), \
# 'data','small_64D.gradients.npy'))
gradients=np.load(smalldir+'small_64D.gradients.npy')
# img =ni.load(os.path.join(os.path.dirname(__file__),\
# 'data','small_64D.nii'))
img=nibabel.load(smalldir+'small_64D.nii')
small_data=img.get_data()
print 'real_data', small_data.shape
gqsmall = dgqs.GeneralizedQSampling(small_data,bvals,gradients)
tnsmall = ddti.Tensor(small_data,bvals,gradients)
x,y,z,a,b=tnsmall.evecs.shape
evecs=tnsmall.evecs
xyz=x*y*z
evecs = evecs.reshape(xyz,3,3)
#vs = np.sign(evecs[:,2,:])
#print vs.shape
#print np.hstack((vs,vs,vs)).reshape(1000,3,3).shape
#evecs = np.hstack((vs,vs,vs)).reshape(1000,3,3)
#print evecs.shape
evals=tnsmall.evals
evals = evals.reshape(xyz,3)
#print evals.shape
#print('GQS in %d' %(t2-t1))
'''
eds=np.load(opj(os.path.dirname(__file__),\
'..','matrices',\
'evenly_distributed_sphere_362.npz'))
'''
from dipy.data import get_sphere
odf_vertices,odf_faces=get_sphere('symmetric362')
#odf_vertices=eds['vertices']
#odf_faces=eds['faces']
#Yeh et.al, IEEE TMI, 2010
#calculate the odf using GQI
scaling=np.sqrt(bvals*0.01506) # 0.01506 = 6*D where D is the free
#water diffusion coefficient
#l_values sqrt(6 D tau) D free water
    #diffusion coefficient and tau included in the b-value
tmp=np.tile(scaling,(3,1))
b_vector=gradients.T*tmp
Lambda = 1.2 # smoothing parameter - diffusion sampling length
q2odf_params=np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi)
#implements equation no. 9 from Yeh et.al.
S=small_data.copy()
x,y,z,g=S.shape
S=S.reshape(x*y*z,g)
QA = np.zeros((x*y*z,5))
IN = np.zeros((x*y*z,5))
FA = tnsmall.fa().reshape(x*y*z)
fwd = 0
#Calculate Quantitative Anisotropy and find the peaks and the indices
#for every voxel
summary = {}
summary['vertices'] = odf_vertices
v = odf_vertices.shape[0]
summary['faces'] = odf_faces
f = odf_faces.shape[0]
for (i,s) in enumerate(S):
#print 'Volume %d' % i
istr = str(i)
summary[istr] = {}
t0, t1, t2, npa = gqsmall.npa(s, width = 5)
summary[istr]['triple']=(t0,t1,t2)
summary[istr]['npa']=npa
odf = Q2odf(s,q2odf_params)
peaks,inds=rp.peak_finding(odf,odf_faces)
fwd=max(np.max(odf),fwd)
#peaks = peaks - np.min(odf)
n_peaks=min(len(peaks),5)
        peak_heights = [odf[k] for k in inds[:n_peaks]]  # k, not i: Python 2 comprehensions would clobber the loop index
#QA[i][:l] = peaks[:n_peaks]
IN[i][:n_peaks] = inds[:n_peaks]
summary[istr]['odf'] = odf
summary[istr]['peaks'] = peaks
summary[istr]['inds'] = inds
summary[istr]['evecs'] = evecs[i,:,:]
summary[istr]['evals'] = evals[i,:]
summary[istr]['n_peaks'] = n_peaks
summary[istr]['peak_heights'] = peak_heights
# summary[istr]['fa'] = tnsmall.fa()[0]
summary[istr]['fa'] = FA[i]
'''
QA/=fwd
QA=QA.reshape(x,y,z,5)
IN=IN.reshape(x,y,z,5)
'''
peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks']==1]
peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks']==2]
peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks']==3]
#peaks_2 = [i for i in range(1000) if len(summary[str(i)]['inds'])==2]
#peaks_3 = [i for i in range(1000) if len(summary[str(i)]['inds'])==3]
print '#voxels with 1, 2, 3 peaks', len(peaks_1),len(peaks_2),len(peaks_3)
return FA, summary
def Q2odf(s,q2odf_params):
''' construct odf for a voxel '''
odf=np.dot(s,q2odf_params)
return odf
#run_comparisons()
#run_gq_sims()
FA, summary = run_small_data()
peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks']==1]
peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks']==2]
peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks']==3]
fa_npa_1 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_1]
fa_npa_2 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_2]
fa_npa_3 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_3]
| bsd-3-clause |
bosszhou/ThinkStats2 | code/chap12soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
daily['years2'] = daily.years**2
model = smf.ols('ppg ~ years + years2', data=daily)
results = model.fit()
return model, results
def PlotQuadraticModel(daily, name):
"""
"""
model, results = RunQuadraticModel(daily)
regression.SummarizeResults(results)
timeseries.PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries11',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)')
timeseries.PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries12',
title='residuals',
xlabel='years',
ylabel='price per gram ($)')
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
thinkplot.Save(root='timeseries13',
title='predictions',
xlabel='years',
xlim=[years[0]-0.1, years[-1]+0.1],
ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
"""
"""
# use EWMA to estimate slopes
filled = timeseries.FillMissing(daily)
filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
filled[-1:]
# extract the last inter and slope
start = filled.index[-1]
inter = filled.ewma[-1]
slope = filled.slope[-1]
# reindex the DataFrame, adding a year to the end
dates = pandas.date_range(filled.index.min(),
filled.index.max() + np.timedelta64(365, 'D'))
predicted = filled.reindex(dates)
# generate predicted values and add them to the end
predicted['date'] = predicted.index
one_day = np.timedelta64(1, 'D')
predicted['days'] = (predicted.date - start) / one_day
predict = inter + slope * predicted.days
predicted.ewma.fillna(predict, inplace=True)
# plot the actual values and predictions
thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
thinkplot.Plot(predicted.ewma)
thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
"""Tests serial correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
series, lag = data
test_stat = abs(thinkstats2.SerialCorr(series, lag))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
series, lag = self.data
permutation = series.reindex(np.random.permutation(series.index))
return permutation, lag
def TestSerialCorr(daily):
"""Tests serial correlations in daily prices and their residuals.
daily: DataFrame of daily prices
"""
# test the correlation between consecutive prices
series = daily.ppg
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the linear model
_, results = timeseries.RunLinearModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the quadratic model
_, results = RunQuadraticModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
def main(name):
transactions = timeseries.ReadData()
dailies = timeseries.GroupByQualityAndDay(transactions)
name = 'high'
daily = dailies[name]
PlotQuadraticModel(daily, name)
TestSerialCorr(daily)
PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/util/clipboard.py | 16 | 6355 | # Pyperclip v1.3
# A cross-platform clipboard module for Python. (only handles plain text for now)
# By Al Sweigart [email protected]
# Usage:
# import pyperclip
# pyperclip.copy('The text to be copied to the clipboard.')
# spam = pyperclip.paste()
# On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.
# On Linux, this module makes use of the xclip command, which should come with the os. Otherwise run "sudo apt-get install xclip"
# Copyright (c) 2010, Albert Sweigart
# All rights reserved.
#
# BSD-style license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the pyperclip nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Albert Sweigart "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Change Log:
# 1.2 Use the platform module to help determine OS.
# 1.3 Changed ctypes.windll.user32.OpenClipboard(None) to ctypes.windll.user32.OpenClipboard(0), after some people ran into some TypeError
import platform, os
class NoClipboardProgramError(OSError):
pass
def winGetClipboard():
ctypes.windll.user32.OpenClipboard(0)
pcontents = ctypes.windll.user32.GetClipboardData(1) # 1 is CF_TEXT
data = ctypes.c_char_p(pcontents).value
#ctypes.windll.kernel32.GlobalUnlock(pcontents)
ctypes.windll.user32.CloseClipboard()
return data
def winSetClipboard(text):
GMEM_DDESHARE = 0x2000
ctypes.windll.user32.OpenClipboard(0)
ctypes.windll.user32.EmptyClipboard()
try:
# works on Python 2 (bytes() only takes one argument)
hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text))+1)
except TypeError:
# works on Python 3 (bytes() requires an encoding)
hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text, 'ascii'))+1)
pchData = ctypes.windll.kernel32.GlobalLock(hCd)
try:
# works on Python 2 (bytes() only takes one argument)
ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text))
except TypeError:
# works on Python 3 (bytes() requires an encoding)
ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text, 'ascii'))
ctypes.windll.kernel32.GlobalUnlock(hCd)
ctypes.windll.user32.SetClipboardData(1,hCd)
ctypes.windll.user32.CloseClipboard()
def macSetClipboard(text):
outf = os.popen('pbcopy', 'w')
outf.write(text)
outf.close()
def macGetClipboard():
outf = os.popen('pbpaste', 'r')
content = outf.read()
outf.close()
return content
def gtkGetClipboard():
return gtk.Clipboard().wait_for_text()
def gtkSetClipboard(text):
cb = gtk.Clipboard()
cb.set_text(text)
cb.store()
def qtGetClipboard():
return str(cb.text())
def qtSetClipboard(text):
cb.setText(text)
def xclipSetClipboard(text):
outf = os.popen('xclip -selection c', 'w')
outf.write(text)
outf.close()
def xclipGetClipboard():
outf = os.popen('xclip -selection c -o', 'r')
content = outf.read()
outf.close()
return content
def xselSetClipboard(text):
outf = os.popen('xsel -i', 'w')
outf.write(text)
outf.close()
def xselGetClipboard():
outf = os.popen('xsel -o', 'r')
content = outf.read()
outf.close()
return content
if os.name == 'nt' or platform.system() == 'Windows':
import ctypes
getcb = winGetClipboard
setcb = winSetClipboard
elif os.name == 'mac' or platform.system() == 'Darwin':
getcb = macGetClipboard
setcb = macSetClipboard
elif os.name == 'posix' or platform.system() == 'Linux':
xclipExists = os.system('which xclip > /dev/null') == 0
if xclipExists:
getcb = xclipGetClipboard
setcb = xclipSetClipboard
else:
xselExists = os.system('which xsel > /dev/null') == 0
if xselExists:
getcb = xselGetClipboard
setcb = xselSetClipboard
else:
try:
import gtk
except ImportError:
try:
import PyQt4 as qt4
import PyQt4.QtCore
import PyQt4.QtGui
except ImportError:
try:
import PySide as qt4
import PySide.QtCore
import PySide.QtGui
except ImportError:
raise NoClipboardProgramError('Pyperclip requires the'
' gtk, PyQt4, or PySide'
' module installed, or '
'either the xclip or '
'xsel command.')
app = qt4.QtGui.QApplication([])
cb = qt4.QtGui.QApplication.clipboard()
getcb = qtGetClipboard
setcb = qtSetClipboard
else:
getcb = gtkGetClipboard
setcb = gtkSetClipboard
copy = setcb
paste = getcb
## pandas aliases
clipboard_get = paste
clipboard_set = copy
| mit |
WangWenjun559/Weiss | summary/sumy/sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
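    """Minimal reference perceptron (labels in {-1, +1}) used to cross-check sklearn's Perceptron."""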
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| apache-2.0 |
kjung/scikit-learn | sklearn/datasets/lfw.py | 31 | 19544 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
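    Examples
    --------
    A minimal usage sketch (the first call downloads ~200MB of image data):
    >>> from sklearn.datasets import fetch_lfw_people  # doctest: +SKIP
    >>> lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)  # doctest: +SKIP
    >>> lfw_people.images.shape  # doctest: +SKIP
    (1288, 50, 37)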
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
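    Examples
    --------
    A minimal usage sketch (the first call downloads the data if needed):
    >>> from sklearn.datasets import fetch_lfw_pairs  # doctest: +SKIP
    >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')  # doctest: +SKIP
    >>> lfw_pairs_train.pairs.shape  # doctest: +SKIP
    (2200, 2, 62, 47)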
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
speed-of-light/pyslider | lib/texer/nsf_roc_tab.py | 1 | 1464 | import pandas as pd
class NsfRocTab(object):
def __init__(self):
pass
def __extreme(self, data, key):
di = data[key].argmax()
return data.ix[di]
def __bold_max(self, dseries, x):
if dseries.max() - x < 0.000001:
bs = "BL{:.3f}BR".format(x)
tc = "STextcolorBLemphasisBR{}".format(bs)
it = "STextitBL{}BR".format(tc)
else:
it = "{:.3f}".format(x)
return it
def __tex_post(self, txt):
txt = txt.replace("ST", "\\t")
txt = txt.replace("BL", "{")
txt = txt.replace("BR", "}")
return txt
def __tex_roc_table(self, data):
res = ["name", "key", "sensitivity", "precision", "accuracy"]
fmts = dict(
header=lambda x: x[:3],
accuracy=lambda x: self.__bold_max(data["accuracy"], x),
precision=lambda x: self.__bold_max(data["precision"], x),
sensitivity=lambda x: self.__bold_max(data["sensitivity"], x))
ffmt = "{:3,.3f}".format
st = data.to_latex(columns=res, index=0, formatters=fmts,
float_format=ffmt)
return self.__tex_post(st)
def tabular(self, data):
df = pd.DataFrame()
df = df.append(self.__extreme(data, "sensitivity"))
df = df.append(self.__extreme(data, "precision"))
df = df.append(self.__extreme(data, "accuracy"))
return self.__tex_roc_table(df)
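if __name__ == "__main__":
    # Minimal illustrative sketch with hypothetical data (the values below
    # are made up; they just have the columns `tabular` expects):
    demo = pd.DataFrame([
        dict(name="clip_a", key=3, sensitivity=0.91, precision=0.80, accuracy=0.85),
        dict(name="clip_b", key=5, sensitivity=0.78, precision=0.88, accuracy=0.83),
    ])
    print(NsfRocTab().tabular(demo))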
| agpl-3.0 |
mdegis/machine-learning | 001 - Naive Bayes Classifier/exercise/main.py | 1 | 1570 | #!/usr/bin/python
""" The objective of this exercise is to recreate the decision
boundary found in the lesson video, and make a plot that
visually shows the decision boundary """
import sys
sys.path.append("../../tools")
from prep_terrain_data import makeTerrainData
from sklearn.metrics import accuracy_score
from class_vis import prettyPicture, output_image
from classify_NB import classify, NB_accuracy
import numpy as np
import pylab as pl
from PIL import Image
features_train, labels_train, features_test, labels_test = makeTerrainData()
# the training data (features_train, labels_train) have both "fast" and "slow" points mixed
# in together--separate them so we can give them different colors in the scatterplot,
# and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
clf = classify(features_train, labels_train)
# draw the decision boundary with the text points overlaid
prettyPicture(clf, features_test, labels_test, f_name="bayes.png")
Image.open('bayes.png').show()
# JSON object to read data:
# output_image("test.png", "png", open("test.png", "rb").read())
pred = clf.predict(features_test)
print "Naive Bayes accuracy: %r" % accuracy_score(labels_test, pred)
| gpl-3.0 |
beyondvalence/biof509_wtl | Wk02/genetic_algorithm.py | 1 | 4420 | """Module to find shortest path connecting series of points
genetic_algorithm_optimizer accepts a set of coordinates,
cost function, new path function, population size, and
number of generations to return the optimized path, optimized distance,
and the other paths and distances.
20160218 Wayne Liu
"""
import itertools
import math
import matplotlib.pyplot as plt
import numpy as np
import random
# %matplotlib inline  # IPython magic; commented out so the module stays importable as plain Python
print("Numpy:", np.__version__)
random.seed(0)
def distance(coords):
    """Returns the total Euclidean length of the path visiting coords in order."""
    total = 0
    for p1, p2 in zip(coords[:-1], coords[1:]):
        total += ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5
    return total
def select_best(population, cost_func, num_to_keep):
"""Selects best specified population based on the cost function
Arguments:
population -- List of shuffled coordinates
cost_func -- Function deriving optimized metric
num_to_keep -- Number of best population to keep
Returns:
List of best optimized specified number of coordinates
"""
scored_population = [(i, cost_func(i)) for i in population]
scored_population.sort(key=lambda x: x[1])
return [i[0] for i in scored_population[:num_to_keep]]
def new_path(existing_path):
"""Reorders list of coordinates
Arguments:
existing_path -- List of coordinates, e.g. [(0,0), (1,1)]
Returns:
path -- List of reordered coordinates, e.g. [(0,0), (1,1)]
"""
path = existing_path[:]
# switches three points instead of two, marginally better
point = random.randint(0, len(path)-3)
path[point+2], path[point+1], path[point] = path[point], path[point+2], path[point+1]
# print(point)
return path
def recombine(population):
"""Recombines random two halves of two random sets of coordinates
Argument:
population -- List of coordinates, e.g. [(0,0), (1,1)]
Returns:
child -- A set of coordinates, recombined from two random sets of coordinates, e.g. [(9,9), (2,3)]
"""
# Randomly choose two parents
options = list(range(len(population)))
random.shuffle(options)
partner1 = options[0]
partner2 = options[1]
# Choose a split point, take the first parents order to that split point,
# then the second parents order for all remaining points
split_point = random.randint(0, len(population[0])-1)
child = population[partner1][:split_point]
for point in population[partner2]:
if point not in child:
child.append(point)
return child
def genetic_algorithm_optimizer(starting_path, cost_func, new_path_func, pop_size, generations):
"""Selects best path from set of coordinates by randomly joining two sets of coordinates
Arguments:
starting_path -- List of coordinates, e.g. [(0,0), (1,1)]
cost_func -- Optimization metric calculator, e.g. distance()
new_path_func -- Returns reordered coordinates, e.g. new_path()
pop_size -- Number of each set of coordinates in each generation, 500
generations -- Number of iterations, 100
Returns:
    best_path -- The best path found, a list of coordinates, e.g. [(2,3), (5,6)]
    best_cost -- The distance of the best path
    history -- List of dicts recording the best cost at each generation
"""
# Create a starting population by randomly shuffling the points
population = []
for i in range(pop_size):
new_path = starting_path[:]
random.shuffle(new_path)
population.append(new_path)
history = []
# Take the top 25% of routes and recombine to create new routes, repeating for generations
for i in range(generations):
pop_best = select_best(population, cost_func, int(pop_size / 4))
new_population = []
        for _ in range(pop_size):  # use _ so the generation counter i is not overwritten
new_population.append(new_path_func(recombine(pop_best))) # use new path to scramble the order
population = new_population
record = {'generation':i, 'current_cost':cost_func(population[0]),}
history.append(record)
return (population[0], cost_func(population[0]), history)
coords = [(0,0), (10,5), (10,10), (5,10), (3,3), (3,7), (12,3), (10,11)]
best_path, best_cost, history = genetic_algorithm_optimizer(coords, distance, new_path, 500, 100)
print(best_cost)
plt.plot([i['current_cost'] for i in history])
plt.show()
plt.plot([i[0] for i in best_path], [i[1] for i in best_path])
plt.show() | mit |
bospetersen/h2o-3 | h2o-py/tests/testdir_misc/pyunit_frame_as_list.py | 1 | 1056 | import sys
sys.path.insert(1, "../../")
import h2o, tests
def frame_as_list(ip,port):
iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
prostate = h2o.import_file(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
airlines = h2o.import_file(path=h2o.locate("smalldata/airlines/allyears2k.zip"))
res1 = h2o.as_list(iris, use_pandas=False)
assert abs(float(res1[9][0]) - 4.4) < 1e-10 and abs(float(res1[9][1]) - 2.9) < 1e-10 and \
abs(float(res1[9][2]) - 1.4) < 1e-10, "incorrect values"
res2 = h2o.as_list(prostate, use_pandas=False)
assert abs(float(res2[7][0]) - 7) < 1e-10 and abs(float(res2[7][1]) - 0) < 1e-10 and \
abs(float(res2[7][2]) - 68) < 1e-10, "incorrect values"
res3 = h2o.as_list(airlines, use_pandas=False)
assert abs(float(res3[4][0]) - 1987) < 1e-10 and abs(float(res3[4][1]) - 10) < 1e-10 and \
abs(float(res3[4][2]) - 18) < 1e-10, "incorrect values"
if __name__ == "__main__":
tests.run_test(sys.argv, frame_as_list)
| apache-2.0 |
kazemakase/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
dingocuster/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better to do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
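# Hedged addition (not in the original example): the fitted search also exposes
# the winning parameter combination, and the refitted best pipeline can be
# applied directly to new samples.
print(grid_search.best_params_)
print(grid_search.predict(X[:5]))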
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
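# Hedged usage note (not part of the original script): run it as, e.g.,
#   python svm_gui.py --output /tmp/points.svmlight
# then left-click to add positive examples, right-click to add negative ones,
# press Fit to train, and the clicked points are dumped in svmlight format to
# the --output path after the window is closed.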
| bsd-3-clause |
drewlinsley/draw_classify | draw/datasets/package_sketch_images.py | 1 | 5086 | #Import libraries for doing image analysis
from skimage.io import imread
from skimage.transform import resize
from sklearn.ensemble import RandomForestClassifier as RF
import glob
import os
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold as KFold
from sklearn.metrics import classification_report
from matplotlib import pyplot as plt
from matplotlib import colors
from pylab import cm
from skimage import segmentation
from skimage.morphology import watershed
from skimage import measure
from skimage import morphology
import numpy as np
import pandas as pd
from scipy import ndimage
from skimage.feature import peak_local_max
import multiprocessing as mp
import theano
from fuel.datasets import IterableDataset, IndexableDataset
import commands
import re
def process(fname):
image = imread(fname, as_grey=True)
imagethr = np.where(image > np.mean(image),0.,1.0)
return imagethr.ravel().astype(np.int64)
def assign_datastream(X,y):
n_labels = np.unique(y).shape[0]
y = np.eye(n_labels)[y]
# Reassign dataset
dataset = IndexableDataset({'features': X.astype(np.float64),'targets': y.astype(np.uint8)},sources=('features','targets')) #may ask to cast X as float32
#dataset = IndexableDataset({'features': X.astype(np.float32),'targets': y.astype(np.int32)},sources=('features','targets')) #may ask to cast X as float32
return dataset
def import_sketch(data_dir):
# make graphics inline
#get_ipython().magic(u'matplotlib inline')
find_string = u'find ' + data_dir + ' -name "*.jpg"'
file_string = commands.getoutput(find_string)
files = re.split('\n',file_string)
#files = get_ipython().getoutput(u'find ' + data_dir + ' -name "*.jpg"')
#len(files)
#outpath = '/Users/drewlinsley/Documents/draw/draw/datasets'
#datasource = 'sketch_uint8_shuffle'
#plt.figure(figsize=(12,3))
#image = imread(files[0], as_grey=True)
#imagethr = np.where(image > np.mean(image),0.,1.0)
#plt.subplot(1,3,1)
#plt.imshow(imagethr, cmap=cm.gray);
#imdilated = morphology.dilation(imagethr, np.ones((16,16)))
#plt.subplot(1,3,2)
#plt.imshow(imdilated, cmap=cm.gray);
#im1 = resize(imdilated,[56,56])
#plt.subplot(1,3,3)
#plt.imshow(im1, cmap=cm.gray);
#plt.show()
NUM_PROCESSES = 8
pool = mp.Pool(NUM_PROCESSES)
results = pool.map(process, files, chunksize=100)
pool.close()
pool.join()
y = np.array(map(lambda f: f.split('_')[-2], files))
y = y.reshape(-1,1)
y = y.astype(np.int64)
#y.reshape(-1,1)
X = np.array(results)
N, image_size = X.shape
D = int(np.sqrt(image_size))
N, image_size, D
num_els = y.shape[0]
test_size = int(num_els * (.1/2)) #/2 because +/- types
pos_test_id = np.asarray(range(0,test_size))
neg_test_id = np.asarray(range(num_els - test_size,num_els))
train_id = np.asarray(range(test_size, num_els - test_size))
test_y = y[np.hstack((pos_test_id,neg_test_id))]
test_X = X[np.hstack((pos_test_id,neg_test_id))]
N_test = test_y.shape[0]
np.sum(test_y)
train_y = y[train_id]
train_X = X[train_id]
N_train = train_y.shape[0]
np.sum(train_y)
import random
test_s = random.sample(xrange(test_y.shape[0]),test_y.shape[0])
train_s = random.sample(xrange(train_y.shape[0]),train_y.shape[0])
test_X=test_X[test_s]
train_X=train_X[train_s]
test_y=test_y[test_s]
train_y=train_y[train_s]
train_y.dtype
return test_X, train_X, test_y, train_y
#import fuel
#datasource_dir = os.path.join(outpath, datasource)
#get_ipython().system(u'mkdir -p {datasource_dir}')
#datasource_fname = os.path.join(datasource_dir , datasource+'.hdf5')
#datasource_fname
# In[132]:
#import h5py
#fp = h5py.File(datasource_fname, mode='w')
#image_features = fp.create_dataset('features', (N, image_size), dtype='uint8')
# In[133]:
# image_features[...] = np.vstack((train_X,test_X))
# # In[134]:
# targets = fp.create_dataset('targets', (N, 1), dtype='uint8')
# # In[135]:
# targets[...] = np.vstack((train_y,test_y)).reshape(-1,1)
# # In[136]:
# from fuel.datasets.hdf5 import H5PYDataset
# split_dict = {
# 'train': {'features': (0, N_train), 'targets': (0, N_train)},
# 'test': {'features': (N_train, N), 'targets': (N_train, N)}
# }
# fp.attrs['split'] = H5PYDataset.create_split_array(split_dict)
# # In[137]:
# fp.flush()
# fp.close()
# # In[138]:
# get_ipython().system(u'ls -l {datasource_fname}')
# # In[139]:
# #!aws s3 cp {datasource_fname} s3://udidraw/ --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers
# # #Look at training
# # In[140]:
# train_set = H5PYDataset(datasource_fname, which_sets=('train',))
# # In[141]:
# train_set.num_examples
# # In[142]:
# train_set.provides_sources
# # In[143]:
# handle = train_set.open()
# data = train_set.get_data(handle, slice(0, 16))
# data[0].shape,data[1].shape
# # In[144]:
# data[1]
# # In[145]:
# plt.figure(figsize=(12,12))
# for i in range(16):
# plt.subplot(4,4,i+1)
# plt.imshow(data[0][i].reshape(D,D), cmap=cm.gray)
# plt.title(data[1][i][0]);
# # In[146]:
# train_set.close(handle)
| mit |
mcdeaton13/dynamic | Data/Calibration/Firm Calibration Python/parameters/depreciation/depreciation_calibration.py | 2 | 2016 | """
Depreciation Rate Calibration (depreciation_calibration.py):
-------------------------------------------------------------------------------
Last updated: 6/26/2015.
This module calibrates the firm economic and tax depreciation parameters.
"""
# Packages:
import os.path
import sys
import numpy as np
import pandas as pd
# Directories:
_CUR_DIR = os.path.dirname(__file__)
_DATA_DIR = os.path.join(_CUR_DIR, "data")
_PROC_DIR = os.path.join(_CUR_DIR, "processing")
_OUT_DIR = os.path.join(_CUR_DIR, "output")
# Importing custom modules:
import naics_processing as naics
import constants as cst
# Importing depreciation helper custom modules:
sys.path.append(_PROC_DIR)
import calc_rates as calc_rates
import read_bea as read_bea
import read_inventories as read_inv
import read_land as read_land
# Dataframe names:
_CODE_DF_NM = cst.CODE_DF_NM
# Dataframe column names:
_CORP_TAX_SECTORS_NMS_DICT = cst.CORP_TAX_SECTORS_NMS_DICT
_CORP_NMS = _CORP_TAX_SECTORS_NMS_DICT.values()
_NON_CORP_TAX_SECTORS_NMS_DICT = cst.NON_CORP_TAX_SECTORS_NMS_DICT
_NCORP_NMS = _NON_CORP_TAX_SECTORS_NMS_DICT.values()
def init_depr_rates(asset_tree=naics.generate_tree(), get_econ=False,
get_tax_est=False, get_tax_150=False,
get_tax_200=False, get_tax_sl=False,
get_tax_ads=False, soi_from_out=False,
output_data=False):
""" This fun
"""
# Calculating the fixed asset data:
fixed_asset_tree = read_bea.read_bea(asset_tree)
# Calculating the inventory data:
inv_tree = read_inv.read_inventories(asset_tree)
# Calculating the land data:
land_tree = read_land.read_land(asset_tree)
# Calculating the depreciation rates:
econ_depr_tree = calc_rates.calc_depr_rates(fixed_asset_tree, inv_tree, land_tree)
tax_depr_tree = calc_rates.calc_tax_depr_rates(fixed_asset_tree, inv_tree, land_tree)
#naics.pop_rates(tax_depr_tree)
return {"Econ": econ_depr_tree, "Tax": tax_depr_tree}
| mit |
crisojog/vqa_research | preprocess.py | 1 | 21353 | import argparse
import cPickle as pickle
import os
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import spacy
import json
from keras.applications.inception_v3 import InceptionV3
from keras.applications.xception import Xception
from keras.applications.resnet50 import ResNet50
from resnet_152 import ResNet152
from keras.applications.vgg19 import VGG19
from keras.models import Model
from keras.preprocessing import image
from keras.applications import imagenet_utils
from keras.applications.inception_v3 import preprocess_input
from tqdm import tqdm
from VQA.PythonHelperTools.vqaTools.vqa import VQA
def get_img_model(img_model_type):
if img_model_type == "vgg19":
print ("Loading VGG19 model")
base_model = VGG19(weights='imagenet', include_top=True)
return Model(inputs=base_model.input, outputs=base_model.get_layer('fc2').output)
elif img_model_type == "vgg19_multi":
print ("Loading VGG19-early-cut model")
return VGG19(weights='imagenet', include_top=False)
elif img_model_type == "resnet50":
print ("Loading ResNet50 model")
return ResNet50(weights='imagenet', include_top=False)
elif img_model_type == "resnet50_multi":
print ("Loading ResNet50-early-cut model")
base_model = ResNet50(weights='imagenet', include_top=False)
return Model(inputs=base_model.input, outputs=base_model.layers[-2].output)
elif img_model_type == "resnet152":
print ("Loading ResNet152 model")
return ResNet152(224, 224, 3, include_top=True)
elif img_model_type == "resnet152_multi":
print ("Loading ResNet152-early-cut model")
return ResNet152(224, 224, 3, include_top=False)
elif img_model_type == "inception":
print ("Loading InceptionV3 model")
base_model = InceptionV3(weights='imagenet', include_top=True)
return Model(inputs=base_model.input, outputs=base_model.layers[-2].output)
elif img_model_type == "inception_multi":
print ("Loading InceptionV3-early-cut model")
return InceptionV3(weights='imagenet', include_top=False)
def get_preprocess_function(img_model_type):
if img_model_type in ["inception", "xception"]:
return preprocess_input
return imagenet_utils.preprocess_input
def get_most_common_answers(vqa_train, vqa_val, num_answers, ans_types, show_top_ans=False, use_test=False):
ans_dict = {}
annIds_train = vqa_train.getQuesIds(ansTypes=ans_types)
anns = vqa_train.loadQA(annIds_train)
if use_test:
annIds_val = vqa_val.getQuesIds(ansTypes=ans_types)
anns += vqa_val.loadQA(annIds_val)
for ann in anns:
# answer = ann['multiple_choice_answer'].lower()
for ans in ann['answers']:
answer = ans['answer'].lower()
if answer in ans_dict:
ans_dict[answer] += 1
else:
ans_dict[answer] = 1
sorted_ans_dict = sorted(ans_dict.items(), key=itemgetter(1), reverse=True)
if show_top_ans:
# Some bar plots
num_ans_plot = 20
total_ans = 0
for (x, y) in sorted_ans_dict: total_ans += y
plt.bar(range(1, num_ans_plot + 1), [float(y) / total_ans * 100 for (x, y) in sorted_ans_dict[0:num_ans_plot]],
0.9, color='b')
plt.xticks(range(1, num_ans_plot + 1), [x for (x, y) in sorted_ans_dict[0:num_ans_plot]])
plt.title("Most Common Answer Frequencies")
plt.show()
sorted_ans_dict = [x for (x, y) in sorted_ans_dict]
sorted_ans_dict = sorted_ans_dict[0:num_answers]
ans_to_id = dict((a, i) for i, a in enumerate(sorted_ans_dict))
id_to_ans = dict((i, a) for i, a in enumerate(sorted_ans_dict))
return ans_to_id, id_to_ans
def process_question(vqa, ann, nlp, question_word_vec_map, tokens_dict, question_tokens_map):
quesId = ann['question_id']
if quesId in question_word_vec_map:
return question_word_vec_map[quesId], question_tokens_map[quesId]
question = nlp(vqa.qqa[quesId]['question'])
question_word_vec = [w.vector for w in question]
question_len = len(question)
question_tokens = [0] * question_len
for i in range(question_len):
token = question[i]
token_l = token.lower_
if token.has_vector and token_l in tokens_dict:
question_tokens[i] = tokens_dict[token_l]
return np.array(question_word_vec), np.array(question_tokens)
def process_answer(ann, data, ans_map, ans_to_id, id_to_ans):
quesId = ann['question_id']
if quesId in ans_map:
return ans_map[quesId]
answer = ann['multiple_choice_answer'].lower()
if answer in ans_to_id:
return ans_to_id[answer]
elif data == "val":
return -1
else:
return None
def process_img(img_model, preprocess, imgId, dataSubType, imgDir, input_shape=(224, 224), output_shape=(4096,)):
imgFilename = 'COCO_' + dataSubType + '_' + str(imgId).zfill(12) + '.jpg'
if os.path.isfile(imgDir + imgFilename):
img = image.load_img(imgDir + imgFilename, target_size=input_shape)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess(x)
features = img_model.predict(x)
features = np.reshape(features[0], output_shape)
return features
else:
return None
def get_input_shape(img_model_name):
if img_model_name in ['inception', 'xception']:
return (299, 299)
return (224, 224)
def get_output_shape(img_model_name):
if img_model_name == 'vgg19':
return (4096,)
elif img_model_name == 'vgg19_multi':
return (49, 512)
elif img_model_name in ['resnet50', 'inception', 'xception', 'resnet152']:
return (2048,)
elif img_model_name in ['resnet50_multi', 'resnet152_multi']:
return (49, 2048)
elif img_model_name == 'inception_multi':
return (64, 2048)
def process_questions(vqa, data, nlp, overwrite, tokens_dict,
question_word_vec_map=None, question_tokens_map=None):
if question_word_vec_map is None:
question_word_vec_map = {}
if question_tokens_map is None:
question_tokens_map = {}
filename = "data/%s_questions.pkl" % data
filename_tokens = "data/%s_tokens_questions.pkl" % data
if os.path.exists(filename) and os.path.exists(filename_tokens) and not overwrite:
return question_word_vec_map, question_tokens_map
annIds = vqa.getQuesIds()
anns = vqa.loadQA(annIds)
for ann in tqdm(anns):
quesId = int(ann['question_id'])
if quesId in question_word_vec_map:
continue
question, question_tokens = process_question(vqa, ann, nlp, question_word_vec_map,
tokens_dict, question_tokens_map)
if question is None:
continue
question_word_vec_map[quesId] = question
question_tokens_map[quesId] = question_tokens
f = open(filename, "w")
pickle.dump(question_word_vec_map, f, pickle.HIGHEST_PROTOCOL)
f.close()
f = open(filename_tokens, "w")
pickle.dump(question_tokens_map, f, pickle.HIGHEST_PROTOCOL)
f.close()
return question_word_vec_map, question_tokens_map
def process_answers(vqa, data, ans_types, ans_to_id, id_to_ans, overwrite, ans_map=None):
if ans_map is None:
ans_map = {}
if not ans_types:
filename = "data/%s_answers.pkl" % data
else:
filename = "data/%s_answers_%s.pkl" % (data, ans_types.replace("/", ""))
if not os.path.exists(filename) or overwrite:
annIds = vqa.getQuesIds(ansTypes=ans_types)
anns = vqa.loadQA(annIds)
for ann in tqdm(anns):
quesId = int(ann['question_id'])
if quesId in ans_map:
continue
answer = process_answer(ann, data, ans_map, ans_to_id, id_to_ans)
if answer is None:
continue
ans_map[quesId] = answer
f = open(filename, "w")
pickle.dump(ans_map, f, pickle.HIGHEST_PROTOCOL)
f.close()
return ans_map
def process_images(img_model, preprocess, vqa, data, data_sub_type, img_dir, img_model_name, overwrite, img_map=None):
if img_map is None:
img_map = {}
filename = "data/%s_images.pkl" % data
if not os.path.exists(filename) or overwrite:
annIds = vqa.getQuesIds()
anns = vqa.loadQA(annIds)
input_shape = get_input_shape(img_model_name)
output_shape = get_output_shape(img_model_name)
for ann in tqdm(anns):
imgId = int(ann['image_id'])
if imgId in img_map:
continue
img = process_img(img_model, preprocess, ann['image_id'], data_sub_type, img_dir, input_shape, output_shape)
if img is None:
continue
img_map[imgId] = img
print "Saving %d images in %s" % (len(img_map), filename)
f = open(filename, "w")
pickle.dump(img_map, f, pickle.HIGHEST_PROTOCOL)
f.close()
return img_map
def process_ques_to_img(vqa, data, overwrite, ques_to_img=None):
if ques_to_img is None:
ques_to_img = {}
filename = "data/%s_ques_to_img.pkl" % data
if not os.path.exists(filename) or overwrite:
annIds = vqa.getQuesIds()
anns = vqa.loadQA(annIds)
for ann in tqdm(anns):
quesId = int(ann['question_id'])
imgId = int(ann['image_id'])
ques_to_img[quesId] = imgId
f = open(filename, "w")
pickle.dump(ques_to_img, f, pickle.HIGHEST_PROTOCOL)
f.close()
return ques_to_img
def process_questions_test(dataFile, data, nlp, overwrite, tokens_dict,
question_word_vec_map=None, question_tokens_map=None):
if question_word_vec_map is None:
question_word_vec_map = {}
if question_tokens_map is None:
question_tokens_map = {}
filename = "data/%s_questions.pkl" % data
filename_tokens = "data/%s_tokens_questions.pkl" % data
if os.path.exists(filename) and os.path.exists(filename_tokens) and not overwrite:
return
dataset = json.load(open(dataFile, 'r'))
for question in tqdm(dataset['questions']):
quesId = question['question_id']
questext = question['question']
ques_nlp = nlp(questext)
question_word_vec = [w.vector for w in ques_nlp]
question_word_vec_map[quesId] = question_word_vec
question_len = len(ques_nlp)
question_tokens = [0] * question_len
for i in range(question_len):
token = ques_nlp[i]
token_l = token.lower_
if token.has_vector and token_l in tokens_dict:
question_tokens[i] = tokens_dict[token_l]
question_tokens_map[quesId] = question_tokens
f = open(filename, "w")
pickle.dump(question_word_vec_map, f, pickle.HIGHEST_PROTOCOL)
f.close()
f = open(filename_tokens, "w")
pickle.dump(question_tokens_map, f, pickle.HIGHEST_PROTOCOL)
f.close()
def process_images_test(img_model, preprocess, data, dataFile, dataSubType, imgDir, img_model_name, overwrite, img_map=None):
if img_map is None:
img_map = {}
filename = "data/%s_images.pkl" % data
if not os.path.exists(filename) or overwrite:
dataset = json.load(open(dataFile, 'r'))
input_shape = get_input_shape(img_model_name)
output_shape = get_output_shape(img_model_name)
for question in tqdm(dataset['questions']):
imgId = question['image_id']
if imgId in img_map:
continue
img = process_img(img_model, preprocess, imgId, dataSubType, imgDir, input_shape, output_shape)
if img is None:
continue
img_map[imgId] = img
f = open(filename, "w")
pickle.dump(img_map, f, pickle.HIGHEST_PROTOCOL)
f.close()
def process_ques_to_img_test(dataFile, data, overwrite, ques_to_img=None):
if ques_to_img is None:
ques_to_img = {}
filename = "data/%s_ques_to_img.pkl" % data
if not os.path.exists(filename) or overwrite:
dataset = json.load(open(dataFile, 'r'))
for question in tqdm(dataset['questions']):
quesId = question['question_id']
imgId = question['image_id']
ques_to_img[quesId] = imgId
f = open(filename, "w")
pickle.dump(ques_to_img, f, pickle.HIGHEST_PROTOCOL)
f.close()
def get_most_common_tokens(vqa, nlp, tokens_dict, dataFile=None):
if not dataFile:
annIds = vqa.getQuesIds()
anns = vqa.loadQA(annIds)
for ann in tqdm(anns):
quesId = int(ann['question_id'])
question = nlp(vqa.qqa[quesId]['question'])
question_tokens = [w.lower_ for w in question]
for token in question_tokens:
if token in tokens_dict:
tokens_dict[token] += 1
else:
tokens_dict[token] = 1
return
# get tokens from the test set
dataset = json.load(open(dataFile, 'r'))
for question in tqdm(dataset['questions']):
questext = question['question']
ques_nlp = nlp(questext)
question_tokens = [w.lower_ for w in ques_nlp]
for token in question_tokens:
if token in tokens_dict:
tokens_dict[token] += 1
else:
tokens_dict[token] = 1
def get_tokens_dict(vqa_train, vqa_val, dataFile_test, nlp, word_embedding_dim):
tokens_dict = {}
get_most_common_tokens(vqa_train, nlp, tokens_dict)
get_most_common_tokens(vqa_val, nlp, tokens_dict)
get_most_common_tokens(None, nlp, tokens_dict, dataFile=dataFile_test)
tokens_dict = sorted(tokens_dict.items(), key=lambda x: x[1])
tokens_with_embedding = [(key, value) for (key, value) in tokens_dict if (nlp(key)).has_vector]
# index 0 will be for unknown tokens or for tokens without word vectors
index = 1
tokens_dict = {}
tokens_embedding = [np.array([0.] * word_embedding_dim)]
for (key, _) in tokens_with_embedding:
tokens_dict[key] = index
tokens_embedding.append(nlp(key).vector)
index += 1
f = open("data/tokens_embedding.pkl", "w")
pickle.dump(np.array(tokens_embedding), f, pickle.HIGHEST_PROTOCOL)
f.close()
return tokens_dict
def process_data(vqa_train, dataSubType_train, imgDir_train,
vqa_val, dataSubType_val, imgDir_val,
dataSubType_test, dataFile_test, imgDir_test,
nlp, img_model, preprocess, ans_to_id, id_to_ans, params):
ans_types = params['ans_types']
only = params['only']
img_model_name = params['img_model']
overwrite = params['overwrite']
use_tests = params['use_test']
word_embedding_dim = params['word_embedding_dim']
if only == 'all' or only == 'ques':
print "Obtaining tokens from all datasets"
tokens_dict = get_tokens_dict(vqa_train, vqa_val, dataFile_test, nlp, word_embedding_dim)
print "Processing train questions"
if not use_tests:
process_questions(vqa_train, "train", nlp, overwrite, tokens_dict)
else:
ques_train_map, ques_tokens_train_map = process_questions(vqa_train, "train_val", nlp,
overwrite, tokens_dict)
process_questions(vqa_val, "train_val", nlp, overwrite, tokens_dict, ques_train_map, ques_tokens_train_map)
if only == 'all' or only == 'ans':
print "Processing train answers"
if not use_tests:
process_answers(vqa_train, "train", ans_types, ans_to_id, id_to_ans, overwrite)
else:
ans_map = process_answers(vqa_train, "train_val", ans_types, ans_to_id, id_to_ans, overwrite)
process_answers(vqa_val, "train_val", ans_types, ans_to_id, id_to_ans, overwrite, ans_map)
if only == 'all' or only == 'img':
print "Processing train images"
if not use_tests:
process_images(img_model, preprocess, vqa_train, "train", dataSubType_train,
imgDir_train, img_model_name, overwrite)
else:
img_map = process_images(img_model, preprocess, vqa_train, "train_val", dataSubType_train,
imgDir_train, img_model_name, overwrite)
process_images(img_model, preprocess, vqa_val, "train_val", dataSubType_val,
imgDir_val, img_model_name, overwrite, img_map)
if only == 'all' or only == 'ques_to_img':
print "Processing train question id to image id mapping"
if not use_tests:
process_ques_to_img(vqa_train, "train", overwrite)
else:
ques_to_img = process_ques_to_img(vqa_train, "train_val", overwrite)
process_ques_to_img(vqa_val, "train_val", overwrite, ques_to_img)
print "Done"
# -------------------------------------------------------------------------------------------------
if only == 'all' or only == 'ques':
print "Processing validation questions"
if not use_tests:
process_questions(vqa_val, "val", nlp, overwrite, tokens_dict)
else:
process_questions_test(dataFile_test, "test", nlp, overwrite, tokens_dict)
if only == 'all' or only == 'ans':
print "Processing validation answers"
if not use_tests:
process_answers(vqa_val, "val", ans_types, ans_to_id, id_to_ans, overwrite)
else:
print "Skipping answers for test set"
if only == 'all' or only == 'img':
print "Processing validation images"
if not use_tests:
process_images(img_model, preprocess, vqa_val, "val", dataSubType_val, imgDir_val, img_model_name, overwrite)
else:
process_images_test(img_model, preprocess, "test", dataFile_test, "test2015", imgDir_test,
img_model_name, overwrite)
if only == 'all' or only == 'ques_to_img':
print "Processing validation question id to image id mapping"
if not use_tests:
process_ques_to_img(vqa_val, "val", overwrite)
else:
process_ques_to_img_test(dataFile_test, "test", overwrite)
print "Done"
def main(params):
dataDir = 'VQA'
taskType = 'OpenEnded'
dataType = 'mscoco'
dataSubType_train = 'train2014'
annFile_train = '%s/Annotations/%s_%s_annotations.json' % (dataDir, dataType, dataSubType_train)
quesFile_train = '%s/Questions/%s_%s_%s_questions.json' % (dataDir, taskType, dataType, dataSubType_train)
imgDir_train = '%s/Images/%s/%s/' % (dataDir, dataType, dataSubType_train)
vqa_train = VQA(annFile_train, quesFile_train)
dataSubType_val = 'val2014'
annFile_val = '%s/Annotations/%s_%s_annotations.json' % (dataDir, dataType, dataSubType_val)
quesFile_val = '%s/Questions/%s_%s_%s_questions.json' % (dataDir, taskType, dataType, dataSubType_val)
imgDir_val = '%s/Images/%s/%s/' % (dataDir, dataType, dataSubType_val)
vqa_val = VQA(annFile_val, quesFile_val)
dataSubType_test = 'test-dev2015' # Hardcoded for test-dev
quesFile_test = '%s/Questions/%s_%s_%s_questions.json' % (dataDir, taskType, dataType, dataSubType_test)
imgDir_test = '%s/Images/%s/%s/' % (dataDir, dataType, 'test2015')
nlp = spacy.load('en_vectors_glove_md')
ans_to_id, id_to_ans = get_most_common_answers(vqa_train, vqa_val, int(params['num_answers']), params['ans_types'],
params['show_top_ans'], params['use_test'])
img_model = get_img_model(params['img_model'])
preprocess = get_preprocess_function(params['img_model'])
process_data(vqa_train, dataSubType_train, imgDir_train,
vqa_val, dataSubType_val, imgDir_val,
dataSubType_test, quesFile_test, imgDir_test,
nlp, img_model, preprocess, ans_to_id, id_to_ans, params)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ans_types', default=[], help='filter questions with specific answer types')
parser.add_argument('--num_answers', default=1000, type=int, help='number of top answers to classify')
parser.add_argument('--word_embedding_dim', default=300, type=int, help='word embedding dimension for one word')
parser.add_argument('--img_model', default='resnet50', help='which image model to use for embeddings')
parser.add_argument('--only', default='all', help='which data to preprocess (all, ques, ans, img, ques_to_img)')
parser.add_argument('--use_test', dest='use_test', action='store_true',
help='use test set (which also means training on train+val')
parser.set_defaults(use_test=False)
parser.add_argument('--show_top_ans', dest='show_top_ans', action='store_true', help='show plot with top answers')
parser.set_defaults(show_top_ans=False)
parser.add_argument('--overwrite', dest='overwrite', action='store_true', help='force overwrite')
parser.set_defaults(overwrite=False)
args = parser.parse_args()
params = vars(args)
main(params)
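# Hedged usage note (not part of the original script); it assumes the VQA
# annotations, questions, and images are laid out under VQA/ as expected by
# main(), e.g.:
#   python preprocess.py --img_model resnet50 --num_answers 1000 --only all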
| mit |
HeraclesHX/scikit-learn | sklearn/metrics/pairwise.py | 104 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half of the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
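# Hedged illustration (not part of scikit-learn itself): the note above can be
# checked numerically; on L2-normalized rows, 1 - <x, y> equals .5 * ||x - y||^2.
# >>> rng = np.random.RandomState(0)
# >>> A, B = normalize(rng.rand(4, 3)), normalize(rng.rand(4, 3))
# >>> np.allclose(paired_cosine_distances(A, B),
# ...             .5 * paired_euclidean_distances(A, B) ** 2)
# True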
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
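# Example (illustrative sketch, as comments): for x = [0, 0], y = [1, 1] and
# gamma = 0.5, K(x, y) = exp(-0.5 * ||x - y||^2) = exp(-1) ~ 0.368.
#
#   >>> rbf_kernel([[0., 0.], [1., 1.]], gamma=0.5)
#   array([[ 1.        ,  0.36787944],
#          [ 0.36787944,  1.        ]])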
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
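# Example (illustrative sketch, as comments): as stated in the docstring,
# cosine_similarity coincides with linear_kernel on L2-normalized rows.
#
#   >>> X = [[1., 0.], [1., 1.]]
#   >>> cosine_similarity(X)
#   array([[ 1.        ,  0.70710678],
#          [ 0.70710678,  1.        ]])
#   >>> np.allclose(cosine_similarity(X), linear_kernel(normalize(X)))
#   True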
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
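# Example (illustrative sketch, as comments): for the default gamma=1. the
# exponential chi2 kernel is simply exp(additive_chi2_kernel(X, Y)).
#
#   >>> X = np.array([[.5, .5], [.9, .1]])
#   >>> np.allclose(chi2_kernel(X), np.exp(additive_chi2_kernel(X)))
#   True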
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
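# Example (illustrative sketch, as comments): the dispatch branches of
# pairwise_distances above -- a known string uses the scikit-learn
# implementation, while a callable goes through _pairwise_callable.
#
#   >>> X = np.array([[0., 0.], [3., 4.]])
#   >>> pairwise_distances(X, metric='euclidean')
#   array([[ 0.,  5.],
#          [ 5.,  0.]])
#   >>> pairwise_distances(X, metric=lambda a, b: np.abs(a - b).max())
#   array([[ 0.,  4.],
#          [ 4.,  0.]])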
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
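# Example (illustrative sketch, as comments): filter_params=True drops keyword
# arguments that the chosen kernel does not accept, using KERNEL_PARAMS above
# ('degree' is filtered out for the rbf kernel below).
#
#   >>> X = [[0., 1.], [1., 1.]]
#   >>> K = pairwise_kernels(X, metric='rbf', gamma=0.5, degree=2,
#   ...                      filter_params=True)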
| bsd-3-clause |
hitlonewind/PR-experiment | Perceptron/perceptron.py | 1 | 3153 | #encoding=utf-8
import numpy as np
import random
from matplotlib import pyplot as plt
class Perceptron(object):
"""docstring for Perceptron"""
def __init__(self,study_step=0.0000001, study_total=10000):
super(Perceptron, self).__init__()
self.datadic = {}
self.label = {}
		self.study_step = study_step  # learning step size
self.study_total = study_total
self.loaddata()
def loaddata(self, fname ='demo.csv', labelfile='demoLabel.csv'):
self.data = np.loadtxt(fname, delimiter=",")
label = open(labelfile)
count = 0
for item in label:
self.label[count] = item[0:-1]
self.datadic[count] = self.data[count]
count += 1
pass
def train(self, trainlabel=str):
train_size = len(self.label)
datadim = len(self.data[0])
w = np.zeros((datadim, 1))
b = 0.0
		study_count = 0  # number of weight updates; incremented only when a sample is misclassified
		nochange_count = 0  # consecutive correctly classified samples; reset to 0 on a misclassification
nochange_upper_limit = 25000
count = 0
while True:
nochange_count += 1
if nochange_count > nochange_upper_limit:
print 'break0'
break
index = random.randint(0, train_size-1)
#index = count
count += 1
point = self.data[index]
label = self.label[index]
#yi = int(label)
if label == trainlabel:
yi = 1
else:
yi = -1
result = yi *(np.dot(point, w) + b )
if result <= 0:
item = np.reshape(self.data[index], (datadim, 1))
w += item*yi*self.study_step
b += yi * self.study_step
study_count += 1
if study_count > self.study_total:
print 'break1'
break
nochange_count = 0
if count > 10000:
count = 0
self.w = w
self.b = b
print type(w)
return w, b
def train_plot(self):
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("Perceptron")
#help(ax1.annotate)
plt.xlabel('x')
plt.ylabel('y')
color = ['r','b']
label = ['label1', 'label2']
marker = ['x', 'o']
count = 0
for index in self.data:
if self.label[count] == '1':
label = 'o'
pcolor = 'g'
else:
label = '^'
pcolor = 'b'
plt.scatter(index[0], index[1], marker=label,color=pcolor,alpha=0.6)
count += 1
x = range(0,3)
numx = np.array(x)
y = -((self.w[0])/(self.w[1]))*x - self.b/self.w[1]
k = str(-((self.w[0])/(self.w[1])))
b = str(- self.b/self.w[1])
ax1.annotate('y={0}x +{1} '.format(k[1:-1],b[1:-1]), (x[0],y[0]))
print 'k:{0}\n'.format(k)
print 'b:{0}'.format(b)
#print 'b:{0}\n'
plt.plot(x,y,marker='x',color='r')
plt.savefig('Perceptron.jpg')
plt.show()
pass
def test(self):
weigh = np.loadtxt('weight.csv', delimiter=',')
#b = -11188293700.0
b = -80793100.0
testbench = np.loadtxt('TestSamples.csv', delimiter=',')
re = np.dot(testbench, weigh) - b
count = 0
count2 = 0
for i in range(0, len(re)):
if self.label[i] == '9':
count += 1
if re[i] > 0:
count2 += 1
print count2
print count
print float(count2)/float(count)
P = Perceptron(100)
P.train('1')
P.train_plot()
| mit |
tbenthompson/tectosaur | tectosaur/continuity.py | 1 | 9699 | import numpy as np
import scipy.sparse.csgraph
from tectosaur.util.geometry import tri_normal, unscaled_normals, normalize
from tectosaur.constraints import ConstraintEQ, Term
from tectosaur.stress_constraints import stress_constraints, stress_constraints2, \
equilibrium_constraint, constant_stress_constraint
def find_touching_pts(tris):
max_pt_idx = np.max(tris)
out = [[] for i in range(max_pt_idx + 1)]
for i, t in enumerate(tris):
for d in range(3):
out[t[d]].append((i, d))
return out
def tri_connectivity_graph(tris):
n_tris = tris.shape[0]
touching = [[] for i in range(np.max(tris) + 1)]
for i in range(n_tris):
for d in range(3):
touching[tris[i,d]].append(i)
rows = []
cols = []
for i in range(len(touching)):
for row in touching[i]:
for col in touching[i]:
rows.append(row)
cols.append(col)
rows = np.array(rows)
cols = np.array(cols)
connectivity = scipy.sparse.coo_matrix((np.ones(rows.shape[0]), (rows, cols)), shape = (n_tris, n_tris))
return connectivity
def tri_side(tri1, tri2, threshold = 1e-12):
tri1_normal = tri_normal(tri1, normalize = True)
tri1_center = np.mean(tri1, axis = 0)
tri2_center = np.mean(tri2, axis = 0)
direction = tri2_center - tri1_center
direction /= np.linalg.norm(direction)
dot_val = direction.dot(tri1_normal)
if dot_val > threshold:
return 0
elif dot_val < -threshold:
return 1
else:
return 2
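# Example (illustrative sketch, as comments): for a triangle in the z = 0
# plane (upward normal), a neighbour centered above it is on side 0 and one
# centered below it is on side 1; a coplanar neighbour would return 2.
#
#   >>> tri = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
#   >>> tri_side(tri, tri + np.array([0., 0., 1.]))
#   0
#   >>> tri_side(tri, tri + np.array([0., 0., -1.]))
#   1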
def get_side_of_fault(pts, tris, fault_start_idx):
connectivity = tri_connectivity_graph(tris)
fault_touching_pair = np.where(np.logical_and(
connectivity.row < fault_start_idx,
connectivity.col >= fault_start_idx
))[0]
side = np.zeros(tris.shape[0])
shared_verts = np.zeros(tris.shape[0])
fault_surf_tris = pts[tris[connectivity.col[fault_touching_pair]]]
for i in range(fault_touching_pair.shape[0]):
surf_tri_idx = connectivity.row[fault_touching_pair[i]]
surf_tri = tris[surf_tri_idx]
fault_tri = tris[connectivity.col[fault_touching_pair[i]]]
which_side = tri_side(pts[fault_tri], pts[surf_tri])
n_shared_verts = 0
for d in range(3):
if surf_tri[d] in fault_tri:
n_shared_verts += 1
if shared_verts[surf_tri_idx] < 2:
side[surf_tri_idx] = int(which_side) + 1
shared_verts[surf_tri_idx] = n_shared_verts
return side
#TODO: this function needs to know the idxs of the surface_tris and fault_tris, so use
# idx lists and pass the full tris array, currently using the (n_surf_tris * 9) hack!
#TODO: refactor and merge this with the traction continuity constraints
def continuity_constraints(pts, tris, fault_start_idx, tensor_dim = 3):
surface_tris = tris[:fault_start_idx]
fault_tris = tris[fault_start_idx:]
touching_pt = find_touching_pts(surface_tris)
side = get_side_of_fault(pts, tris, fault_start_idx)
constraints = []
for i, tpt in enumerate(touching_pt):
if len(tpt) == 0:
continue
for independent_idx in range(len(tpt)):
independent = tpt[independent_idx]
independent_tri_idx = independent[0]
independent_corner_idx = independent[1]
independent_tri = surface_tris[independent_tri_idx]
for dependent_idx in range(independent_idx + 1, len(tpt)):
dependent = tpt[dependent_idx]
dependent_tri_idx = dependent[0]
dependent_corner_idx = dependent[1]
dependent_tri = surface_tris[dependent_tri_idx]
# Check for anything that touches across the fault.
side1 = side[independent_tri_idx]
side2 = side[dependent_tri_idx]
crosses = (side1 != side2) and (side1 != 0) and (side2 != 0)
fault_tri_idx = None
if crosses:
fault_tri_idxs, fault_corner_idxs = np.where(
fault_tris == dependent_tri[dependent_corner_idx]
)
if fault_tri_idxs.shape[0] != 0:
fault_tri_idx = fault_tri_idxs[0]
fault_corner_idx = fault_corner_idxs[0]
# plt_pts = np.vstack((
# pts[independent_tri],
# pts[dependent_tri],
# pts[fault_tris[fault_tri_idx]]
# ))
# import matplotlib.pyplot as plt
# plt.tripcolor(pts[:,0], pts[:,1], tris[:surface_tris.shape[0]], side[:surface_tris.shape[0]])
# plt.triplot(plt_pts[:,0], plt_pts[:,1], np.array([[0,1,2]]), 'b-')
# plt.triplot(plt_pts[:,0], plt_pts[:,1], np.array([[3,4,5]]), 'k-')
# plt.triplot(pts[:,0], pts[:,1], tris[fault_start_idx:], 'r-')
# plt.show()
for d in range(tensor_dim):
independent_dof = (independent_tri_idx * 3 + independent_corner_idx) * tensor_dim + d
dependent_dof = (dependent_tri_idx * 3 + dependent_corner_idx) * tensor_dim + d
if dependent_dof <= independent_dof:
continue
diff = 0.0
terms = [Term(1.0, dependent_dof), Term(-1.0, independent_dof)]
if fault_tri_idx is not None:
fault_dof = (
fault_start_idx * 9 +
fault_tri_idx * 9 + fault_corner_idx * 3 + d
)
if side1 < side2:
terms.append(Term(-1.0, fault_dof))
else:
terms.append(Term(1.0, fault_dof))
constraints.append(ConstraintEQ(terms, 0.0))
return constraints
def traction_admissibility_constraints(pts, tris, fault_start_idx):
# At each vertex, there should be three remaining degrees of freedom.
# Initially, there are n_tris*3 degrees of freedom.
# So, we need (n_tris-1)*3 constraints.
touching_pt = find_touching_pts(tris)
ns = normalize(unscaled_normals(pts[tris]))
side = get_side_of_fault(pts, tris, fault_start_idx)
continuity_cs = []
admissibility_cs = []
for tpt in touching_pt:
if len(tpt) == 0:
continue
# Separate the triangles touching at the vertex into a groups
# by the normal vectors for each triangle.
normal_groups = []
for i in range(len(tpt)):
tri_idx = tpt[i][0]
n = ns[tri_idx]
joined = False
for j in range(len(normal_groups)):
if np.allclose(normal_groups[j][0], n):
tri_idx2 = tpt[normal_groups[j][1][0]][0]
side1 = side[tri_idx]
side2 = side[tri_idx2]
crosses = (side1 != side2) and (side1 != 0) and (side2 != 0)
fault_tri_idx = None
# if crosses:
# continue
normal_groups[j][1].append(i)
joined = True
break
if not joined:
normal_groups.append((n, [i]))
# Continuity within normal group
for i in range(len(normal_groups)):
group = normal_groups[i][1]
independent_idx = group[0]
independent = tpt[independent_idx]
independent_tri_idx = independent[0]
independent_corner_idx = independent[1]
independent_dof_start = independent_tri_idx * 9 + independent_corner_idx * 3
for j in range(1, len(group)):
dependent_idx = group[j]
dependent = tpt[dependent_idx]
dependent_tri_idx = dependent[0]
dependent_corner_idx = dependent[1]
dependent_dof_start = dependent_tri_idx * 9 + dependent_corner_idx * 3
for d in range(3):
terms = [
Term(1.0, dependent_dof_start + d),
Term(-1.0, independent_dof_start + d)
]
continuity_cs.append(ConstraintEQ(terms, 0.0))
if len(normal_groups) == 1:
# Only continuity needed!
continue
# assert(len(normal_groups) == 2)
# Add constant stress constraints
for i in range(len(normal_groups)):
tpt_idx1 = normal_groups[i][1][0]
tri_idx1 = tpt[tpt_idx1][0]
corner_idx1 = tpt[tpt_idx1][1]
tri1 = pts[tris[tri_idx1]]
tri_data1 = (tri1, tri_idx1, corner_idx1)
for j in range(i + 1, len(normal_groups)):
tpt_idx2 = normal_groups[j][1][0]
tri_idx2 = tpt[tpt_idx2][0]
# print(tri_idx1, tri_idx2)
corner_idx2 = tpt[tpt_idx2][1]
tri2 = pts[tris[tri_idx2]]
tri_data2 = (tri2, tri_idx2, corner_idx2)
# for c in new_cs:
# print(', '.join(['(' + str(t.val) + ',' + str(t.dof) + ')' for t in c.terms]) + ' rhs: ' + str(c.rhs))
admissibility_cs.append(constant_stress_constraint(tri_data1, tri_data2))
admissibility_cs.append(equilibrium_constraint(tri_data1))
admissibility_cs.append(equilibrium_constraint(tri_data2))
return continuity_cs, admissibility_cs
| mit |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/semi_supervised/label_propagation.py | 8 | 14061 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can, however, be
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            A matrix of shape (n_samples, n_samples) will be created from this.
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
if sparse.isspmatrix(X):
self.X_ = X
else:
self.X_ = np.asarray(X)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
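# Minimal usage sketch (illustrative; kept as comments so the module is
# unchanged at import time). Both estimators follow the pattern in the class
# docstrings: mark unlabeled samples with -1, fit, then read ``transduction_``
# or call ``predict`` on new data.
#
#   >>> import numpy as np
#   >>> from sklearn.semi_supervised import LabelSpreading
#   >>> X = np.array([[0.0], [0.1], [5.0], [5.1]])
#   >>> y = np.array([0, -1, 1, -1])
#   >>> model = LabelSpreading(kernel='rbf', gamma=20).fit(X, y)
#   >>> model.transduction_
#   array([0, 0, 1, 1])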
| bsd-3-clause |
rseubert/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 15 | 10172 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Check that PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
"""Ensure 1d Y is correctly interpreted"""
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
| bsd-3-clause |
madjelan/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/user_interfaces/embedding_in_wx4.py | 9 | 3640 | #!/usr/bin/env python
"""
An example of how to use wx or wxagg in an application with a custom
toolbar
"""
# Used to guarantee to use at least Wx2.8
import wxversion
wxversion.ensureMinimal('2.8')
from numpy import arange, sin, pi
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg
from matplotlib.backends.backend_wx import _load_bitmap
from matplotlib.figure import Figure
from numpy.random import rand
import wx
class MyNavigationToolbar(NavigationToolbar2WxAgg):
"""
Extend the default wx toolbar with your own event handlers
"""
ON_CUSTOM = wx.NewId()
def __init__(self, canvas, cankill):
NavigationToolbar2WxAgg.__init__(self, canvas)
# for simplicity I'm going to reuse a bitmap from wx, you'll
# probably want to add your own.
self.AddSimpleTool(self.ON_CUSTOM, _load_bitmap('stock_left.xpm'),
                           'Click me', 'Activate custom control')
wx.EVT_TOOL(self, self.ON_CUSTOM, self._on_custom)
def _on_custom(self, evt):
        # add some text to the axes at a random location (in axes (0, 1)
        # coords) with a random color
# get the axes
ax = self.canvas.figure.axes[0]
        # generate a random location and color
x,y = tuple(rand(2))
rgb = tuple(rand(3))
# add the text and draw
ax.text(x, y, 'You clicked me',
transform=ax.transAxes,
color=rgb)
self.canvas.draw()
evt.Skip()
class CanvasFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,-1,
'CanvasFrame',size=(550,350))
self.SetBackgroundColour(wx.NamedColour("WHITE"))
self.figure = Figure(figsize=(5,4), dpi=100)
self.axes = self.figure.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
self.axes.plot(t,s)
self.canvas = FigureCanvas(self, -1, self.figure)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# Capture the paint message
wx.EVT_PAINT(self, self.OnPaint)
self.toolbar = MyNavigationToolbar(self.canvas, True)
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
# Mac platform (OSX 10.3, MacPython) does not seem to cope with
# having a toolbar in a sizer. This work-around gets the buttons
# back, but at the expense of having the toolbar at the top
self.SetToolBar(self.toolbar)
else:
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
# As noted above, doesn't work for Mac.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
# update the axes menu on the toolbar
self.toolbar.update()
self.SetSizer(self.sizer)
self.Fit()
def OnPaint(self, event):
self.canvas.draw()
event.Skip()
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame()
frame.Show(True)
return True
app = App(0)
app.MainLoop()
| mit |
ctools/ctools | examples/show_lightcurve.py | 1 | 4489 | #! /usr/bin/env python
# ==========================================================================
# Display lightcurve generated by cslightcrv
#
# Copyright (C) 2017-2020 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import sys
try:
import matplotlib.pyplot as plt
plt.figure()
plt.close()
except (ImportError, RuntimeError):
print('This script needs the "matplotlib" module')
sys.exit()
import gammalib
import cscripts
# =============== #
# Plot lightcurve #
# =============== #
def plot_lightcurve(filename, plotfile):
"""
Plot lightcurve
Parameters
----------
filename : str
Name of lightcurve FITS file
plotfile : str
Plot file name
"""
# Read spectrum file
fits = gammalib.GFits(filename)
table = fits.table(1)
# Extract standard columns
c_mjd = table['MJD']
c_emjd = table['e_MJD']
c_ts = table['TS']
# Extract columns dependent on flux type
if table.contains('EnergyFlux'):
c_flux = table['EnergyFlux']
c_eflux = table['e_EnergyFlux']
c_upper = table['EFluxUpperLimit']
ylabel = r'E $\times$ dN/dE (erg cm$^{-2}$ s$^{-1}$)'
elif table.contains('PhotonFlux'):
c_flux = table['PhotonFlux']
c_eflux = table['e_PhotonFlux']
c_upper = table['FluxUpperLimit']
ylabel = r'N(E) (ph cm$^{-2}$ s$^{-1}$)'
else:
c_flux = table['Prefactor']
c_eflux = table['e_Prefactor']
c_upper = table['DiffUpperLimit']
ylabel = r'dN/dE (cm$^{-2}$ s$^{-1}$ MeV$^{-1}$)'
# Initialise arrays to be filled
mjd = []
e_mjd = []
flux = []
e_flux = []
ul_mjd = []
ul_e_mjd = []
ul_flux = []
ul_e_flux = []
# Loop over rows of the file
nrows = table.nrows()
for row in range(nrows):
# Get Test Statistic, flux and flux error
ts = c_ts.real(row)
flx = c_flux.real(row)
e_flx = c_eflux.real(row)
# If Test Statistic is larger than 9 and twice the flux error is
# smaller than the flux, then append flux point ...
if ts > 9.0 and 2.0*e_flx < flx:
mjd.append(c_mjd.real(row))
e_mjd.append(c_emjd.real(row))
flux.append(c_flux.real(row))
e_flux.append(c_eflux.real(row))
# ... otherwise append upper limit
else:
ul_mjd.append(c_mjd.real(row))
ul_e_mjd.append(c_emjd.real(row))
ul_flux.append(c_upper.real(row))
ul_e_flux.append(0.5*c_upper.real(row))
# Plot the spectrum
plt.figure()
plt.semilogy()
plt.grid()
plt.errorbar(mjd, flux, yerr=e_flux, xerr=[e_mjd, e_mjd],
fmt='ro')
plt.errorbar(ul_mjd, ul_flux, xerr=[ul_e_mjd, ul_e_mjd],
yerr=ul_e_flux, uplims=True, fmt='ro')
plt.xlabel('MJD (days)')
plt.ylabel(ylabel)
# Show figure
if len(plotfile) > 0:
plt.savefig(plotfile)
else:
plt.show()
# Return
return
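# Illustrative sketch (the file names below are hypothetical): the plotting
# routine can also be called directly from a Python session, bypassing the
# command-line wrapper defined further down, e.g.
#
#   plot_lightcurve('lightcurve.fits', 'lightcurve.png')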
# =============== #
# Show lightcurve #
# =============== #
def show_lightcurve():
"""
Show lightcurve
"""
# Set usage string
usage = 'show_lightcurve.py [-p plotfile] [file]'
# Set default options
options = [{'option': '-p', 'value': ''}]
# Get arguments and options from command line arguments
args, options = cscripts.ioutils.get_args_options(options, usage)
# Extract script parameters from options
plotfile = options[0]['value']
# Plot lightcurve
plot_lightcurve(args[0], plotfile)
# Return
return
# ======================== #
# Main routine entry point #
# ======================== #
if __name__ == '__main__':
# Show lightcurve
show_lightcurve()
| gpl-3.0 |
NlGG/envelope | envelope.py | 1 | 3825 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
# Quantitative Economics Web: http://quant-econ.net/py/index.html
from __future__ import division
import math
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
def envelope(expression, with_animation=False, **kwargs):
    # Initialise the variable keyword arguments (**kwargs) with their defaults
x_list = kwargs.get('x_list', np.arange(0, 100, 0.5))
parameter_list = kwargs.get('parameter_list', np.arange(0, 10, 0.1))
title = kwargs.get('title', 'Show Envelope Curve')
xlabel = kwargs.get('xlabel', False)
ylabel = kwargs.get('ylabel', False)
color = kwargs.get('color', 'c')
legend = kwargs.get('legend', False)
parameter_name = kwargs.get('parameter_name', 'Parameter')
xlim = kwargs.get('xlim', [0, 100])
ylim = kwargs.get('ylim', [0, 30])
plot_size = kwargs.get('plot_size', 5)
    # Animation callback: draw the curve for a single parameter value
def __run(parameter):
y_list = expression(x_list, parameter)
min_index = y_list.argmin()
left_bound = max(min_index - plot_size, 0)
right_bound = min(min_index + plot_size + 1, len(x_list) - 1)
x_plot_list = x_list[left_bound : right_bound]
y_plot_list = y_list[left_bound : right_bound]
del plt.gca().texts[-1]
ax.annotate(str(parameter_name)+"="+str(parameter),
xy=(0.05, 0.9),
xycoords='axes fraction',
fontsize=16,
horizontalalignment='left',
verticalalignment='bottom'
)
ax.plot(x_plot_list, y_plot_list, color=color, linewidth=1)
    # Initialise the figure
fig, ax = plt.subplots()
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
ax.annotate(str(parameter_name)+"="+str(parameter_list[0]),
xy=(0, 0),
xycoords='axes fraction',
fontsize=16,
horizontalalignment='right',
verticalalignment='top'
)
ax.plot(0, 0, color=color, linewidth=1 ,label=str(legend))
if with_animation:
        # Draw onto fig by feeding __run one value from parameter_list at a time.
        # Keep a reference to the FuncAnimation object so it is not garbage
        # collected before plt.show() runs.
        anim = animation.FuncAnimation(fig, __run, parameter_list, interval=5, repeat=False)
else:
for parameter in parameter_list:
__run(parameter)
plt.title(title)
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
if legend:
plt.legend()
plt.show()
# Compute the long-run average cost curve
envelope(lambda y, k: 1/8 * (y - 10 * k) ** 2 + 1/3 * k ** 2 - 2 * k + 4,
plot_size = 5,
title = 'Show Average Long-Run Cost Curve',
xlabel = 'Y: Production',
ylabel = 'C: Cost',
legend = 'Average Short-Run Cost Curves',
parameter_name = 'K'
)
"""
# Compute the long-run average cost curve (with animation)
envelope(lambda y, k: 1/8 * (y - 10 * k) ** 2 + 1/3 * k ** 2 - 2 * k + 4,
True,
plot_size = 5,
title = 'Show Average Long-Run Cost Curve',
xlabel = 'Y: Production',
ylabel = 'C: Cost',
legend = 'Average Short-Run Cost Curves',
parameter_name = 'K'
)
"""
"""
# Compute the long-run total cost curve
envelope(lambda y, k: 1/150*((1/2*y-2*k**3) ** 3+(2*k**3)**3) + 1/10*k*((1/2*y-(5*k**3-5*k**2+k))**2-(5*k**3-5*k**2+k) **2) + (1/(4*k) + 1/25*k**6)*y + 5*k**3-5*k**2+5*k+5,
plot_size = 200,
title = 'Show Total Long-Run Cost Curve',
xlabel = 'Y: Production',
ylabel = 'C: Cost',
legend = 'Total Short-Run Cost Curves',
parameter_name = 'K',
xlim = [0, 100],
ylim = [0, 300],
         x_list = np.arange(0, 200, 0.5),
parameter_list = np.arange(0.05, 5.05, 0.05)
)
"""
| bsd-3-clause |
adiIspas/Machine-Learning_A-Z | Machine Learning A-Z/Part 4 - Clustering/Section 25 - Hierarchical Clustering/hc.py | 7 | 1771 | # Hierarchical Clustering
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values
# y = dataset.iloc[:, 3].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Using the dendrogram to find the optimal number of clusters
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))
plt.title('Dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean distances')
plt.show()
# Fitting Hierarchical Clustering to the dataset
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters = 5, affinity = 'euclidean', linkage = 'ward')
y_hc = hc.fit_predict(X)
# Visualising the clusters
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s = 100, c = 'red', label = 'Cluster 1')
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s = 100, c = 'blue', label = 'Cluster 2')
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s = 100, c = 'green', label = 'Cluster 3')
plt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s = 100, c = 'cyan', label = 'Cluster 4')
plt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s = 100, c = 'magenta', label = 'Cluster 5')
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show() | mit |
jchodera/mdtraj | mdtraj/nmr/shift_wrappers.py | 2 | 12126 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
from __future__ import print_function, absolute_import
import os
import sys
from distutils.version import LooseVersion
from distutils.spawn import find_executable as _find_executable
import numpy as np
import pandas as pd
import subprocess
from mdtraj.utils import enter_temp_directory
##############################################################################
# Globals
##############################################################################
# Possible names for the external commands -- these are expected
# to be found in the PATH.
SHIFTX2 = ['shiftx2.py']
SPARTA_PLUS = ['sparta+', 'SPARTA+', 'SPARTA+.linux']
PPM = ['ppm_linux_64.exe']
__all__ = ['chemical_shifts_shiftx2', 'chemical_shifts_ppm', 'chemical_shifts_spartaplus', "reindex_dataframe_by_atoms"]
def find_executable(names):
for possible in names:
result = _find_executable(possible)
if result is not None:
return result
return None
##############################################################################
# Code
##############################################################################
def compute_chemical_shifts(trj, model="shiftx2", **kwargs):
"""Predict chemical shifts of a trajectory using ShiftX2.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
model : str, optional, default="shiftx2"
The program to use for calculating chemical shifts. Must be one
of shiftx2, ppm, or sparta+
Returns
-------
results : pandas DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
    You must have the appropriate chemical shift prediction software installed
and in your executable path.
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference, see docstrings for chemical_shifts_*
for the various possible models.
"""
if model == "shiftx2":
return chemical_shifts_shiftx2(trj, **kwargs)
elif model == "ppm":
return chemical_shifts_ppm(trj, **kwargs)
elif model == "sparta+":
return chemical_shifts_spartaplus(trj, **kwargs)
else:
raise(ValueError("model must be one of shiftx2, ppm, or sparta+"))
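# A minimal usage sketch (hypothetical file name; the chosen predictor must be
# installed and on the PATH):
#
#     import mdtraj as md
#     trj = md.load('protein.pdb')
#     shifts = compute_chemical_shifts(trj, model='sparta+')
#     print(shifts.head())   # (resSeq, name) index, one column per frame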
def chemical_shifts_shiftx2(trj, pH=5.0, temperature=298.00):
"""Predict chemical shifts of a trajectory using ShiftX2.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
pH : float, optional, default=5.0
pH value which gets passed to the ShiftX2 predictor.
temperature : float, optional, default=298.00
Temperature which gets passed to the ShiftX2 predictor.
Returns
-------
results : pandas DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have ShiftX2 available on your path; see (http://www.shiftx2.ca/).
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference below.
References
----------
.. [1] Beomsoo Han, Yifeng Liu, Simon Ginzinger, and David Wishart.
"SHIFTX2: significantly improved protein chemical shift
prediction." J. Biomol. NMR, 50, 1 43-57 (2011)
"""
binary = find_executable(SHIFTX2)
if binary is None:
raise OSError('External command not found. Looked for {} in PATH. '
'`chemical_shifts_shiftx2` requires the external program SHIFTX2, '
'available at http://www.shiftx2.ca/'.format(', '.join(SHIFTX2)))
results = []
with enter_temp_directory():
for i in range(trj.n_frames):
fn = './trj%d.pdb' % i
trj[i].save(fn)
subprocess.check_call([binary,
'-b', fn,
'-p', "{:.1f}".format(pH),
'-t', "{:.2f}".format(temperature),
])
d = pd.read_csv("./trj%d.pdb.cs" % i)
d.rename(columns={"NUM": "resSeq", "RES": "resName", "ATOMNAME": "name"}, inplace=True)
d["frame"] = i
results.append(d)
results = pd.concat(results)
if LooseVersion(pd.__version__) < LooseVersion('0.14.0'):
results = results.pivot_table(rows=["resSeq", "name"], cols="frame", values="SHIFT")
else:
results = results.pivot_table(index=["resSeq", "name"], columns="frame", values="SHIFT")
return results
def chemical_shifts_ppm(trj):
"""Predict chemical shifts of a trajectory using ppm.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
Returns
-------
results : pandas.DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have ppm available on your path; see
(http://spin.ccic.ohio-state.edu/index.php/download/index).
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference below.
References
----------
.. [1] Li, DW, and Bruschweiler, R. "PPM: a side-chain and backbone chemical
shift predictor for the assessment of protein conformational ensembles."
J Biomol NMR. 2012 Nov;54(3):257-65.
"""
binary = find_executable(PPM)
first_resSeq = trj.top.residue(0).resSeq
if binary is None:
raise OSError('External command not found. Looked for %s in PATH. `chemical_shifts_ppm` requires the external program PPM, available at http://spin.ccic.ohio-state.edu/index.php/download/index' % ', '.join(PPM))
with enter_temp_directory():
trj.save("./trj.pdb")
cmd = "%s -pdb trj.pdb -mode detail" % binary
return_flag = os.system(cmd)
if return_flag != 0:
raise(IOError("Could not successfully execute command '%s', check your PPM installation or your input trajectory." % cmd))
d = pd.read_table("./bb_details.dat", index_col=False, header=None, sep="\s+").drop([3], axis=1)
d = d.rename(columns={0: "resSeq", 1: "resName", 2: "name"})
d["resSeq"] += first_resSeq - 1 # Fix bug in PPM that reindexes to 1
d = d.drop("resName", axis=1)
d = d.set_index(["resSeq", "name"])
d.columns = np.arange(trj.n_frames)
d.columns.name = "frame"
return d
def _get_lines_to_skip(filename):
"""Determine the number of comment lines in a SPARTA+ output file."""
format_string = """FORMAT %4d %4s %4s %9.3f %9.3f %9.3f %9.3f %9.3f %9.3f"""
handle = open(filename)
for i, line in enumerate(handle):
if line.find(format_string) != -1:
return i + 2
raise(Exception("No format string found in SPARTA+ file!"))
def chemical_shifts_spartaplus(trj, rename_HN=True):
"""Predict chemical shifts of a trajectory using SPARTA+.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
rename_HN : bool, optional, default=True
SPARTA+ calls the amide proton "HN" instead of the standard "H".
When True, this option renames the output as "H" to match the PDB
and BMRB nomenclature.
Returns
-------
results : pandas.DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have SPARTA+ available on your path; see
(http://spin.niddk.nih.gov/bax/software/SPARTA+/). Also, the SPARTAP_DIR
environment variable must be set so that SPARTA+ knows where to find
its database files.
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference below.
References
----------
.. [1] Shen, Y., and Bax, Ad. "SPARTA+: a modest improvement in empirical
NMR chemical shift prediction by means of an artificial neural network."
J. Biomol. NMR, 48, 13-22 (2010)
"""
binary = find_executable(SPARTA_PLUS)
if binary is None:
raise OSError('External command not found. Looked for %s in PATH. `chemical_shifts_spartaplus` requires the external program SPARTA+, available at http://spin.niddk.nih.gov/bax/software/SPARTA+/' % ', '.join(SPARTA_PLUS))
names = ["resSeq", "resName", "name", "SS_SHIFT", "SHIFT", "RC_SHIFT", "HM_SHIFT", "EF_SHIFT", "SIGMA"]
with enter_temp_directory():
for i in range(trj.n_frames):
trj[i].save("./trj%d.pdb" % i)
subprocess.check_call([binary, '-in'] + ["trj{}.pdb".format(i) for i in range(trj.n_frames)]
+ ['-out', 'trj0_pred.tab'])
lines_to_skip = _get_lines_to_skip("trj0_pred.tab")
results = []
for i in range(trj.n_frames):
d = pd.read_table("./trj%d_pred.tab" % i, names=names, header=None, sep="\s+", skiprows=lines_to_skip)
d["frame"] = i
results.append(d)
results = pd.concat(results)
if rename_HN:
results.name[results.name == "HN"] = "H"
if LooseVersion(pd.__version__) < LooseVersion('0.14.0'):
results = results.pivot_table(rows=["resSeq", "name"], cols="frame", values="SHIFT")
else:
results = results.pivot_table(index=["resSeq", "name"], columns="frame", values="SHIFT")
return results
def reindex_dataframe_by_atoms(trj, frame):
"""Reindex chemical shift output to use atom number (serial) indexing.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
frame : pandas.DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Returns
-------
new_frame : pandas.DataFrame
Dataframe containing results, with index consisting of atom
indices (AKA the 'serial' entry in a PDB). Columns correspond to
each frame in trj.
Notes
-----
Be aware that this function may DROP predictions if the atom naming
is different between the input trajectory and the output of various
chemical shift prediction tools.
"""
top, bonds = trj.top.to_dataframe()
top["serial"] = top.index
top = top.set_index(["resSeq", "name"])
new_frame = frame.copy()
new_frame["serial"] = top.ix[new_frame.index].serial
new_frame = new_frame.dropna().reset_index().set_index("serial").drop(["resSeq", "name"], axis=1)
return new_frame
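# A minimal usage sketch (continuing the hypothetical example above):
#
#     shifts = compute_chemical_shifts(trj, model='shiftx2')
#     by_atom = reindex_dataframe_by_atoms(trj, shifts)
#     # rows are now indexed by atom serial number instead of (resSeq, name)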
| lgpl-2.1 |
mne-tools/mne-python | mne/cov.py | 4 | 79191 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
from distutils.version import LooseVersion
import itertools as itt
from math import log
import os
import numpy as np
from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT, DEFAULTS
from .io.write import start_file, end_file
from .io.proj import (make_projector, _proj_equal, activate_proj,
_check_projs, _needs_eeg_average_ref_proj,
_has_eeg_average_ref_proj, _read_proj, _write_proj)
from .io import fiff_open, RawArray
from .io.pick import (pick_types, pick_channels_cov, pick_channels, pick_info,
_picks_by_type, _pick_data_channels, _picks_to_idx,
_DATA_CH_TYPES_SPLIT)
from .io.constants import FIFF
from .io.meas_info import _read_bad_channels, create_info
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.write import (start_block, end_block, write_int, write_name_list,
write_double, write_float_matrix, write_string)
from .defaults import _handle_default
from .epochs import Epochs
from .event import make_fixed_length_events
from .evoked import EvokedArray
from .rank import compute_rank
from .utils import (check_fname, logger, verbose, check_version, _time_mask,
warn, copy_function_doc_to_method_doc, _pl,
_undo_scaling_cov, _scaled_array, _validate_type,
_check_option, eigh, fill_doc, _on_missing,
_check_on_missing)
from . import viz
from .fixes import (BaseEstimator, EmpiricalCovariance, _logdet,
empirical_covariance, log_likelihood)
def _check_covs_algebra(cov1, cov2):
    if cov1.ch_names != cov2.ch_names:
        raise ValueError('Both Covariance objects must have the same list of '
                         'channels.')
    projs1 = [str(c) for c in cov1['projs']]
    projs2 = [str(c) for c in cov2['projs']]
    if projs1 != projs2:
        raise ValueError('Both Covariance objects must have the same list of '
                         'SSP projections.')
def _get_tslice(epochs, tmin, tmax):
"""Get the slice."""
mask = _time_mask(epochs.times, tmin, tmax, sfreq=epochs.info['sfreq'])
tstart = np.where(mask)[0][0] if tmin is not None else None
tend = np.where(mask)[0][-1] + 1 if tmax is not None else None
tslice = slice(tstart, tend, None)
return tslice
@fill_doc
class Covariance(dict):
"""Noise covariance matrix.
.. warning:: This class should not be instantiated directly, but
instead should be created using a covariance reading or
computation function.
Parameters
----------
data : array-like
The data.
names : list of str
Channel names.
bads : list of str
Bad channels.
projs : list
Projection vectors.
nfree : int
Degrees of freedom.
eig : array-like | None
Eigenvalues.
eigvec : array-like | None
Eigenvectors.
method : str | None
The method used to compute the covariance.
loglik : float
The log likelihood.
%(verbose_meth)s
Attributes
----------
data : array of shape (n_channels, n_channels)
The covariance.
ch_names : list of str
List of channels' names.
nfree : int
Number of degrees of freedom i.e. number of time points used.
dim : int
The number of channels ``n_channels``.
See Also
--------
compute_covariance
compute_raw_covariance
make_ad_hoc_cov
read_cov
"""
def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None,
method=None, loglik=None, verbose=None):
"""Init of covariance."""
diag = (data.ndim == 1)
projs = _check_projs(projs)
self.update(data=data, dim=len(data), names=names, bads=bads,
nfree=nfree, eig=eig, eigvec=eigvec, diag=diag,
projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV)
if method is not None:
self['method'] = method
if loglik is not None:
self['loglik'] = loglik
self.verbose = verbose
@property
def data(self):
"""Numpy array of Noise covariance matrix."""
return self['data']
@property
def ch_names(self):
"""Channel names."""
return self['names']
@property
def nfree(self):
"""Number of degrees of freedom."""
return self['nfree']
def save(self, fname):
"""Save covariance matrix in a FIF file.
Parameters
----------
fname : str
Output filename.
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz',
'_cov.fif', '_cov.fif.gz'))
fid = start_file(fname)
try:
_write_cov(fid, self)
except Exception:
fid.close()
os.remove(fname)
raise
end_file(fid)
def copy(self):
"""Copy the Covariance object.
Returns
-------
cov : instance of Covariance
The copied object.
"""
return deepcopy(self)
def as_diag(self):
"""Set covariance to be processed as being diagonal.
Returns
-------
cov : dict
The covariance.
Notes
-----
This function allows creation of inverse operators
equivalent to using the old "--diagnoise" mne option.
This function operates in place.
"""
if self['diag']:
return self
self['diag'] = True
self['data'] = np.diag(self['data'])
self['eig'] = None
self['eigvec'] = None
return self
def _as_square(self):
# This is a hack but it works because np.diag() behaves nicely
if self['diag']:
self['diag'] = False
self.as_diag()
self['diag'] = False
return self
def _get_square(self):
if self['diag'] != (self.data.ndim == 1):
raise RuntimeError(
'Covariance attributes inconsistent, got data with '
'dimensionality %d but diag=%s'
% (self.data.ndim, self['diag']))
return np.diag(self.data) if self['diag'] else self.data.copy()
def __repr__(self): # noqa: D105
if self.data.ndim == 2:
s = 'size : %s x %s' % self.data.shape
else: # ndim == 1
s = 'diagonal : %s' % self.data.size
s += ", n_samples : %s" % self.nfree
s += ", data : %s" % self.data
return "<Covariance | %s>" % s
def __add__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
this_cov = cov.copy()
this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) +
(self['data'] * self['nfree'])) /
(self['nfree'] + this_cov['nfree']))
this_cov['nfree'] += self['nfree']
this_cov['bads'] = list(set(this_cov['bads']).union(self['bads']))
return this_cov
def __iadd__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
self['data'][:] = (((self['data'] * self['nfree']) +
(cov['data'] * cov['nfree'])) /
(self['nfree'] + cov['nfree']))
self['nfree'] += cov['nfree']
self['bads'] = list(set(self['bads']).union(cov['bads']))
return self
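    # Worked example of the pooling above (illustrative numbers): adding a
    # covariance estimated from 300 samples to one estimated from 100 samples
    # gives data = (C1 * 300 + C2 * 100) / 400 and nfree = 400, i.e. a
    # sample-size-weighted average of the two estimates.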
@verbose
@copy_function_doc_to_method_doc(viz.misc.plot_cov)
def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
return viz.misc.plot_cov(self, info, exclude, colorbar, proj, show_svd,
show, verbose)
@verbose
def plot_topomap(self, info, ch_type=None, vmin=None,
vmax=None, cmap=None, sensors=True, colorbar=True,
scalings=None, units=None, res=64,
size=1, cbar_fmt="%3.1f",
proj=False, show=True, show_names=False, title=None,
mask=None, mask_params=None, outlines='head',
contours=6, image_interp='bilinear',
axes=None, extrapolate=_EXTRAPOLATE_DEFAULT, sphere=None,
border=_BORDER_DEFAULT,
noise_cov=None, verbose=None):
"""Plot a topomap of the covariance diagonal.
Parameters
----------
info : instance of Info
The measurement information.
%(topomap_ch_type)s
%(topomap_vmin_vmax)s
%(topomap_cmap)s
%(topomap_sensors)s
%(topomap_colorbar)s
%(topomap_scalings)s
%(topomap_units)s
%(topomap_res)s
%(topomap_size)s
%(topomap_cbar_fmt)s
%(plot_proj)s
%(show)s
%(topomap_show_names)s
%(title_None)s
%(topomap_mask)s
%(topomap_mask_params)s
%(topomap_outlines)s
%(topomap_contours)s
%(topomap_image_interp)s
%(topomap_axes)s
%(topomap_extrapolate)s
%(topomap_sphere_auto)s
%(topomap_border)s
noise_cov : instance of Covariance | None
If not None, whiten the instance with ``noise_cov`` before
plotting.
%(verbose)s
Returns
-------
fig : instance of Figure
The matplotlib figure.
Notes
-----
.. versionadded:: 0.21
"""
from .viz.misc import _index_info_cov
info, C, _, _ = _index_info_cov(info, self, exclude=())
evoked = EvokedArray(np.diag(C)[:, np.newaxis], info)
if noise_cov is not None:
# need to left and right multiply whitener, which for the diagonal
# entries is the same as multiplying twice
evoked = whiten_evoked(whiten_evoked(evoked, noise_cov), noise_cov)
if units is None:
units = 'AU'
if scalings is None:
scalings = 1.
if units is None:
units = {k: f'({v})²' for k, v in DEFAULTS['units'].items()}
if scalings is None:
scalings = {k: v * v for k, v in DEFAULTS['scalings'].items()}
return evoked.plot_topomap(
times=[0], ch_type=ch_type, vmin=vmin, vmax=vmax, cmap=cmap,
sensors=sensors, colorbar=colorbar, scalings=scalings,
units=units, res=res, size=size, cbar_fmt=cbar_fmt,
proj=proj, show=show, show_names=show_names, title=title,
mask=mask, mask_params=mask_params, outlines=outlines,
contours=contours, image_interp=image_interp, axes=axes,
extrapolate=extrapolate, sphere=sphere, border=border,
time_format='')
def pick_channels(self, ch_names, ordered=False):
"""Pick channels from this covariance matrix.
Parameters
----------
ch_names : list of str
List of channels to keep. All other channels are dropped.
ordered : bool
If True (default False), ensure that the order of the channels
matches the order of ``ch_names``.
Returns
-------
cov : instance of Covariance.
The modified covariance matrix.
Notes
-----
Operates in-place.
.. versionadded:: 0.20.0
"""
return pick_channels_cov(self, ch_names, exclude=[], ordered=ordered,
copy=False)
###############################################################################
# IO
@verbose
def read_cov(fname, verbose=None):
"""Read a noise covariance from a FIF file.
Parameters
----------
fname : str
The name of file containing the covariance matrix. It should end with
-cov.fif or -cov.fif.gz.
%(verbose)s
Returns
-------
cov : Covariance
The noise covariance matrix.
See Also
--------
write_cov, compute_covariance, compute_raw_covariance
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz',
'_cov.fif', '_cov.fif.gz'))
f, tree = fiff_open(fname)[:2]
with f as fid:
return Covariance(**_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV,
limited=True))
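# A minimal usage sketch (hypothetical file name):
#
#     cov = read_cov('sample_audvis-cov.fif')
#     print(cov.ch_names[:3], cov.data.shape)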
###############################################################################
# Estimate from data
@verbose
def make_ad_hoc_cov(info, std=None, verbose=None):
"""Create an ad hoc noise covariance.
Parameters
----------
info : instance of Info
Measurement info.
std : dict of float | None
        Standard deviation of the diagonal elements. If dict, keys should be
``'grad'`` for gradiometers, ``'mag'`` for magnetometers and ``'eeg'``
for EEG channels. If None, default values will be used (see Notes).
%(verbose)s
Returns
-------
cov : instance of Covariance
The ad hoc diagonal noise covariance for the M/EEG data channels.
Notes
-----
The default noise values are 5 fT/cm, 20 fT, and 0.2 µV for gradiometers,
magnetometers, and EEG channels respectively.
.. versionadded:: 0.9.0
"""
picks = pick_types(info, meg=True, eeg=True, exclude=())
std = _handle_default('noise_std', std)
data = np.zeros(len(picks))
for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True),
(std['grad'], std['mag'], std['eeg'])):
these_picks = pick_types(info, meg=meg, eeg=eeg)
data[np.searchsorted(picks, these_picks)] = val * val
ch_names = [info['ch_names'][pick] for pick in picks]
return Covariance(data, ch_names, info['bads'], info['projs'], nfree=0)
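# A minimal usage sketch (``info`` is assumed to come from an existing
# measurement; the std values shown just restate the documented defaults):
#
#     cov = make_ad_hoc_cov(info)
#     cov = make_ad_hoc_cov(info, std=dict(grad=5e-13, mag=2e-14, eeg=2e-7))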
def _check_n_samples(n_samples, n_chan):
"""Check to see if there are enough samples for reliable cov calc."""
n_samples_min = 10 * (n_chan + 1) // 2
if n_samples <= 0:
raise ValueError('No samples found to compute the covariance matrix')
if n_samples < n_samples_min:
warn('Too few samples (required : %d got : %d), covariance '
'estimate may be unreliable' % (n_samples_min, n_samples))
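# Worked example of the heuristic above (illustrative): with n_chan = 60 the
# minimum is 10 * (60 + 1) // 2 = 305 samples, so shorter recordings trigger
# the unreliable-estimate warning.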
@verbose
def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None,
flat=None, picks=None, method='empirical',
method_params=None, cv=3, scalings=None, n_jobs=1,
return_estimators=False, reject_by_annotation=True,
rank=None, verbose=None):
"""Estimate noise covariance matrix from a continuous segment of raw data.
It is typically useful to estimate a noise covariance from empty room
data or time intervals before starting the stimulation.
.. note:: To estimate the noise covariance from epoched data, use
:func:`mne.compute_covariance` instead.
Parameters
----------
raw : instance of Raw
Raw data.
tmin : float
Beginning of time interval in seconds. Defaults to 0.
tmax : float | None (default None)
End of time interval in seconds. If None (default), use the end of the
recording.
tstep : float (default 0.2)
Length of data chunks for artifact rejection in seconds.
Can also be None to use a single epoch of (tmax - tmin)
duration. This can use a lot of memory for large ``Raw``
instances.
reject : dict | None (default None)
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None (default None)
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
%(picks_good_data_noref)s
method : str | list | None (default 'empirical')
The method used for covariance estimation.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
method_params : dict | None (default None)
Additional parameters to the estimation procedure.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
cv : int | sklearn.model_selection object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger by default :class:`sklearn.model_selection.KFold`
with 3 splits.
.. versionadded:: 0.12
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale magnetometers and gradiometers
at the same unit.
.. versionadded:: 0.12
%(n_jobs)s
.. versionadded:: 0.12
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False.
.. versionadded:: 0.12
%(reject_by_annotation_epochs)s
.. versionadded:: 0.14
%(rank_None)s
.. versionadded:: 0.17
.. versionadded:: 0.18
Support for 'info' mode.
%(verbose)s
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_covariance : Estimate noise covariance matrix from epoched data.
Notes
-----
This function will:
1. Partition the data into evenly spaced, equal-length epochs.
2. Load them into memory.
3. Subtract the mean across all time points and epochs for each channel.
4. Process the :class:`Epochs` by :func:`compute_covariance`.
This will produce a slightly different result compared to using
:func:`make_fixed_length_events`, :class:`Epochs`, and
:func:`compute_covariance` directly, since that would (with the recommended
baseline correction) subtract the mean across time *for each epoch*
(instead of across epochs) for each channel.
"""
tmin = 0. if tmin is None else float(tmin)
dt = 1. / raw.info['sfreq']
tmax = raw.times[-1] + dt if tmax is None else float(tmax)
tstep = tmax - tmin if tstep is None else float(tstep)
tstep_m1 = tstep - dt # inclusive!
events = make_fixed_length_events(raw, 1, tmin, tmax, tstep)
logger.info('Using up to %s segment%s' % (len(events), _pl(events)))
# don't exclude any bad channels, inverses expect all channels present
if picks is None:
# Need to include all channels e.g. if eog rejection is to be used
picks = np.arange(raw.info['nchan'])
pick_mask = np.in1d(
picks, _pick_data_channels(raw.info, with_ref_meg=False))
else:
pick_mask = slice(None)
picks = _picks_to_idx(raw.info, picks)
epochs = Epochs(raw, events, 1, 0, tstep_m1, baseline=None,
picks=picks, reject=reject, flat=flat, verbose=False,
preload=False, proj=False,
reject_by_annotation=reject_by_annotation)
if method is None:
method = 'empirical'
if isinstance(method, str) and method == 'empirical':
# potentially *much* more memory efficient to do it the iterative way
picks = picks[pick_mask]
data = 0
n_samples = 0
mu = 0
# Read data in chunks
for raw_segment in epochs:
raw_segment = raw_segment[pick_mask]
mu += raw_segment.sum(axis=1)
data += np.dot(raw_segment, raw_segment.T)
n_samples += raw_segment.shape[1]
_check_n_samples(n_samples, len(picks))
data -= mu[:, None] * (mu[None, :] / n_samples)
data /= (n_samples - 1.0)
logger.info("Number of samples used : %d" % n_samples)
logger.info('[done]')
ch_names = [raw.info['ch_names'][k] for k in picks]
bads = [b for b in raw.info['bads'] if b in ch_names]
return Covariance(data, ch_names, bads, raw.info['projs'],
nfree=n_samples - 1)
del picks, pick_mask
# This makes it equivalent to what we used to do (and do above for
# empirical mode), treating all epochs as if they were a single long one
epochs.load_data()
ch_means = epochs._data.mean(axis=0).mean(axis=1)
epochs._data -= ch_means[np.newaxis, :, np.newaxis]
# fake this value so there are no complaints from compute_covariance
epochs.baseline = (None, None)
return compute_covariance(epochs, keep_sample_mean=True, method=method,
method_params=method_params, cv=cv,
scalings=scalings, n_jobs=n_jobs,
return_estimators=return_estimators,
rank=rank)
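# A minimal usage sketch (assumes an empty-room recording loaded as ``raw``;
# rejection thresholds are illustrative):
#
#     cov = compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2,
#                                  reject=dict(grad=4000e-13, mag=4e-12))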
def _check_method_params(method, method_params, keep_sample_mean=True,
name='method', allow_auto=True, rank=None):
"""Check that method and method_params are usable."""
accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf',
'oas', 'shrunk', 'pca', 'factor_analysis', 'shrinkage')
_method_params = {
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'store_precision': False, 'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'oas': {'store_precision': False, 'assume_centered': True},
'shrinkage': {'shrinkage': 0.1, 'store_precision': False,
'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
}
for ch_type in _DATA_CH_TYPES_SPLIT:
_method_params['diagonal_fixed'][ch_type] = 0.1
if isinstance(method_params, dict):
for key, values in method_params.items():
if key not in _method_params:
raise ValueError('key (%s) must be "%s"' %
(key, '" or "'.join(_method_params)))
_method_params[key].update(method_params[key])
shrinkage = method_params.get('shrinkage', {}).get('shrinkage', 0.1)
if not 0 <= shrinkage <= 1:
raise ValueError('shrinkage must be between 0 and 1, got %s'
% (shrinkage,))
was_auto = False
if method is None:
method = ['empirical']
elif method == 'auto' and allow_auto:
was_auto = True
method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
if not isinstance(method, (list, tuple)):
method = [method]
if not all(k in accepted_methods for k in method):
raise ValueError(
'Invalid {name} ({method}). Accepted values (individually or '
'in a list) are any of "{accepted_methods}" or None.'.format(
name=name, method=method, accepted_methods=accepted_methods))
if not (isinstance(rank, str) and rank == 'full'):
if was_auto:
method.pop(method.index('factor_analysis'))
for method_ in method:
if method_ in ('pca', 'factor_analysis'):
raise ValueError('%s can so far only be used with rank="full",'
' got rank=%r' % (method_, rank))
if not keep_sample_mean:
if len(method) != 1 or 'empirical' not in method:
            raise ValueError('`keep_sample_mean=False` is only supported '
                             'with %s="empirical"' % (name,))
for p, v in _method_params.items():
if v.get('assume_centered', None) is False:
raise ValueError('`assume_centered` must be True'
' if `keep_sample_mean` is False')
return method, _method_params
@verbose
def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
projs=None, method='empirical', method_params=None,
cv=3, scalings=None, n_jobs=1, return_estimators=False,
on_mismatch='raise', rank=None, verbose=None):
"""Estimate noise covariance matrix from epochs.
The noise covariance is typically estimated on pre-stimulus periods
when the stimulus onset is defined from events.
If the covariance is computed for multiple event types (events
with different IDs), the following two options can be used and combined:
1. either an Epochs object for each event type is created and
a list of Epochs is passed to this function.
2. an Epochs object is created for multiple events and passed
to this function.
.. note:: To estimate the noise covariance from non-epoched raw data, such
as an empty-room recording, use
:func:`mne.compute_raw_covariance` instead.
Parameters
----------
epochs : instance of Epochs, or list of Epochs
The epochs.
keep_sample_mean : bool (default True)
If False, the average response over epochs is computed for
each event type and subtracted during the covariance
computation. This is useful if the evoked response from a
previous stimulus extends into the baseline period of the next.
Note. This option is only implemented for method='empirical'.
tmin : float | None (default None)
Start time for baseline. If None start at first sample.
tmax : float | None (default None)
End time for baseline. If None end at last sample.
projs : list of Projection | None (default None)
List of projectors to use in covariance calculation, or None
to indicate that the projectors from the epochs should be
inherited. If None, then projectors from all epochs must match.
method : str | list | None (default 'empirical')
The method used for covariance estimation. If 'empirical' (default),
the sample covariance will be computed. A list can be passed to
perform estimates using multiple methods.
If 'auto' or a list of methods, the best estimator will be determined
based on log-likelihood and cross-validation on unseen data as
described in :footcite:`EngemannGramfort2015`. Valid methods are
'empirical', 'diagonal_fixed', 'shrunk', 'oas', 'ledoit_wolf',
'factor_analysis', 'shrinkage', and 'pca' (see Notes). If ``'auto'``,
it expands to::
['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
``'factor_analysis'`` is removed when ``rank`` is not 'full'.
The ``'auto'`` mode is not recommended if there are many
segments of data, since computation can take a long time.
.. versionadded:: 0.9.0
method_params : dict | None (default None)
Additional parameters to the estimation procedure. Only considered if
method is not None. Keys must correspond to the value(s) of ``method``.
If None (default), expands to the following (with the addition of
        ``{'store_precision': False, 'assume_centered': True}`` for all methods
except ``'factor_analysis'`` and ``'pca'``)::
{'diagonal_fixed': {'grad': 0.1, 'mag': 0.1, 'eeg': 0.1, ...},
             'shrinkage': {'shrinkage': 0.1},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30)},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}}
cv : int | sklearn.model_selection object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger by default :class:`sklearn.model_selection.KFold`
with 3 splits.
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale data to roughly the same order of
magnitude.
%(n_jobs)s
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False.
on_mismatch : str
What to do when the MEG<->Head transformations do not match between
epochs. If "raise" (default) an error is raised, if "warn" then a
warning is emitted, if "ignore" then nothing is printed. Having
mismatched transforms can in some cases lead to unexpected or
unstable results in covariance calculation, e.g. when data
have been processed with Maxwell filtering but not transformed
to the same head position.
%(rank_None)s
.. versionadded:: 0.17
.. versionadded:: 0.18
Support for 'info' mode.
%(verbose)s
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_raw_covariance : Estimate noise covariance from raw data, such as
empty-room recordings.
Notes
-----
Baseline correction or sufficient high-passing should be used
when creating the :class:`Epochs` to ensure that the data are zero mean,
otherwise the computed covariance matrix will be inaccurate.
Valid ``method`` strings are:
* ``'empirical'``
The empirical or sample covariance (default)
* ``'diagonal_fixed'``
A diagonal regularization based on channel types as in
:func:`mne.cov.regularize`.
* ``'shrinkage'``
Fixed shrinkage.
.. versionadded:: 0.16
* ``'ledoit_wolf'``
The Ledoit-Wolf estimator, which uses an
empirical formula for the optimal shrinkage value
:footcite:`LedoitWolf2004`.
* ``'oas'``
The OAS estimator :footcite:`ChenEtAl2010`, which uses a different
        empirical formula for the optimal shrinkage value.
.. versionadded:: 0.16
* ``'shrunk'``
Like 'ledoit_wolf', but with cross-validation
for optimal alpha.
* ``'pca'``
Probabilistic PCA with low rank :footcite:`TippingBishop1999`.
* ``'factor_analysis'``
Factor analysis with low rank :footcite:`Barber2012`.
``'ledoit_wolf'`` and ``'pca'`` are similar to ``'shrunk'`` and
``'factor_analysis'``, respectively, except that they use
cross validation (which is useful when samples are correlated, which
is often the case for M/EEG data). The former two are not included in
the ``'auto'`` mode to avoid redundancy.
For multiple event types, it is also possible to create a
single :class:`Epochs` object with events obtained using
:func:`mne.merge_events`. However, the resulting covariance matrix
will only be correct if ``keep_sample_mean is True``.
The covariance can be unstable if the number of samples is small.
In that case it is common to regularize the covariance estimate.
The ``method`` parameter allows to regularize the covariance in an
automated way. It also allows to select between different alternative
estimation algorithms which themselves achieve regularization.
Details are described in :footcite:`EngemannGramfort2015`.
For more information on the advanced estimation methods, see
:ref:`the sklearn manual <sklearn:covariance>`.
References
----------
.. footbibliography::
"""
# scale to natural unit for best stability with MEG/EEG
scalings = _check_scalings_user(scalings)
method, _method_params = _check_method_params(
method, method_params, keep_sample_mean, rank=rank)
del method_params
# for multi condition support epochs is required to refer to a list of
# epochs objects
def _unpack_epochs(epochs):
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
if not isinstance(epochs, list):
epochs = _unpack_epochs(epochs)
else:
epochs = sum([_unpack_epochs(epoch) for epoch in epochs], [])
# check for baseline correction
if any(epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5 and
keep_sample_mean for epochs_t in epochs):
warn('Epochs are not baseline corrected, covariance '
'matrix may be inaccurate')
orig = epochs[0].info['dev_head_t']
_check_on_missing(on_mismatch, 'on_mismatch')
for ei, epoch in enumerate(epochs):
epoch.info._check_consistency()
if (orig is None) != (epoch.info['dev_head_t'] is None) or \
(orig is not None and not
np.allclose(orig['trans'],
epoch.info['dev_head_t']['trans'])):
msg = ('MEG<->Head transform mismatch between epochs[0]:\n%s\n\n'
'and epochs[%s]:\n%s'
% (orig, ei, epoch.info['dev_head_t']))
_on_missing(on_mismatch, msg, 'on_mismatch')
bads = epochs[0].info['bads']
if projs is None:
projs = epochs[0].info['projs']
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.proj != epochs[0].proj:
raise ValueError('Epochs must agree on the use of projections')
for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
if not _proj_equal(proj_a, proj_b):
raise ValueError('Epochs must have same projectors')
projs = _check_projs(projs)
ch_names = epochs[0].ch_names
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.info['bads'] != bads:
raise ValueError('Epochs must have same bad channels')
if epochs_t.ch_names != ch_names:
raise ValueError('Epochs must have same channel names')
picks_list = _picks_by_type(epochs[0].info)
picks_meeg = np.concatenate([b for _, b in picks_list])
picks_meeg = np.sort(picks_meeg)
ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
info = epochs[0].info # we will overwrite 'epochs'
if not keep_sample_mean:
# prepare mean covs
n_epoch_types = len(epochs)
data_mean = [0] * n_epoch_types
n_samples = np.zeros(n_epoch_types, dtype=np.int64)
n_epochs = np.zeros(n_epoch_types, dtype=np.int64)
for ii, epochs_t in enumerate(epochs):
tslice = _get_tslice(epochs_t, tmin, tmax)
for e in epochs_t:
e = e[picks_meeg, tslice]
if not keep_sample_mean:
data_mean[ii] += e
n_samples[ii] += e.shape[1]
n_epochs[ii] += 1
n_samples_epoch = n_samples // n_epochs
norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean
in zip(n_epochs, data_mean)]
info = pick_info(info, picks_meeg)
tslice = _get_tslice(epochs[0], tmin, tmax)
epochs = [ee.get_data(picks=picks_meeg)[..., tslice] for ee in epochs]
picks_meeg = np.arange(len(picks_meeg))
picks_list = _picks_by_type(info)
if len(epochs) > 1:
epochs = np.concatenate(epochs, 0)
else:
epochs = epochs[0]
epochs = np.hstack(epochs)
n_samples_tot = epochs.shape[-1]
_check_n_samples(n_samples_tot, len(picks_meeg))
epochs = epochs.T # sklearn | C-order
cov_data = _compute_covariance_auto(
epochs, method=method, method_params=_method_params, info=info,
cv=cv, n_jobs=n_jobs, stop_early=True, picks_list=picks_list,
scalings=scalings, rank=rank)
if keep_sample_mean is False:
cov = cov_data['empirical']['data']
# undo scaling
cov *= (n_samples_tot - 1)
# ... apply pre-computed class-wise normalization
for mean_cov in data_mean:
cov -= mean_cov
cov /= norm_const
covs = list()
for this_method, data in cov_data.items():
cov = Covariance(data.pop('data'), ch_names, info['bads'], projs,
nfree=n_samples_tot - 1)
# add extra info
cov.update(method=this_method, **data)
covs.append(cov)
logger.info('Number of samples used : %d' % n_samples_tot)
covs.sort(key=lambda c: c['loglik'], reverse=True)
if len(covs) > 1:
msg = ['log-likelihood on unseen data (descending order):']
for c in covs:
msg.append('%s: %0.3f' % (c['method'], c['loglik']))
logger.info('\n '.join(msg))
if return_estimators:
out = covs
else:
out = covs[0]
logger.info('selecting best estimator: {}'.format(out['method']))
else:
out = covs[0]
logger.info('[done]')
return out
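# A minimal usage sketch (assumes baseline-corrected ``epochs`` already exist;
# ``tmax=0.`` restricts the estimate to the pre-stimulus period):
#
#     cov = compute_covariance(epochs, tmax=0.)
#     # or compare several regularized estimators and keep the best one:
#     cov = compute_covariance(epochs, tmax=0.,
#                              method=['shrunk', 'diagonal_fixed', 'empirical'])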
def _check_scalings_user(scalings):
if isinstance(scalings, dict):
for k, v in scalings.items():
_check_option('the keys in `scalings`', k, ['mag', 'grad', 'eeg'])
elif scalings is not None and not isinstance(scalings, np.ndarray):
raise TypeError('scalings must be a dict, ndarray, or None, got %s'
% type(scalings))
scalings = _handle_default('scalings', scalings)
return scalings
def _eigvec_subspace(eig, eigvec, mask):
"""Compute the subspace from a subset of eigenvectors."""
# We do the same thing we do with projectors:
P = np.eye(len(eigvec)) - np.dot(eigvec[~mask].conj().T, eigvec[~mask])
eig, eigvec = eigh(P)
eigvec = eigvec.conj().T
return eig, eigvec
def _get_iid_kwargs():
import sklearn
kwargs = dict()
if LooseVersion(sklearn.__version__) < LooseVersion('0.22'):
kwargs['iid'] = False
return kwargs
def _compute_covariance_auto(data, method, info, method_params, cv,
scalings, n_jobs, stop_early, picks_list, rank):
"""Compute covariance auto mode."""
# rescale to improve numerical stability
orig_rank = rank
rank = compute_rank(RawArray(data.T, info, copy=None, verbose=False),
rank, scalings, info)
with _scaled_array(data.T, picks_list, scalings):
C = np.dot(data.T, data)
_, eigvec, mask = _smart_eigh(C, info, rank, proj_subspace=True,
do_compute_rank=False)
eigvec = eigvec[mask]
data = np.dot(data, eigvec.T)
used = np.where(mask)[0]
sub_picks_list = [(key, np.searchsorted(used, picks))
for key, picks in picks_list]
sub_info = pick_info(info, used) if len(used) != len(mask) else info
logger.info('Reducing data rank from %s -> %s'
% (len(mask), eigvec.shape[0]))
estimator_cov_info = list()
msg = 'Estimating covariance using %s'
ok_sklearn = check_version('sklearn')
if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'):
raise ValueError('scikit-learn is not installed, `method` must be '
'`empirical`, got %s' % (method,))
for method_ in method:
data_ = data.copy()
name = method_.__name__ if callable(method_) else method_
logger.info(msg % name.upper())
mp = method_params[method_]
_info = {}
if method_ == 'empirical':
est = EmpiricalCovariance(**mp)
est.fit(data_)
estimator_cov_info.append((est, est.covariance_, _info))
del est
elif method_ == 'diagonal_fixed':
est = _RegCovariance(info=sub_info, **mp)
est.fit(data_)
estimator_cov_info.append((est, est.covariance_, _info))
del est
elif method_ == 'ledoit_wolf':
from sklearn.covariance import LedoitWolf
shrinkages = []
lw = LedoitWolf(**mp)
for ch_type, picks in sub_picks_list:
lw.fit(data_[:, picks])
shrinkages.append((ch_type, lw.shrinkage_, picks))
sc = _ShrunkCovariance(shrinkage=shrinkages, **mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del lw, sc
elif method_ == 'oas':
from sklearn.covariance import OAS
shrinkages = []
oas = OAS(**mp)
for ch_type, picks in sub_picks_list:
oas.fit(data_[:, picks])
shrinkages.append((ch_type, oas.shrinkage_, picks))
sc = _ShrunkCovariance(shrinkage=shrinkages, **mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del oas, sc
elif method_ == 'shrinkage':
sc = _ShrunkCovariance(**mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del sc
elif method_ == 'shrunk':
from sklearn.model_selection import GridSearchCV
from sklearn.covariance import ShrunkCovariance
shrinkage = mp.pop('shrinkage')
tuned_parameters = [{'shrinkage': shrinkage}]
shrinkages = []
gs = GridSearchCV(ShrunkCovariance(**mp),
tuned_parameters, cv=cv, **_get_iid_kwargs())
for ch_type, picks in sub_picks_list:
gs.fit(data_[:, picks])
shrinkages.append((ch_type, gs.best_estimator_.shrinkage,
picks))
shrinkages = [c[0] for c in zip(shrinkages)]
sc = _ShrunkCovariance(shrinkage=shrinkages, **mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del shrinkage, sc
elif method_ == 'pca':
assert orig_rank == 'full'
pca, _info = _auto_low_rank_model(
data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv,
stop_early=stop_early)
pca.fit(data_)
estimator_cov_info.append((pca, pca.get_covariance(), _info))
del pca
elif method_ == 'factor_analysis':
assert orig_rank == 'full'
fa, _info = _auto_low_rank_model(
data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv,
stop_early=stop_early)
fa.fit(data_)
estimator_cov_info.append((fa, fa.get_covariance(), _info))
del fa
else:
raise ValueError('Oh no! Your estimator does not have'
' a .fit method')
logger.info('Done.')
if len(method) > 1:
logger.info('Using cross-validation to select the best estimator.')
out = dict()
for ei, (estimator, cov, runtime_info) in \
enumerate(estimator_cov_info):
if len(method) > 1:
loglik = _cross_val(data, estimator, cv, n_jobs)
else:
loglik = None
# project back
cov = np.dot(eigvec.T, np.dot(cov, eigvec))
# undo bias
cov *= data.shape[0] / (data.shape[0] - 1)
# undo scaling
_undo_scaling_cov(cov, picks_list, scalings)
method_ = method[ei]
name = method_.__name__ if callable(method_) else method_
out[name] = dict(loglik=loglik, data=cov, estimator=estimator)
out[name].update(runtime_info)
return out
def _gaussian_loglik_scorer(est, X, y=None):
"""Compute the Gaussian log likelihood of X under the model in est."""
# compute empirical covariance of the test set
precision = est.get_precision()
n_samples, n_features = X.shape
log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision))
out = np.mean(log_like)
return out
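# The scorer above returns the mean Gaussian log-density of the test samples,
# log N(x; 0, C) = -0.5 * (x' P x + d * log(2 * pi) - logdet(P)), where
# P = C^-1 is taken from est.get_precision() and d is the number of features.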
def _cross_val(data, est, cv, n_jobs):
"""Compute cross validation."""
from sklearn.model_selection import cross_val_score
return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs,
scoring=_gaussian_loglik_scorer))
def _auto_low_rank_model(data, mode, n_jobs, method_params, cv,
stop_early=True, verbose=None):
"""Compute latent variable models."""
method_params = deepcopy(method_params)
iter_n_components = method_params.pop('iter_n_components')
if iter_n_components is None:
iter_n_components = np.arange(5, data.shape[1], 5)
from sklearn.decomposition import PCA, FactorAnalysis
if mode == 'factor_analysis':
est = FactorAnalysis
else:
assert mode == 'pca'
est = PCA
est = est(**method_params)
est.n_components = 1
scores = np.empty_like(iter_n_components, dtype=np.float64)
scores.fill(np.nan)
# make sure we don't empty the thing if it's a generator
max_n = max(list(deepcopy(iter_n_components)))
if max_n > data.shape[1]:
warn('You are trying to estimate %i components on matrix '
'with %i features.' % (max_n, data.shape[1]))
for ii, n in enumerate(iter_n_components):
est.n_components = n
try: # this may fail depending on rank and split
score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs)
except ValueError:
score = np.inf
if np.isinf(score) or score > 0:
logger.info('... infinite values encountered. stopping estimation')
break
logger.info('... rank: %i - loglik: %0.3f' % (n, score))
if score != -np.inf:
scores[ii] = score
if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0) and stop_early):
# early stop search when loglik has been going down 3 times
logger.info('early stopping parameter search.')
break
# happens if rank is too low right form the beginning
if np.isnan(scores).all():
raise RuntimeError('Oh no! Could not estimate covariance because all '
'scores were NaN. Please contact the MNE-Python '
'developers.')
i_score = np.nanargmax(scores)
best = est.n_components = iter_n_components[i_score]
logger.info('... best model at rank = %i' % best)
runtime_info = {'ranks': np.array(iter_n_components),
'scores': scores,
'best': best,
'cv': cv}
return est, runtime_info
###############################################################################
# Sklearn Estimators
class _RegCovariance(BaseEstimator):
"""Aux class."""
def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1,
ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1,
fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1,
csd=0.1, dbs=0.1, store_precision=False,
assume_centered=False):
self.info = info
# For sklearn compat, these cannot (easily?) be combined into
# a single dictionary
self.grad = grad
self.mag = mag
self.eeg = eeg
self.seeg = seeg
self.dbs = dbs
self.ecog = ecog
self.hbo = hbo
self.hbr = hbr
self.fnirs_cw_amplitude = fnirs_cw_amplitude
self.fnirs_fd_ac_amplitude = fnirs_fd_ac_amplitude
self.fnirs_fd_phase = fnirs_fd_phase
self.fnirs_od = fnirs_od
self.csd = csd
self.store_precision = store_precision
self.assume_centered = assume_centered
def fit(self, X):
"""Fit covariance model with classical diagonal regularization."""
self.estimator_ = EmpiricalCovariance(
store_precision=self.store_precision,
assume_centered=self.assume_centered)
self.covariance_ = self.estimator_.fit(X).covariance_
self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T)
cov_ = Covariance(
data=self.covariance_, names=self.info['ch_names'],
bads=self.info['bads'], projs=self.info['projs'],
nfree=len(self.covariance_))
cov_ = regularize(
cov_, self.info, proj=False, exclude='bads',
grad=self.grad, mag=self.mag, eeg=self.eeg,
ecog=self.ecog, seeg=self.seeg, dbs=self.dbs,
hbo=self.hbo, hbr=self.hbr, rank='full')
self.estimator_.covariance_ = self.covariance_ = cov_.data
return self
def score(self, X_test, y=None):
"""Delegate call to modified EmpiricalCovariance instance."""
return self.estimator_.score(X_test, y=y)
def get_precision(self):
"""Delegate call to modified EmpiricalCovariance instance."""
return self.estimator_.get_precision()
class _ShrunkCovariance(BaseEstimator):
"""Aux class."""
def __init__(self, store_precision, assume_centered,
shrinkage=0.1):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.shrinkage = shrinkage
def fit(self, X):
"""Fit covariance model with oracle shrinkage regularization."""
from sklearn.covariance import shrunk_covariance
self.estimator_ = EmpiricalCovariance(
store_precision=self.store_precision,
assume_centered=self.assume_centered)
cov = self.estimator_.fit(X).covariance_
if not isinstance(self.shrinkage, (list, tuple)):
shrinkage = [('all', self.shrinkage, np.arange(len(cov)))]
else:
shrinkage = self.shrinkage
zero_cross_cov = np.zeros_like(cov, dtype=bool)
for a, b in itt.combinations(shrinkage, 2):
picks_i, picks_j = a[2], b[2]
ch_ = a[0], b[0]
if 'eeg' in ch_:
zero_cross_cov[np.ix_(picks_i, picks_j)] = True
zero_cross_cov[np.ix_(picks_j, picks_i)] = True
self.zero_cross_cov_ = zero_cross_cov
# Apply shrinkage to blocks
for ch_type, c, picks in shrinkage:
sub_cov = cov[np.ix_(picks, picks)]
cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov,
shrinkage=c)
# Apply shrinkage to cross-cov
for a, b in itt.combinations(shrinkage, 2):
shrinkage_i, shrinkage_j = a[1], b[1]
picks_i, picks_j = a[2], b[2]
c_ij = np.sqrt((1. - shrinkage_i) * (1. - shrinkage_j))
cov[np.ix_(picks_i, picks_j)] *= c_ij
cov[np.ix_(picks_j, picks_i)] *= c_ij
# Set to zero the necessary cross-cov
if np.any(zero_cross_cov):
cov[zero_cross_cov] = 0.0
self.estimator_.covariance_ = self.covariance_ = cov
return self
def score(self, X_test, y=None):
"""Delegate to modified EmpiricalCovariance instance."""
# compute empirical covariance of the test set
test_cov = empirical_covariance(X_test - self.estimator_.location_,
assume_centered=True)
if np.any(self.zero_cross_cov_):
test_cov[self.zero_cross_cov_] = 0.
res = log_likelihood(test_cov, self.estimator_.get_precision())
return res
def get_precision(self):
"""Delegate to modified EmpiricalCovariance instance."""
return self.estimator_.get_precision()
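# Hedged illustration (not part of the original module): a NumPy-only sketch
# of the block-wise shrinkage applied above via sklearn's
# ``shrunk_covariance``, i.e. ``(1 - alpha) * C + alpha * mu * I`` with
# ``mu = trace(C) / n``. The random data and shrinkage value are arbitrary.
def _example_blockwise_shrinkage(alpha=0.1):
    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)                # 200 (roughly centered) samples, 5 channels
    C = np.dot(X.T, X) / len(X)          # empirical covariance
    mu = np.trace(C) / len(C)
    C_shrunk = (1. - alpha) * C + alpha * mu * np.eye(len(C))
    # off-diagonal terms are pulled toward zero, diagonal terms toward ``mu``
    return C, C_shrunk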
###############################################################################
# Writing
def write_cov(fname, cov):
"""Write a noise covariance matrix.
Parameters
----------
fname : str
The name of the file. It should end with -cov.fif or -cov.fif.gz.
cov : Covariance
The noise covariance matrix.
See Also
--------
read_cov
"""
cov.save(fname)
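# Hedged usage sketch (not part of the original module): round-trip a small
# noise covariance through disk with ``write_cov`` and ``read_cov`` (defined
# earlier in this module). Channel names, values and the temporary file name
# are illustrative only.
def _example_write_cov_round_trip():
    import os.path as op
    import tempfile
    names = ['EEG 001', 'EEG 002', 'EEG 003']
    cov = Covariance(np.eye(3) * 1e-12, names, bads=[], projs=[], nfree=10)
    fname = op.join(tempfile.mkdtemp(), 'example-cov.fif')
    write_cov(fname, cov)
    cov_read = read_cov(fname)
    assert cov_read.ch_names == names
    return cov_read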
###############################################################################
# Prepare for inverse modeling
def _unpack_epochs(epochs):
"""Aux Function."""
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
def _get_ch_whitener(A, pca, ch_type, rank):
"""Get whitener params for a set of channels."""
# whitening operator
eig, eigvec = eigh(A, overwrite_a=True)
eigvec = eigvec.conj().T
mask = np.ones(len(eig), bool)
eig[:-rank] = 0.0
mask[:-rank] = False
logger.info(' Setting small %s eigenvalues to zero (%s)'
% (ch_type, 'using PCA' if pca else 'without PCA'))
if pca: # No PCA case.
# This line will reduce the actual number of variables in data
# and leadfield to the true rank.
eigvec = eigvec[:-rank].copy()
return eig, eigvec, mask
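# Hedged illustration (not part of the original module): the whitening idea
# behind ``_get_ch_whitener`` in plain NumPy. ``eigh`` returns eigenvalues in
# ascending order, the smallest ones are zeroed, and the whitener is built as
# ``diag(1 / sqrt(eig)) @ eigvec`` on the retained components. The synthetic
# matrix and the rank are arbitrary.
def _example_rank_deficient_whitening(rank=3):
    rng = np.random.RandomState(0)
    X = rng.randn(rank, 5)
    A = np.dot(X.T, X)                   # 5 x 5 matrix of rank 3
    eig, eigvec = eigh(A)
    eigvec = eigvec.conj().T             # rows are eigenvectors, as above
    eig[:-rank] = 0.                     # drop the small leading eigenvalues
    W = np.zeros_like(eigvec)
    W[-rank:] = eigvec[-rank:] / np.sqrt(eig[-rank:])[:, np.newaxis]
    # on the retained subspace, W @ A @ W.T is (numerically) the identity
    return np.dot(W, np.dot(A, W.T))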
@verbose
def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None,
scalings=None, on_rank_mismatch='ignore', verbose=None):
"""Prepare noise covariance matrix.
Parameters
----------
noise_cov : instance of Covariance
The noise covariance to process.
info : dict
The measurement info (used to get channel types and bad channels).
ch_names : list | None
The channel names to be considered. Can be None to use
``info['ch_names']``.
%(rank_None)s
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None
Data will be rescaled before rank estimation to improve accuracy.
If dict, it will override the following dict (default if None)::
dict(mag=1e12, grad=1e11, eeg=1e5)
%(on_rank_mismatch)s
%(verbose)s
Returns
-------
cov : instance of Covariance
A copy of the covariance with the good channels subselected
and parameters updated.
"""
# reorder C and info to match ch_names order
noise_cov_idx = list()
missing = list()
ch_names = info['ch_names'] if ch_names is None else ch_names
for c in ch_names:
# this could be try/except ValueError, but it is not the preferred way
if c in noise_cov.ch_names:
noise_cov_idx.append(noise_cov.ch_names.index(c))
else:
missing.append(c)
if len(missing):
raise RuntimeError('Not all channels present in noise covariance:\n%s'
% missing)
C = noise_cov._get_square()[np.ix_(noise_cov_idx, noise_cov_idx)]
info = pick_info(info, pick_channels(info['ch_names'], ch_names))
projs = info['projs'] + noise_cov['projs']
noise_cov = Covariance(
data=C, names=ch_names, bads=list(noise_cov['bads']),
projs=deepcopy(noise_cov['projs']), nfree=noise_cov['nfree'],
method=noise_cov.get('method', None),
loglik=noise_cov.get('loglik', None))
eig, eigvec, _ = _smart_eigh(noise_cov, info, rank, scalings, projs,
ch_names, on_rank_mismatch=on_rank_mismatch)
noise_cov.update(eig=eig, eigvec=eigvec)
return noise_cov
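# Hedged usage sketch (not part of the original module): subselect channels
# and attach the eigendecomposition needed later for whitening. Channel names,
# sampling rate and values are arbitrary; a warning about the missing average
# EEG reference is expected here.
def _example_prepare_noise_cov_usage():
    ch_names = ['EEG 001', 'EEG 002', 'EEG 003']
    info = create_info(ch_names, 1000., 'eeg')
    cov = Covariance(np.eye(3) * 1e-12, ch_names, bads=[], projs=[], nfree=10)
    cov_prep = prepare_noise_cov(cov, info, ch_names=ch_names[:2])
    assert cov_prep.ch_names == ch_names[:2]
    assert cov_prep['eig'] is not None   # used later by compute_whitener
    return cov_prep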
@verbose
def _smart_eigh(C, info, rank, scalings=None, projs=None,
ch_names=None, proj_subspace=False, do_compute_rank=True,
on_rank_mismatch='ignore', verbose=None):
"""Compute eigh of C taking into account rank and ch_type scalings."""
scalings = _handle_default('scalings_cov_rank', scalings)
projs = info['projs'] if projs is None else projs
ch_names = info['ch_names'] if ch_names is None else ch_names
if info['ch_names'] != ch_names:
info = pick_info(info, [info['ch_names'].index(c) for c in ch_names])
assert info['ch_names'] == ch_names
n_chan = len(ch_names)
# Create the projection operator
proj, ncomp, _ = make_projector(projs, ch_names)
if isinstance(C, Covariance):
C = C['data']
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension = %d)'
% ncomp)
C = np.dot(proj, np.dot(C, proj.T))
noise_cov = Covariance(C, ch_names, [], projs, 0)
if do_compute_rank: # if necessary
rank = compute_rank(
noise_cov, rank, scalings, info, on_rank_mismatch=on_rank_mismatch)
assert C.ndim == 2 and C.shape[0] == C.shape[1]
# time saving short-circuit
if proj_subspace and sum(rank.values()) == C.shape[0]:
return np.ones(n_chan), np.eye(n_chan), np.ones(n_chan, bool)
dtype = complex if C.dtype == np.complex_ else float
eig = np.zeros(n_chan, dtype)
eigvec = np.zeros((n_chan, n_chan), dtype)
mask = np.zeros(n_chan, bool)
for ch_type, picks in _picks_by_type(info, meg_combined=True,
ref_meg=False, exclude='bads'):
if len(picks) == 0:
continue
this_C = C[np.ix_(picks, picks)]
if ch_type not in rank and ch_type in ('mag', 'grad'):
this_rank = rank['meg'] # if there is only one or the other
else:
this_rank = rank[ch_type]
e, ev, m = _get_ch_whitener(this_C, False, ch_type.upper(), this_rank)
if proj_subspace:
# Choose the subspace the same way we do for projections
e, ev = _eigvec_subspace(e, ev, m)
eig[picks], eigvec[np.ix_(picks, picks)], mask[picks] = e, ev, m
# XXX : also handle ref for sEEG and ECoG
if ch_type == 'eeg' and _needs_eeg_average_ref_proj(info) and not \
_has_eeg_average_ref_proj(projs):
            warn('No average EEG reference present in info["projs"], '
                 'covariance may be adversely affected. Consider recomputing '
                 'covariance with an average EEG reference projector added.')
return eig, eigvec, mask
@verbose
def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1,
fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1,
fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, dbs=0.1,
rank=None, scalings=None, verbose=None):
"""Regularize noise covariance matrix.
This method works by adding a constant to the diagonal for each
channel type separately. Special care is taken to keep the
rank of the data constant.
.. note:: This function is kept for reasons of backward-compatibility.
Please consider explicitly using the ``method`` parameter in
:func:`mne.compute_covariance` to directly combine estimation
with regularization in a data-driven fashion. See the `faq
<http://mne.tools/dev/overview/faq.html#how-should-i-regularize-the-covariance-matrix>`_
for more information.
Parameters
----------
cov : Covariance
The noise covariance matrix.
info : dict
The measurement info (used to get channel types and bad channels).
mag : float (default 0.1)
Regularization factor for MEG magnetometers.
grad : float (default 0.1)
Regularization factor for MEG gradiometers. Must be the same as
``mag`` if data have been processed with SSS.
eeg : float (default 0.1)
Regularization factor for EEG.
exclude : list | 'bads' (default 'bads')
        List of channels to mark as bad. If 'bads', the bad channels
        are extracted from both info['bads'] and cov['bads'].
proj : bool (default True)
Apply projections to keep rank of data.
seeg : float (default 0.1)
Regularization factor for sEEG signals.
ecog : float (default 0.1)
Regularization factor for ECoG signals.
hbo : float (default 0.1)
Regularization factor for HBO signals.
hbr : float (default 0.1)
Regularization factor for HBR signals.
fnirs_cw_amplitude : float (default 0.1)
Regularization factor for fNIRS CW raw signals.
fnirs_fd_ac_amplitude : float (default 0.1)
Regularization factor for fNIRS FD AC raw signals.
fnirs_fd_phase : float (default 0.1)
Regularization factor for fNIRS raw phase signals.
fnirs_od : float (default 0.1)
Regularization factor for fNIRS optical density signals.
csd : float (default 0.1)
Regularization factor for EEG-CSD signals.
dbs : float (default 0.1)
Regularization factor for DBS signals.
%(rank_None)s
.. versionadded:: 0.17
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None
Data will be rescaled before rank estimation to improve accuracy.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.17
%(verbose)s
Returns
-------
reg_cov : Covariance
The regularized covariance matrix.
See Also
--------
mne.compute_covariance
""" # noqa: E501
from scipy import linalg
cov = cov.copy()
info._check_consistency()
scalings = _handle_default('scalings_cov_rank', scalings)
regs = dict(eeg=eeg, seeg=seeg, dbs=dbs, ecog=ecog, hbo=hbo, hbr=hbr,
fnirs_cw_amplitude=fnirs_cw_amplitude,
fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude,
fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd)
if exclude is None:
raise ValueError('exclude must be a list of strings or "bads"')
if exclude == 'bads':
exclude = info['bads'] + cov['bads']
picks_dict = {ch_type: [] for ch_type in _DATA_CH_TYPES_SPLIT}
meg_combined = 'auto' if rank != 'full' else False
picks_dict.update(dict(_picks_by_type(
info, meg_combined=meg_combined, exclude=exclude, ref_meg=False)))
if len(picks_dict.get('meg', [])) > 0 and rank != 'full': # combined
if mag != grad:
raise ValueError('On data where magnetometers and gradiometers '
'are dependent (e.g., SSSed data), mag (%s) must '
'equal grad (%s)' % (mag, grad))
logger.info('Regularizing MEG channels jointly')
regs['meg'] = mag
else:
regs.update(mag=mag, grad=grad)
if rank != 'full':
rank = compute_rank(cov, rank, scalings, info)
info_ch_names = info['ch_names']
ch_names_by_type = dict()
for ch_type, picks_type in picks_dict.items():
ch_names_by_type[ch_type] = [info_ch_names[i] for i in picks_type]
# This actually removes bad channels from the cov, which is not backward
# compatible, so let's leave all channels in
cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude)
ch_names = cov_good.ch_names
# Now get the indices for each channel type in the cov
idx_cov = {ch_type: [] for ch_type in ch_names_by_type}
for i, ch in enumerate(ch_names):
for ch_type in ch_names_by_type:
if ch in ch_names_by_type[ch_type]:
idx_cov[ch_type].append(i)
break
else:
            raise Exception('channel %s is of unknown type' % ch)
C = cov_good['data']
assert len(C) == sum(map(len, idx_cov.values()))
if proj:
projs = info['projs'] + cov_good['projs']
projs = activate_proj(projs)
for ch_type in idx_cov:
desc = ch_type.upper()
idx = idx_cov[ch_type]
if len(idx) == 0:
continue
reg = regs[ch_type]
if reg == 0.0:
logger.info(" %s regularization : None" % desc)
continue
logger.info(" %s regularization : %s" % (desc, reg))
this_C = C[np.ix_(idx, idx)]
U = np.eye(this_C.shape[0])
this_ch_names = [ch_names[k] for k in idx]
if rank == 'full':
if proj:
P, ncomp, _ = make_projector(projs, this_ch_names)
if ncomp > 0:
# This adjustment ends up being redundant if rank is None:
U = linalg.svd(P)[0][:, :-ncomp]
logger.info(' Created an SSP operator for %s '
'(dimension = %d)' % (desc, ncomp))
else:
this_picks = pick_channels(info['ch_names'], this_ch_names)
this_info = pick_info(info, this_picks)
# Here we could use proj_subspace=True, but this should not matter
# since this is already in a loop over channel types
_, eigvec, mask = _smart_eigh(this_C, this_info, rank)
U = eigvec[mask].T
this_C = np.dot(U.T, np.dot(this_C, U))
sigma = np.mean(np.diag(this_C))
this_C.flat[::len(this_C) + 1] += reg * sigma # modify diag inplace
this_C = np.dot(U, np.dot(this_C, U.T))
C[np.ix_(idx, idx)] = this_C
# Put data back in correct locations
idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude)
cov['data'][np.ix_(idx, idx)] = C
return cov
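# Hedged usage sketch (not part of the original module): diagonal loading of a
# small EEG covariance with ``regularize``. Channel names, sampling rate and
# the regularization factor are arbitrary illustrations.
def _example_regularize_usage():
    ch_names = ['EEG 001', 'EEG 002', 'EEG 003']
    info = create_info(ch_names, 1000., 'eeg')
    data = np.array([[2., 1., 0.], [1., 2., 1.], [0., 1., 2.]]) * 1e-12
    cov = Covariance(data, ch_names, bads=[], projs=[], nfree=10)
    cov_reg = regularize(cov, info, eeg=0.2, proj=False, rank='full')
    # each diagonal entry is increased by ``0.2 * mean(diag)``
    return cov, cov_reg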
def _regularized_covariance(data, reg=None, method_params=None, info=None,
rank=None):
"""Compute a regularized covariance from data using sklearn.
This is a convenience wrapper for mne.decoding functions, which
adopted a slightly different covariance API.
Returns
-------
cov : ndarray, shape (n_channels, n_channels)
The covariance matrix.
"""
_validate_type(reg, (str, 'numeric', None))
if reg is None:
reg = 'empirical'
elif not isinstance(reg, str):
reg = float(reg)
if method_params is not None:
raise ValueError('If reg is a float, method_params must be None '
'(got %s)' % (type(method_params),))
method_params = dict(shrinkage=dict(
shrinkage=reg, assume_centered=True, store_precision=False))
reg = 'shrinkage'
method, method_params = _check_method_params(
reg, method_params, name='reg', allow_auto=False, rank=rank)
# use mag instead of eeg here to avoid the cov EEG projection warning
info = create_info(data.shape[-2], 1000., 'mag') if info is None else info
picks_list = _picks_by_type(info)
scalings = _handle_default('scalings_cov_rank', None)
cov = _compute_covariance_auto(
data.T, method=method, method_params=method_params,
info=info, cv=None, n_jobs=1, stop_early=True,
picks_list=picks_list, scalings=scalings,
rank=rank)[reg]['data']
return cov
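# Hedged usage sketch (not part of the original module): the decoding-style
# helper above on synthetic data. The shapes and the regularization value are
# arbitrary.
def _example_regularized_covariance_usage():
    rng = np.random.RandomState(0)
    data = rng.randn(4, 500)                              # (n_channels, n_times)
    cov_emp = _regularized_covariance(data, reg=None)     # empirical estimate
    cov_shrunk = _regularized_covariance(data, reg=0.1)   # diagonal shrinkage
    assert cov_emp.shape == cov_shrunk.shape == (4, 4)
    return cov_shrunk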
@verbose
def compute_whitener(noise_cov, info=None, picks=None, rank=None,
scalings=None, return_rank=False, pca=False,
return_colorer=False, on_rank_mismatch='warn',
verbose=None):
"""Compute whitening matrix.
Parameters
----------
noise_cov : Covariance
The noise covariance.
info : dict | None
The measurement info. Can be None if ``noise_cov`` has already been
prepared with :func:`prepare_noise_cov`.
%(picks_good_data_noref)s
%(rank_None)s
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None
The rescaling method to be applied. See documentation of
``prepare_noise_cov`` for details.
return_rank : bool
If True, return the rank used to compute the whitener.
.. versionadded:: 0.15
pca : bool | str
Space to project the data into. Options:
:data:`python:True`
Whitener will be shape (n_nonzero, n_channels).
``'white'``
Whitener will be shape (n_channels, n_channels), potentially rank
deficient, and have the first ``n_channels - n_nonzero`` rows and
columns set to zero.
:data:`python:False` (default)
Whitener will be shape (n_channels, n_channels), potentially rank
deficient, and rotated back to the space of the original data.
.. versionadded:: 0.18
return_colorer : bool
If True, return the colorer as well.
%(on_rank_mismatch)s
%(verbose)s
Returns
-------
W : ndarray, shape (n_channels, n_channels) or (n_nonzero, n_channels)
The whitening matrix.
ch_names : list
The channel names.
rank : int
Rank reduction of the whitener. Returned only if return_rank is True.
colorer : ndarray, shape (n_channels, n_channels) or (n_channels, n_nonzero)
The coloring matrix.
""" # noqa: E501
_validate_type(pca, (str, bool), 'space')
_valid_pcas = (True, 'white', False)
if pca not in _valid_pcas:
raise ValueError('space must be one of %s, got %s'
% (_valid_pcas, pca))
if info is None:
if 'eig' not in noise_cov:
raise ValueError('info can only be None if the noise cov has '
'already been prepared with prepare_noise_cov')
ch_names = deepcopy(noise_cov['names'])
else:
picks = _picks_to_idx(info, picks, with_ref_meg=False)
ch_names = [info['ch_names'][k] for k in picks]
del picks
noise_cov = prepare_noise_cov(
noise_cov, info, ch_names, rank, scalings,
on_rank_mismatch=on_rank_mismatch)
n_chan = len(ch_names)
assert n_chan == len(noise_cov['eig'])
# Omit the zeroes due to projection
eig = noise_cov['eig'].copy()
nzero = (eig > 0)
eig[~nzero] = 0. # get rid of numerical noise (negative) ones
if noise_cov['eigvec'].dtype.kind == 'c':
dtype = np.complex128
else:
dtype = np.float64
W = np.zeros((n_chan, 1), dtype)
W[nzero, 0] = 1.0 / np.sqrt(eig[nzero])
# Rows of eigvec are the eigenvectors
W = W * noise_cov['eigvec'] # C ** -0.5
C = np.sqrt(eig) * noise_cov['eigvec'].conj().T # C ** 0.5
n_nzero = nzero.sum()
logger.info(' Created the whitener using a noise covariance matrix '
'with rank %d (%d small eigenvalues omitted)'
% (n_nzero, noise_cov['dim'] - n_nzero))
# Do the requested projection
if pca is True:
W = W[nzero]
C = C[:, nzero]
elif pca is False:
W = np.dot(noise_cov['eigvec'].conj().T, W)
C = np.dot(C, noise_cov['eigvec'])
# Triage return
out = W, ch_names
if return_rank:
out += (n_nzero,)
if return_colorer:
out += (C,)
return out
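# Hedged usage sketch (not part of the original module): build a whitener and
# check that it maps the covariance to (approximately) the identity on the
# retained components. Channel names and values are arbitrary; a warning about
# the missing average EEG reference is expected.
def _example_compute_whitener_usage():
    ch_names = ['EEG 001', 'EEG 002', 'EEG 003']
    info = create_info(ch_names, 1000., 'eeg')
    data = np.array([[2., 1., 0.], [1., 2., 1.], [0., 1., 2.]]) * 1e-12
    cov = Covariance(data, ch_names, bads=[], projs=[], nfree=10)
    W, ch_names_w = compute_whitener(cov, info)
    white = np.dot(W, np.dot(cov.data, W.T))
    return white, ch_names_w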
@verbose
def whiten_evoked(evoked, noise_cov, picks=None, diag=None, rank=None,
scalings=None, verbose=None):
"""Whiten evoked data using given noise covariance.
Parameters
----------
evoked : instance of Evoked
The evoked data.
noise_cov : instance of Covariance
The noise covariance.
%(picks_good_data)s
diag : bool (default False)
If True, whiten using only the diagonal of the covariance.
%(rank_None)s
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None (default None)
To achieve reliable rank estimation on multiple sensors,
sensors have to be rescaled. This parameter controls the
rescaling. If dict, it will override the
following default dict (default if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
%(verbose)s
Returns
-------
evoked_white : instance of Evoked
The whitened evoked data.
"""
evoked = evoked.copy()
picks = _picks_to_idx(evoked.info, picks)
if diag:
noise_cov = noise_cov.as_diag()
W, _ = compute_whitener(noise_cov, evoked.info, picks=picks,
rank=rank, scalings=scalings)
evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
return evoked
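# Hedged usage sketch (not part of the original module): whiten a synthetic
# evoked response. ``EvokedArray`` is assumed to be importable from the
# top-level package; channel names, data and ``nave`` are arbitrary.
def _example_whiten_evoked_usage():
    from mne import EvokedArray
    ch_names = ['EEG 001', 'EEG 002', 'EEG 003']
    info = create_info(ch_names, 1000., 'eeg')
    rng = np.random.RandomState(0)
    evoked = EvokedArray(rng.randn(3, 100) * 1e-6, info, tmin=0., nave=20)
    cov = Covariance(np.eye(3) * 1e-12, ch_names, bads=[], projs=[], nfree=10)
    evoked_white = whiten_evoked(evoked, cov)
    # after whitening, amplitudes are expressed in noise standard deviations
    return evoked_white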
@verbose
def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
"""Read a noise covariance matrix."""
# Find all covariance matrices
from scipy import sparse
covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
if len(covs) == 0:
raise ValueError('No covariance matrices found')
# Is any of the covariance matrices a noise covariance
for p in range(len(covs)):
tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
if tag is not None and int(tag.data) == cov_kind:
this = covs[p]
# Find all the necessary data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
if tag is None:
raise ValueError('Covariance matrix dimension not found')
dim = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
if tag is None:
nfree = -1
else:
nfree = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD)
if tag is None:
method = None
else:
method = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE)
if tag is None:
score = None
else:
score = tag.data[0]
tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
if tag is None:
names = []
else:
names = tag.data.split(':')
if len(names) != dim:
raise ValueError('Number of names does not match '
'covariance matrix dimension')
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
if tag is None:
raise ValueError('No covariance matrix data found')
else:
# Diagonal is stored
data = tag.data
diag = True
logger.info(' %d x %d diagonal covariance (kind = '
'%d) found.' % (dim, dim, cov_kind))
else:
if not sparse.issparse(tag.data):
# Lower diagonal is stored
vals = tag.data
data = np.zeros((dim, dim))
data[np.tril(np.ones((dim, dim))) > 0] = vals
data = data + data.T
data.flat[::dim + 1] /= 2.0
diag = False
logger.info(' %d x %d full covariance (kind = %d) '
'found.' % (dim, dim, cov_kind))
else:
diag = False
data = tag.data
logger.info(' %d x %d sparse covariance (kind = %d)'
' found.' % (dim, dim, cov_kind))
# Read the possibly precomputed decomposition
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
if tag1 is not None and tag2 is not None:
eig = tag1.data
eigvec = tag2.data
else:
eig = None
eigvec = None
# Read the projection operator
projs = _read_proj(fid, this)
# Read the bad channel list
bads = _read_bad_channels(fid, this, None)
# Put it together
assert dim == len(data)
assert data.ndim == (1 if diag else 2)
cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names,
data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
eigvec=eigvec)
if score is not None:
cov['loglik'] = score
if method is not None:
cov['method'] = method
if limited:
del cov['kind'], cov['dim'], cov['diag']
return cov
logger.info(' Did not find the desired covariance matrix (kind = %d)'
% cov_kind)
return None
def _write_cov(fid, cov):
"""Write a noise covariance matrix."""
start_block(fid, FIFF.FIFFB_MNE_COV)
# Dimensions etc.
write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
if cov['nfree'] > 0:
write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
# Channel names
if cov['names'] is not None and len(cov['names']) > 0:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
# Data
if cov['diag']:
write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
else:
# Store only lower part of covariance matrix
dim = cov['dim']
mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0
vals = cov['data'][mask].ravel()
write_double(fid, FIFF.FIFF_MNE_COV, vals)
# Eigenvalues and vectors if present
if cov['eig'] is not None and cov['eigvec'] is not None:
write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
# Projection operator
if cov['projs'] is not None and len(cov['projs']) > 0:
_write_proj(fid, cov['projs'])
# Bad channels
if cov['bads'] is not None and len(cov['bads']) > 0:
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# estimator method
if 'method' in cov:
write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method'])
# negative log-likelihood score
if 'loglik' in cov:
write_double(
fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik']))
# Done!
end_block(fid, FIFF.FIFFB_MNE_COV)
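# Hedged illustration (not part of the original module): the lower-triangle
# packing performed by ``_write_cov`` and undone by ``_read_cov``, in plain
# NumPy. The 3 x 3 matrix is arbitrary.
def _example_lower_triangle_round_trip():
    dim = 3
    full = np.array([[2., 1., 0.], [1., 3., 1.], [0., 1., 4.]])
    # pack: keep only the lower triangle (including the diagonal)
    mask = np.tril(np.ones((dim, dim), dtype=bool))
    vals = full[mask].ravel()
    # unpack: scatter back, symmetrize, then halve the doubled diagonal
    data = np.zeros((dim, dim))
    data[np.tril(np.ones((dim, dim))) > 0] = vals
    data = data + data.T
    data.flat[::dim + 1] /= 2.0
    assert np.allclose(data, full)
    return data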
| bsd-3-clause |
vigilv/scikit-learn | sklearn/__init__.py | 59 | 3038 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
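# Hedged usage sketch (not part of the original module): the seed consumed by
# ``setup_module`` above can be pinned through the environment before the test
# runner imports the fixture, e.g. (assuming a POSIX shell and the nose runner
# in use at the time)::
#
#     SKLEARN_SEED=42 nosetests sklearn
#
# or programmatically, before the test session starts:
def _example_pin_sklearn_seed(seed=42):
    import os
    os.environ['SKLEARN_SEED'] = str(seed)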
| bsd-3-clause |
MaterialsDiscovery/PyChemia | setup.py | 1 | 6127 | import os
import json
import subprocess
from setuptools import setup, find_packages, Extension
from distutils.command.sdist import sdist as _sdist
import pathlib
try:
from Cython.Build import cythonize
from Cython.Distutils import build_ext
except ImportError:
USE_CYTHON = False
else:
USE_CYTHON = True
# Return the git revision as a string
# Copied from scipy's setup.py
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
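# Hedged usage sketch (not part of the original setup script): ``git_version``
# returns the current commit hash inside a git checkout and "Unknown"
# otherwise; ``get_version_info`` below appends its first 7 characters to
# non-release version strings.
def _example_git_version_usage():
    rev = git_version()
    return rev if rev == "Unknown" else rev[:7]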
def get_version_info():
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of scipy.version messes
# up the build under Python 3.
    basepath = pathlib.Path(__file__).parent.absolute()
print(basepath)
rf = open(str(basepath)+os.sep+'setup.json')
release_data = json.load(rf)
rf.close()
FULLVERSION = release_data['version']
if os.path.exists('.git'):
GIT_REVISION = git_version()
    elif os.path.exists('pychemia/version.py'):
        # must be a source distribution, use existing version file
        # load it as a separate module to not load pychemia/__init__.py
import runpy
ns = runpy.run_path('pychemia/version.py')
GIT_REVISION = ns['git_revision']
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return release_data, FULLVERSION, GIT_REVISION
def write_version_py(filename='pychemia/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM PYCHEMIA SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
name = '%(name)s'
description = '%(description)s'
url = '%(url)s'
author = '%(author)s'
email = '%(email)s'
status = '%(status)s'
copyright = '%(copyright)s'
date = '%(date)s'
release = %(isrelease)s
if not release:
version = full_version
"""
release_data, FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': release_data['version'],
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'name': release_data['name'],
'description': release_data['description'],
'url': release_data['url'],
'author': release_data['author'],
'email': release_data['email'],
'status': release_data['status'],
'copyright': release_data['copyright'],
'date': release_data['date'],
'isrelease': str(ISRELEASED)})
finally:
a.close()
return release_data
def get_scripts():
return ['scripts' + os.sep + x for x in os.listdir('scripts') if x[-3:] == '.py']
###################################################################
ISRELEASED = False
KEYWORDS = ["electronic", "structure", "analysis", "materials", "discovery", "metaheuristics"]
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Scientific/Engineering :: Physics",
]
INSTALL_REQUIRES = ['numpy >= 1.19',
'scipy >= 1.5',
'spglib >= 1.9',
'pymongo >= 3.11',
'matplotlib >= 3.3',
'psutil >= 5.8']
###################################################################
print('Using Cython: %s' % USE_CYTHON)
data = write_version_py()
cmdclass = {}
ext = '.pyx' if USE_CYTHON else '.c'
class sdist(_sdist):
def run(self):
# Make sure the compiled Cython files in the distribution are
# up-to-date
from Cython.Build import cythonize
cythonize(ext_modules, annotate=True, compiler_directives={'embedsignature': True})
_sdist.run(self)
cmdclass['sdist'] = sdist
ext_modules = [Extension("pychemia.code.lennardjones.lj_utils", ['pychemia/code/lennardjones/lj_utils' + ext])]
if USE_CYTHON:
cmdclass.update({'build_ext': build_ext})
from Cython.Build import cythonize
ext_modules = cythonize([Extension("pychemia.code.lennardjones.lj_utils", ['pychemia/code/lennardjones/lj_utils' + ext])])
setup(
name=data['name'],
version=data['version'],
author=data['author'],
author_email=data['email'],
packages=find_packages(exclude=['scripts', 'docs', 'tests']),
url=data['url'],
license='LICENSE.txt',
description=data['description'],
long_description=open('README').read(),
install_requires=INSTALL_REQUIRES,
keywords=KEYWORDS,
classifiers=CLASSIFIERS,
python_requires='>=3.6, <4',
package_data={'': ['setup.json']},
scripts=get_scripts(),
cmdclass=cmdclass,
ext_modules=ext_modules
# ext_modules=cythonize(ext_modules, annotate=True, compiler_directives={'embedsignature': True})
)
# ext_modules=cythonize(Extension('pychemia.code.lennardjones.hello',
# ['pychemia/code/lennardjones/lj_utils.pyx'],
# language='c',
# extra_compile_args='-march=native'))
| mit |
Karel-van-de-Plassche/QLKNN-develop | qlknn/plots/comparison/topology.py | 1 | 2645 | from IPython import embed
import numpy as np
import scipy.stats as stats
import pandas as pd
import os
import sys
networks_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../networks'))
NNDB_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../NNDB'))
training_path = os.path.abspath(os.path.join((os.path.abspath(__file__)), '../../training'))
sys.path.append(networks_path)
sys.path.append(NNDB_path)
sys.path.append(training_path)
from model import Network, NetworkJSON, Hyperparameters
from run_model import QuaLiKizNDNN
from train_NDNN import shuffle_panda
from peewee import Param, Passthrough
import matplotlib.pyplot as plt
from matplotlib import gridspec
from load_data import load_data, load_nn
def find_similar_topology(network_id):
#query = Network.find_similar_topology_by_id(network_id)
query = Network.find_similar_networkpar_by_id(network_id)
query &= Network.find_similar_trainingpar_by_id(network_id)
train_dim, hidden_neurons, hidden_activation, output_activation, filter_id = (
Network
.select(Network.target_names,
Hyperparameters.hidden_neurons,
Hyperparameters.hidden_activation,
Hyperparameters.output_activation,
Network.filter_id)
.where(Network.id == network_id)
.join(Hyperparameters)
).tuples().get()
query &= (Network.select()
.where(Network.target_names == Param(train_dim))
#.where(Hyperparameters.hidden_neurons == hidden_neurons)
#.where(Hyperparameters.hidden_activation == Param(hidden_activation))
#.where(Hyperparameters.output_activation == output_activation)
.join(Hyperparameters)
)
df = []
for res in query:
df.append((res.id, res.hyperparameters.get().hidden_neurons, res.network_metadata.get().rms_test))
df = pd.DataFrame(df, columns=['id', 'topo', 'rms_test'])
df['topo'] = df['topo'].apply(tuple)
df.sort_values(['topo', 'rms_test'], inplace = True)
df_trim = pd.DataFrame(columns=['id', 'topo', 'rms_test'])
for index, row in df.iterrows():
        df_best = df.loc[df.loc[(df['topo'] == row['topo'])].index[0]]
if ~(df_best['topo'] == df_trim['topo']).any():
df_trim = df_trim.append(df_best)
labels = [(line[0], '$topo = ' + str(line[1]) + '$') for line in df_trim[['id', 'topo']].values]
print('nn_list = OrderedDict([', end='')
print(*labels, sep=',\n', end='')
print('])')
embed()
find_similar_topology(37)
| mit |