repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
mcdeaton13/dynamic | Python/dynamic/TPI.py | 2 | 23527 | '''
------------------------------------------------------------------------
Last updated 7/19/2015
This program solves for the transition path of the distribution of wealth
and the aggregate capital stock using the time path iteration (TPI)
method, where labor is inelastically supplied.
This py-file calls the following other file(s):
tax.py
utils.py
household.py
firm.py
OUTPUT/SSinit/ss_init_vars.pkl
OUTPUT/SS/ss_vars.pkl
OUTPUT/SSinit/ss_init_tpi.pkl
OUTPUT/Saved_moments/params_given.pkl
OUTPUT/Saved_moments/params_changed.pkl
This py-file creates the following other file(s):
(make sure that an OUTPUT folder exists)
OUTPUT/TPIinit/TPIinit_vars.pkl
OUTPUT/TPI/TPI_vars.pkl
------------------------------------------------------------------------
'''
# Packages
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import cPickle as pickle
import scipy.optimize as opt
import tax
import utils
import household
import firm
'''
------------------------------------------------------------------------
Import steady state distribution, parameters and other objects from
steady state computation in ss_vars.pkl
------------------------------------------------------------------------
'''
from .parameters import get_parameters
globals().update(get_parameters())
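# ------------------------------------------------------------------------
# Illustrative sketch (added for exposition, not part of the model): the
# TPI loop below is a damped fixed-point iteration. A guessed path implies
# a new path from household and firm behavior, and the guess is updated as
# the convex combination nu*new + (1-nu)*old until the percent distance
# falls below a tolerance. The toy function shows that update rule on a
# scalar; the real loop applies it to the paths of w, r, BQ, and T_H via
# utils.convex_combo.
# ------------------------------------------------------------------------
def _toy_damped_iteration(f, x0, nu_damp=0.4, tol=1e-8, max_iter=500):
    '''Find a fixed point of f using the damped update rule used by TPI.'''
    x = x0
    for it in xrange(max_iter):
        x_new = f(x)
        dist = np.abs(x_new - x) / np.maximum(np.abs(x), 1e-12)
        x = nu_damp * x_new + (1 - nu_damp) * x
        if dist < tol:
            break
    return x
# Example: _toy_damped_iteration(lambda k: 0.5 * k + 1.0, 10.0) converges to 2.0.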
def create_tpi_params(a_tax_income, b_tax_income, c_tax_income,
d_tax_income,
b_ellipse, upsilon, J, S, T, beta, sigma, alpha, Z,
delta, ltilde, nu, g_y, tau_payroll, retire,
mean_income_data, get_baseline=True, **kwargs):
#variables = pickle.load(open("OUTPUT/Saved_moments/params_given.pkl", "rb"))
#for key in variables:
# globals()[key] = variables[key]
if get_baseline:
variables = pickle.load(open("OUTPUT/SSinit/ss_init_vars.pkl", "rb"))
for key in variables:
globals()[key] = variables[key]
else:
variables = pickle.load(open("OUTPUT/Saved_moments/params_changed.pkl", "rb"))
for key in variables:
globals()[key] = variables[key]
variables = pickle.load(open("OUTPUT/SS/ss_vars.pkl", "rb"))
for key in variables:
globals()[key] = variables[key]
variables = pickle.load(open("OUTPUT/SSinit/ss_init_tpi_vars.pkl", "rb"))
for key in variables:
globals()[key] = variables[key]
'''
------------------------------------------------------------------------
Set other parameters and initial values
------------------------------------------------------------------------
'''
# Make a vector of all one dimensional parameters, to be used in the following functions
income_tax_params = [a_tax_income, b_tax_income, c_tax_income, d_tax_income]
wealth_tax_params = [h_wealth, p_wealth, m_wealth]
ellipse_params = [b_ellipse, upsilon]
parameters = [J, S, T, beta, sigma, alpha, Z, delta, ltilde, nu, g_y, g_n_ss, tau_payroll, retire, \
mean_income_data] + income_tax_params + wealth_tax_params + ellipse_params
N_tilde = omega.sum(1)
omega_stationary = omega / N_tilde.reshape(T+S, 1)
if get_baseline:
initial_b = bssmat_splus1
initial_n = nssmat
else:
initial_b = bssmat_init
initial_n = nssmat_init
# Get an initial distribution of capital with the initial population distribution
K0 = household.get_K(initial_b, omega_stationary[0].reshape(S, 1), lambdas, g_n_vector[0], 'SS')
b_sinit = np.array(list(np.zeros(J).reshape(1, J)) + list(initial_b[:-1]))
b_splus1init = initial_b
L0 = firm.get_L(e, initial_n, omega_stationary[0].reshape(S, 1), lambdas, 'SS')
Y0 = firm.get_Y(K0, L0, parameters)
w0 = firm.get_w(Y0, L0, parameters)
r0 = firm.get_r(Y0, K0, parameters)
BQ0 = household.get_BQ(r0, initial_b, omega_stationary[0].reshape(S, 1), lambdas, rho.reshape(S, 1), g_n_vector[0], 'SS')
T_H_0 = tax.get_lump_sum(r0, b_sinit, w0, e, initial_n, BQ0, lambdas, factor_ss, omega_stationary[0].reshape(S, 1), 'SS', parameters, theta, tau_bq)
tax0 = tax.total_taxes(r0, b_sinit, w0, e, initial_n, BQ0, lambdas, factor_ss, T_H_0, None, 'SS', False, parameters, theta, tau_bq)
c0 = household.get_cons(r0, b_sinit, w0, e, initial_n, BQ0.reshape(1, J), lambdas.reshape(1, J), b_splus1init, parameters, tax0)
return (income_tax_params, wealth_tax_params, ellipse_params, parameters,
N_tilde, omega_stationary, K0, b_sinit, b_splus1init, L0, Y0,
w0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n)
def SS_TPI_firstdoughnutring(guesses, winit, rinit, BQinit, T_H_init, initial_b, factor_ss, j, parameters, theta, tau_bq):
'''
Solves the first entries of the upper triangle of the twist doughnut. This is
separate from the main TPI function because the values of b and n are scalars,
so it is easier to have a separate function for these cases.
Inputs:
guesses = guess for b and n (2x1 list)
winit = initial wage rate (scalar)
rinit = initial rental rate (scalar)
BQinit = initial aggregate bequest (scalar)
T_H_init = initial lump sum tax (scalar)
initial_b = initial distribution of capital (SxJ array)
factor_ss = steady state scaling factor (scalar)
j = which ability type is being solved for (scalar)
parameters = list of parameters (list)
theta = replacement rates (Jx1 array)
tau_bq = bequest tax rates (Jx1 array)
Output:
euler errors (2x1 list)
'''
b2 = float(guesses[0])
n1 = float(guesses[1])
b1 = float(initial_b[-2, j])
# Euler 1 equations
tax1 = tax.total_taxes(rinit, b1, winit, e[-1, j], n1, BQinit, lambdas[j], factor_ss, T_H_init, j, 'TPI_scalar', False, parameters, theta, tau_bq)
cons1 = household.get_cons(rinit, b1, winit, e[-1, j], n1, BQinit, lambdas[j], b2, parameters, tax1)
bequest_ut = rho[-1] * np.exp(-sigma * g_y) * chi_b[-1, j] * b2 ** (-sigma)
error1 = household.marg_ut_cons(cons1, parameters) - bequest_ut
# Euler 2 equations
income2 = (rinit * b1 + winit * e[-1, j] * n1) * factor_ss
deriv2 = 1 - tau_payroll - tax.tau_income(rinit, b1, winit, e[
-1, j], n1, factor_ss, parameters) - tax.tau_income_deriv(
rinit, b1, winit, e[-1, j], n1, factor_ss, parameters) * income2
error2 = household.marg_ut_cons(cons1, parameters) * winit * e[-1, j] * deriv2 - household.marg_ut_labor(n1, chi_n[-1], parameters)
if n1 <= 0 or n1 >= 1:
error2 += 1e12
if b2 <=0:
error1 += 1e12
if cons1 <= 0:
error1 += 1e12
return [error1] + [error2]
def Steady_state_TPI_solver(guesses, winit, rinit, BQinit, T_H_init, factor, j, s, t, params, theta, tau_bq, rho, lambdas, e, initial_b, chi_b, chi_n):
'''
Parameters:
guesses = distribution of capital and labor (various length list)
winit = wage rate ((T+S)x1 array)
rinit = rental rate ((T+S)x1 array)
BQinit = aggregate bequests ((T+S)x1 array)
T_H_init = lump sum tax over time ((T+S)x1 array)
factor = scaling factor (scalar)
j = which ability type is being solved for (scalar)
s = which upper triangle loop is being solved for (scalar)
t = which diagonal is being solved for (scalar)
params = list of parameters (list)
theta = replacement rates (Jx1 array)
tau_bq = bequest tax rate (Jx1 array)
rho = mortality rate (Sx1 array)
lambdas = ability weights (Jx1 array)
e = ability type (SxJ array)
initial_b = capital stock distribution in period 0 (SxJ array)
chi_b = chi^b_j (Jx1 array)
chi_n = chi^n_s (Sx1 array)
Output:
Value of Euler error (various length list)
'''
J, S, T, beta, sigma, alpha, Z, delta, ltilde, nu, g_y, g_n_ss, tau_payroll, retire, mean_income_data, \
a_tax_income, b_tax_income, c_tax_income, d_tax_income, h_wealth, p_wealth, m_wealth, b_ellipse, upsilon = params
length = len(guesses)/2
b_guess = np.array(guesses[:length])
n_guess = np.array(guesses[length:])
if length == S:
b_s = np.array([0] + list(b_guess[:-1]))
else:
b_s = np.array([(initial_b[-(s+3), j])] + list(b_guess[:-1]))
b_splus1 = b_guess
b_splus2 = np.array(list(b_guess[1:]) + [0])
w_s = winit[t:t+length]
w_splus1 = winit[t+1:t+length+1]
r_s = rinit[t:t+length]
r_splus1 = rinit[t+1:t+length+1]
n_s = n_guess
n_extended = np.array(list(n_guess[1:]) + [0])
e_s = e[-length:, j]
e_extended = np.array(list(e[-length+1:, j]) + [0])
BQ_s = BQinit[t:t+length]
BQ_splus1 = BQinit[t+1:t+length+1]
T_H_s = T_H_init[t:t+length]
T_H_splus1 = T_H_init[t+1:t+length+1]
# Savings euler equations
tax_s = tax.total_taxes(r_s, b_s, w_s, e_s, n_s, BQ_s, lambdas[j], factor, T_H_s, j, 'TPI', False, params, theta, tau_bq)
tax_splus1 = tax.total_taxes(r_splus1, b_splus1, w_splus1, e_extended, n_extended, BQ_splus1, lambdas[j], factor, T_H_splus1, j, 'TPI', True, params, theta, tau_bq)
cons_s = household.get_cons(r_s, b_s, w_s, e_s, n_s, BQ_s, lambdas[j], b_splus1, params, tax_s)
cons_splus1 = household.get_cons(r_splus1, b_splus1, w_splus1, e_extended, n_extended, BQ_splus1, lambdas[j], b_splus2, params, tax_splus1)
income_splus1 = (r_splus1 * b_splus1 + w_splus1 * e_extended * n_extended) * factor
savings_ut = rho[-(length):] * np.exp(-sigma * g_y) * chi_b[-(length):, j] * b_splus1 ** (-sigma)
deriv_savings = 1 + r_splus1 * (1 - tax.tau_income(
r_splus1, b_splus1, w_splus1, e_extended, n_extended, factor, params) - tax.tau_income_deriv(
r_splus1, b_splus1, w_splus1, e_extended, n_extended, factor, params) * income_splus1) - tax.tau_w_prime(
b_splus1, params)*b_splus1 - tax.tau_wealth(b_splus1, params)
error1 = household.marg_ut_cons(cons_s, params) - beta * (1-rho[-(length):]) * np.exp(-sigma * g_y) * deriv_savings * household.marg_ut_cons(
cons_splus1, params) - savings_ut
# Labor leisure euler equations
income_s = (r_s * b_s + w_s * e_s * n_s) * factor
deriv_laborleisure = 1 - tau_payroll - tax.tau_income(r_s, b_s, w_s, e_s, n_s, factor, params) - tax.tau_income_deriv(
r_s, b_s, w_s, e_s, n_s, factor, params) * income_s
error2 = household.marg_ut_cons(cons_s, params) * w_s * e[-(length):, j] * deriv_laborleisure - household.marg_ut_labor(n_s, chi_n[-length:], params)
# Check and punish constraint violations
mask1 = n_guess < 0
error2[mask1] += 1e12
mask2 = n_guess > ltilde
error2[mask2] += 1e12
mask3 = cons_s < 0
error2[mask3] += 1e12
mask4 = b_guess <= 0
error2[mask4] += 1e12
mask5 = cons_splus1 < 0
error2[mask5] += 1e12
return list(error1.flatten()) + list(error2.flatten())
def run_time_path_iteration(Kss, Lss, Yss, BQss, theta, parameters, g_n_vector, omega_stationary, K0, b_sinit, b_splus1init, L0, Y0, r0, BQ0, T_H_0, tax0, c0, initial_b, initial_n, factor_ss, tau_bq, chi_b, chi_n, get_baseline=False, **kwargs):
# Initialize Time paths
domain = np.linspace(0, T, T)
Kinit = (-1/(domain + 1)) * (Kss-K0) + Kss
Kinit[-1] = Kss
Kinit = np.array(list(Kinit) + list(np.ones(S)*Kss))
Linit = np.ones(T+S) * Lss
Yinit = firm.get_Y(Kinit, Linit, parameters)
winit = firm.get_w(Yinit, Linit, parameters)
rinit = firm.get_r(Yinit, Kinit, parameters)
BQinit = np.zeros((T+S, J))
for j in xrange(J):
BQinit[:, j] = list(np.linspace(BQ0[j], BQss[j], T)) + [BQss[j]]*S
BQinit = np.array(BQinit)
T_H_init = np.ones(T+S) * T_Hss
# Make array of initial guesses
domain2 = np.tile(domain.reshape(T, 1, 1), (1, S, J))
ending_b = bssmat_splus1
guesses_b = (-1/(domain2 + 1)) * (ending_b-initial_b) + ending_b
ending_b_tail = np.tile(ending_b.reshape(1, S, J), (S, 1, 1))
guesses_b = np.append(guesses_b, ending_b_tail, axis=0)
domain3 = np.tile(np.linspace(0, 1, T).reshape(T, 1, 1), (1, S, J))
guesses_n = domain3 * (nssmat - initial_n) + initial_n
ending_n_tail = np.tile(nssmat.reshape(1, S, J), (S, 1, 1))
guesses_n = np.append(guesses_n, ending_n_tail, axis=0)
b_mat = np.zeros((T+S, S, J))
n_mat = np.zeros((T+S, S, J))
ind = np.arange(S)
TPIiter = 0
TPIdist = 10
euler_errors = np.zeros((T, 2*S, J))
TPIdist_vec = np.zeros(maxiter)
while (TPIiter < maxiter) and (TPIdist >= mindist_TPI):
Kpath_TPI = list(Kinit) + list(np.ones(10)*Kss)
Lpath_TPI = list(Linit) + list(np.ones(10)*Lss)
# Plot TPI for K for each iteration, so we can see if there is a problem
if PLOT_TPI == True:
plt.figure()
plt.axhline(
y=Kss, color='black', linewidth=2, label=r"Steady State $\hat{K}$", ls='--')
plt.plot(np.arange(
T+10), Kpath_TPI[:T+10], 'b', linewidth=2, label=r"TPI time path $\hat{K}_t$")
plt.savefig("OUTPUT/TPI_K")
# Uncomment the following print statements to make sure all Euler equations are converging.
# If they are not, you'll see negative consumption or consumption spikes; the culprit is usually
# the initial guesses, which may need to be scaled differently. The solution is rather delicate
# for the first few periods and the high ability groups.
for j in xrange(J):
b_mat[1, -1, j], n_mat[0, -1, j] = np.array(opt.fsolve(SS_TPI_firstdoughnutring, [guesses_b[1, -1, j], guesses_n[0, -1, j]],
args=(winit[1], rinit[1], BQinit[1, j], T_H_init[1], initial_b, factor_ss, j, parameters, theta, tau_bq), xtol=1e-13))
# if np.array(SS_TPI_firstdoughnutring([b_mat[1, -1, j], n_mat[0, -1, j]], winit[1], rinit[1], BQinit[1, j], T_H_init[1], initial_b, factor_ss, j, parameters, theta, tau_bq)).max() > 1e-6:
# print 'minidoughnut:', np.array(SS_TPI_firstdoughnutring([b_mat[1, -1, j], n_mat[0, -1, j]], winit[1], rinit[1], BQinit[1, j], T_H_init[1], initial_b, factor_ss, j, parameters, theta, tau_bq)).max()
for s in xrange(S-2): # Upper triangle
ind2 = np.arange(s+2)
b_guesses_to_use = np.diag(guesses_b[1:S+1, :, j], S-(s+2))
n_guesses_to_use = np.diag(guesses_n[:S, :, j], S-(s+2))
solutions = opt.fsolve(Steady_state_TPI_solver, list(
b_guesses_to_use) + list(n_guesses_to_use), args=(
winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, s, 0, parameters, theta, tau_bq, rho, lambdas, e, initial_b, chi_b, chi_n), xtol=1e-13)
b_vec = solutions[:len(solutions)/2]
b_mat[1+ind2, S-(s+2)+ind2, j] = b_vec
n_vec = solutions[len(solutions)/2:]
n_mat[ind2, S-(s+2)+ind2, j] = n_vec
# if abs(np.array(Steady_state_TPI_solver(solutions, winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, s, 0, parameters, theta, tau_bq, rho, lambdas, e, initial_b, chi_b, chi_n))).max() > 1e-6:
# print 's-loop:', abs(np.array(Steady_state_TPI_solver(solutions, winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, s, 0, parameters, theta, tau_bq, rho, lambdas, e, initial_b, chi_b, chi_n))).max()
for t in xrange(0, T):
b_guesses_to_use = .75 * np.diag(guesses_b[t+1:t+S+1, :, j])
n_guesses_to_use = np.diag(guesses_n[t:t+S, :, j])
solutions = opt.fsolve(Steady_state_TPI_solver, list(
b_guesses_to_use) + list(n_guesses_to_use), args=(
winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, None, t, parameters, theta, tau_bq, rho, lambdas, e, None, chi_b, chi_n), xtol=1e-13)
b_vec = solutions[:S]
b_mat[t+1+ind, ind, j] = b_vec
n_vec = solutions[S:]
n_mat[t+ind, ind, j] = n_vec
inputs = list(solutions)
euler_errors[t, :, j] = np.abs(Steady_state_TPI_solver(
inputs, winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, None, t, parameters, theta, tau_bq, rho, lambdas, e, None, chi_b, chi_n))
# if euler_errors.max() > 1e-6:
# print 't-loop:', euler_errors.max()
# Force the initial distribution of capital to be as given above.
b_mat[0, :, :] = initial_b
Kinit = household.get_K(b_mat[:T], omega_stationary[:T].reshape(T, S, 1), lambdas.reshape(1, 1, J), g_n_vector[:T], 'TPI')
Linit = firm.get_L(e.reshape(1, S, J), n_mat[:T], omega_stationary[:T, :].reshape(T, S, 1), lambdas.reshape(1, 1, J), 'TPI')
Ynew = firm.get_Y(Kinit, Linit, parameters)
wnew = firm.get_w(Ynew, Linit, parameters)
rnew = firm.get_r(Ynew, Kinit, parameters)
# the following needs a g_n term
BQnew = household.get_BQ(rnew.reshape(T, 1), b_mat[:T], omega_stationary[:T].reshape(T, S, 1), lambdas.reshape(1, 1, J), rho.reshape(1, S, 1), g_n_vector[:T].reshape(T, 1), 'TPI')
bmat_s = np.zeros((T, S, J))
bmat_s[:, 1:, :] = b_mat[:T, :-1, :]
T_H_new = np.array(list(tax.get_lump_sum(rnew.reshape(T, 1, 1), bmat_s, wnew.reshape(
T, 1, 1), e.reshape(1, S, J), n_mat[:T], BQnew.reshape(T, 1, J), lambdas.reshape(
1, 1, J), factor_ss, omega_stationary[:T].reshape(T, S, 1), 'TPI', parameters, theta, tau_bq)) + [T_Hss]*S)
winit[:T] = utils.convex_combo(wnew, winit[:T], parameters)
rinit[:T] = utils.convex_combo(rnew, rinit[:T], parameters)
BQinit[:T] = utils.convex_combo(BQnew, BQinit[:T], parameters)
T_H_init[:T] = utils.convex_combo(T_H_new[:T], T_H_init[:T], parameters)
guesses_b = utils.convex_combo(b_mat, guesses_b, parameters)
guesses_n = utils.convex_combo(n_mat, guesses_n, parameters)
if T_H_init.all() != 0:
TPIdist = np.array(list(utils.perc_dif_func(rnew, rinit[:T]))+list(utils.perc_dif_func(BQnew, BQinit[:T]).flatten())+list(
utils.perc_dif_func(wnew, winit[:T]))+list(utils.perc_dif_func(T_H_new, T_H_init))).max()
else:
# T_H is zero here, so use absolute differences rather than percent differences to avoid dividing by zero.
TPIdist = np.array(list(utils.perc_dif_func(rnew, rinit[:T]))+list(utils.perc_dif_func(BQnew, BQinit[:T]).flatten())+list(
utils.perc_dif_func(wnew, winit[:T]))+list(np.abs(T_H_new - T_H_init))).max()
TPIdist_vec[TPIiter] = TPIdist
# If cycling occurs, drop the value of nu. Wait until after roughly the 10th iteration,
# because there is sometimes a jump up in the first couple of iterations.
if TPIiter > 10:
if TPIdist_vec[TPIiter] - TPIdist_vec[TPIiter-1] > 0:
nu /= 2
print 'New Value of nu:', nu
TPIiter += 1
print '\tIteration:', TPIiter
print '\t\tDistance:', TPIdist
print 'Computing final solutions'
# As in SS, you need the final distributions of b and n to match the final w, r, BQ, etc. Otherwise the euler errors are large. You need one more fsolve.
for j in xrange(J):
b_mat[1, -1, j], n_mat[0, -1, j] = np.array(opt.fsolve(SS_TPI_firstdoughnutring, [guesses_b[1, -1, j], guesses_n[0, -1, j]],
args=(winit[1], rinit[1], BQinit[1, j], T_H_init[1], initial_b, factor_ss, j, parameters, theta, tau_bq), xtol=1e-13))
for s in xrange(S-2): # Upper triangle
ind2 = np.arange(s+2)
b_guesses_to_use = np.diag(guesses_b[1:S+1, :, j], S-(s+2))
n_guesses_to_use = np.diag(guesses_n[:S, :, j], S-(s+2))
solutions = opt.fsolve(Steady_state_TPI_solver, list(
b_guesses_to_use) + list(n_guesses_to_use), args=(
winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, s, 0, parameters, theta, tau_bq, rho, lambdas, e, initial_b, chi_b, chi_n), xtol=1e-13)
b_vec = solutions[:len(solutions)/2]
b_mat[1+ind2, S-(s+2)+ind2, j] = b_vec
n_vec = solutions[len(solutions)/2:]
n_mat[ind2, S-(s+2)+ind2, j] = n_vec
for t in xrange(0, T):
b_guesses_to_use = .75 * np.diag(guesses_b[t+1:t+S+1, :, j])
n_guesses_to_use = np.diag(guesses_n[t:t+S, :, j])
solutions = opt.fsolve(Steady_state_TPI_solver, list(
b_guesses_to_use) + list(n_guesses_to_use), args=(
winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, None, t, parameters, theta, tau_bq, rho, lambdas, e, None, chi_b, chi_n), xtol=1e-13)
b_vec = solutions[:S]
b_mat[t+1+ind, ind, j] = b_vec
n_vec = solutions[S:]
n_mat[t+ind, ind, j] = n_vec
inputs = list(solutions)
euler_errors[t, :, j] = np.abs(Steady_state_TPI_solver(
inputs, winit, rinit, BQinit[:, j], T_H_init, factor_ss, j, None, t, parameters, theta, tau_bq, rho, lambdas, e, None, chi_b, chi_n))
b_mat[0, :, :] = initial_b
'''
------------------------------------------------------------------------
Generate variables/values so they can be used in other modules
------------------------------------------------------------------------
'''
Kpath_TPI = np.array(list(Kinit) + list(np.ones(10)*Kss))
Lpath_TPI = np.array(list(Linit) + list(np.ones(10)*Lss))
BQpath_TPI = np.array(list(BQinit) + list(np.ones((10, J))*BQss))
b_s = np.zeros((T, S, J))
b_s[:, 1:, :] = b_mat[:T, :-1, :]
b_splus1 = np.zeros((T, S, J))
b_splus1[:, :, :] = b_mat[1:T+1, :, :]
tax_path = tax.total_taxes(rinit[:T].reshape(T, 1, 1), b_s, winit[:T].reshape(T, 1, 1), e.reshape(
1, S, J), n_mat[:T], BQinit[:T, :].reshape(T, 1, J), lambdas, factor_ss, T_H_init[:T].reshape(T, 1, 1), None, 'TPI', False, parameters, theta, tau_bq)
c_path = household.get_cons(rinit[:T].reshape(T, 1, 1), b_s, winit[:T].reshape(T, 1, 1), e.reshape(1, S, J), n_mat[:T], BQinit[:T].reshape(T, 1, J), lambdas.reshape(1, 1, J), b_splus1, parameters, tax_path)
Y_path = firm.get_Y(Kpath_TPI[:T], Lpath_TPI[:T], parameters)
C_path = household.get_C(c_path, omega_stationary[:T].reshape(T, S, 1), lambdas, 'TPI')
I_path = firm.get_I(Kpath_TPI[1:T+1], Kpath_TPI[:T], delta, g_y, g_n_vector[:T])
print 'Resource Constraint Difference:', Y_path - C_path - I_path
print 'Checking time path for violations of constraints.'
for t in xrange(T):
household.constraint_checker_TPI(b_mat[t], n_mat[t], c_path[t], t, parameters)
eul_savings = euler_errors[:, :S, :].max(1).max(1)
eul_laborleisure = euler_errors[:, S:, :].max(1).max(1)
'''
------------------------------------------------------------------------
Save variables/values so they can be used in other modules
------------------------------------------------------------------------
'''
output = {'Kpath_TPI':Kpath_TPI, 'b_mat':b_mat, 'c_path':c_path,
'eul_savings':eul_savings, 'eul_laborleisure':eul_laborleisure,
'Lpath_TPI':Lpath_TPI, 'BQpath_TPI':BQpath_TPI, 'n_mat':n_mat,
'rinit':rinit, 'Yinit':Yinit, 'T_H_init':T_H_init,
'tax_path':tax_path, 'winit':winit}
if get_baseline:
pickle.dump(output, open("OUTPUT/TPIinit/TPIinit_vars.pkl", "wb"))
else:
pickle.dump(output, open("OUTPUT/TPI/TPI_vars.pkl", "wb"))
| mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexing/multiindex/test_multiindex.py | 2 | 3261 | import numpy as np
import pytest
import pandas._libs.index as _index
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.util import testing as tm
class TestMultiIndexBasic:
def test_multiindex_perf_warn(self):
df = DataFrame(
{
"jim": [0, 0, 1, 1],
"joe": ["x", "x", "z", "y"],
"jolie": np.random.rand(4),
}
).set_index(["jim", "joe"])
with tm.assert_produces_warning(PerformanceWarning, clear=[pd.core.index]):
df.loc[(1, "z")]
df = df.iloc[[2, 1, 3, 0]]
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(0,)]
def test_multiindex_contains_dropped(self):
# GH 19027
# test that dropped MultiIndex levels are not in the MultiIndex
# despite continuing to be in the MultiIndex's levels
idx = MultiIndex.from_product([[1, 2], [3, 4]])
assert 2 in idx
idx = idx.drop(2)
# drop implementation keeps 2 in the levels
assert 2 in idx.levels[0]
# but it should no longer be in the index itself
assert 2 not in idx
# also applies to strings
idx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
assert "a" in idx
idx = idx.drop("a")
assert "a" in idx.levels[0]
assert "a" not in idx
@pytest.mark.parametrize(
"data, expected",
[
(MultiIndex.from_product([(), ()]), True),
(MultiIndex.from_product([(1, 2), (3, 4)]), True),
(MultiIndex.from_product([("a", "b"), (1, 2)]), False),
],
)
def test_multiindex_is_homogeneous_type(self, data, expected):
assert data._is_homogeneous_type is expected
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n), MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
assert s[("a", 5)] == 5
assert s[("a", 6)] == 6
assert s[("a", 7)] == 7
_index._SIZE_CUTOFF = old_cutoff
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
}
)
result = df.set_index(["a", "b"], drop=False)
expected = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
},
index=[
Index(["R1", "R2", np.nan, "R4"], name="a"),
Index(["C1", "C2", "C3", "C4"], name="b"),
],
)
tm.assert_frame_equal(result, expected)
def test_contains(self):
# GH 24570
tx = pd.timedelta_range("09:30:00", "16:00:00", freq="30 min")
idx = MultiIndex.from_arrays([tx, np.arange(len(tx))])
assert tx[0] in idx
assert "element_not_exit" not in idx
assert "0 day 09:30:00" in idx
| apache-2.0 |
louispotok/pandas | pandas/tests/indexes/period/test_astype.py | 4 | 3992 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import NaT, Period, PeriodIndex, Int64Index, Index, period_range
class TestPeriodIndexAsType(object):
@pytest.mark.parametrize('dtype', [
float, 'timedelta64', 'timedelta64[ns]'])
def test_astype_raises(self, dtype):
# GH#13149, GH#13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
msg = 'Cannot cast PeriodIndex to dtype'
with tm.assert_raises_regex(TypeError, msg):
idx.astype(dtype)
def test_astype_conversion(self):
# GH#13149, GH#13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index(str(x) for x in idx)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
tm.assert_index_equal(result, Index(idx.asi8))
tm.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_object(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
# TODO: de-duplicate this version (from test_ops) with the one above
# (from test_period)
def test_astype_object2(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
assert result[i] == expected[i]
assert result[2] is pd.NaT
assert result.name == expected.name
result_list = idx.tolist()
for i in [0, 1, 3]:
assert result_list[i] == expected_list[i]
assert result_list[2] is pd.NaT
| bsd-3-clause |
jakobworldpeace/scikit-learn | examples/missing_values.py | 71 | 3055 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
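# Sketch (added for illustration, not part of the original example): the
# ``strategy`` hyper-parameter mentioned in the docstring switches the
# imputation rule; the median variant below is the more robust choice when
# a few large values dominate a feature.
imputer_median = Imputer(missing_values=0, strategy="median", axis=0)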
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))  # cast to int so it can be used as a length
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
kootenpv/brightml | brightml/utils.py | 1 | 1984 | import os
import simplejson
import pandas as pd
_NONE_PATH = None
def get_brightml_path(path=None):
global _NONE_PATH
if path is None:
if _NONE_PATH is None:
_USERNAME = os.getenv("SUDO_USER") or os.getenv("USER") or "/."
path = os.path.expanduser("~" + _USERNAME)
path = os.path.join(path, ".brightml")
_NONE_PATH = path
else:
path = _NONE_PATH
return os.path.expanduser(path)
def ensure_path_exists(path):
if not os.path.exists(path): # pragma: no cover
os.makedirs(path)
def get_data_path(path=None):
data_path = "data.jsonl"
path = path or get_brightml_path()
ensure_path_exists(path)
return os.path.join(path, data_path)
def prep_data(data):
data = data.sort_values(by="datetime_full")
if "new_brightness" in data.columns:
col_sort = [x for x in data.columns if x != "new_brightness"] + ["new_brightness"]
data = data.reindex(col_sort, axis=1)
return data
def get_training_data(path=None):
path = get_data_path(path)
try:
data = pd.read_json(path, lines=True)
except ValueError:
return None
return prep_data(data)
def save_sample(data, path=None):
path = get_data_path(path)
with open(path, "a") as f:
f.write(simplejson.dumps(data, ignore_nan=True) + "\n")
def get_brightness_paths():
base_dir = "/sys/class/backlight"
return [os.path.join(base_dir, x) + "/" for x in os.listdir(base_dir)]
def ensure_latest_update_path(brightml_path=None):
brightml_path = brightml_path or get_brightml_path()
ensure_path_exists(brightml_path)
last_update_dir = os.path.join(brightml_path, "last_updated/")
ensure_path_exists(last_update_dir)
last_update_file = os.path.join(last_update_dir, "update")
if not os.path.exists(last_update_file):
with open(last_update_file, "w") as f:
pass
return last_update_dir, last_update_file
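if __name__ == "__main__":
    # Illustrative usage sketch (assumes a writable ~/.brightml directory;
    # the sample fields below are made up for demonstration): append one
    # observation and reload the accumulated training data as a DataFrame.
    save_sample({"datetime_full": "2020-01-01T09:00:00", "new_brightness": 40})
    df = get_training_data()
    print(df.tail() if df is not None else "no data yet")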
| mit |
jalexvig/tensorflow | tensorflow/examples/learn/text_classification.py | 30 | 6589 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def estimator_spec_for_softmax_classification(logits, labels, mode):
"""Returns EstimatorSpec instance for softmax classification."""
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy':
tf.metrics.accuracy(labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def bag_of_words_model(features, labels, mode):
"""A bag-of-words model. Note it disregards the word order in the text."""
bow_column = tf.feature_column.categorical_column_with_identity(
WORDS_FEATURE, num_buckets=n_words)
bow_embedding_column = tf.feature_column.embedding_column(
bow_column, dimension=EMBEDDING_SIZE)
bow = tf.feature_column.input_layer(
features, feature_columns=[bow_embedding_column])
logits = tf.layers.dense(bow, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
def rnn_model(features, labels, mode):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.nn.static_rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for softmax
# classification over output classes.
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
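# Shape walkthrough for rnn_model (informational sketch derived from the
# constants defined above):
#   features[WORDS_FEATURE]           -> [batch_size, MAX_DOCUMENT_LENGTH] = [batch, 10]
#   embed_sequence(...)               -> [batch, 10, EMBEDDING_SIZE]       = [batch, 10, 50]
#   tf.unstack(word_vectors, axis=1)  -> list of 10 tensors, each [batch, 50]
#   tf.nn.static_rnn(GRUCell(50), ..) -> final encoding of shape [batch, 50]
#   tf.layers.dense(..., MAX_LABEL)   -> logits of shape [batch, MAX_LABEL] = [batch, 15]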
def main(unused_argv):
global n_words
tf.logging.set_verbosity(tf.logging.INFO)
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.Series(dbpedia.train.data[:, 1])
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.Series(dbpedia.test.data[:, 1])
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
# Subtract 1 because VocabularyProcessor outputs a word-id matrix where word
# ids start from 1 and 0 means 'no word'. But
# categorical_column_with_identity assumes 0-based count and uses -1 for
# missing word.
x_train -= 1
x_test -= 1
model_fn = bag_of_words_model
classifier = tf.estimator.Estimator(model_fn=model_fn)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ejhumphrey/harmonic-cnn | tests/test_driver.py | 1 | 10726 | import boltons.fileutils
import copy
import datetime
import glob
import json
import logging
import logging.config
import numpy as np
import os
import pandas as pd
import pytest
import hcnn.common.config as C
import hcnn.data.cqt
import hcnn.driver
logger = logging.getLogger(__name__)
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': "standard"
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
}
}
})
CONFIG_PATH = os.path.join(os.path.dirname(__file__), os.pardir,
"data", "integration_config.yaml")
config = C.Config.load(CONFIG_PATH)
@pytest.fixture
def config_with_workspace(workspace):
thisconfig = copy.deepcopy(config)
thisconfig.data['training']['iteration_write_frequency'] = 1
thisconfig.data['training']['iteration_print_frequency'] = 5
thisconfig.data['training']['max_iterations'] = 20
thisconfig.data['training']['batch_size'] = 12
thisconfig.data['training']['max_files_per_class'] = 3
thisconfig.data['paths']['model_dir'] = os.path.join(workspace, "models")
thisconfig.data['paths']['feature_dir'] = os.path.join(workspace, "cqt")
return thisconfig
def generate_example_training_loss(config, output_dir, n_iter):
def training_loss(n):
return pd.Series({
'timestamp': datetime.datetime.now(),
'batch_train_dur': np.random.random(),
'iteration': n_iter,
'loss': np.random.random()
})
training_loss_fp = config['experiment']['training_loss']
train_loss_path = os.path.join(output_dir, training_loss_fp)
train_stats = pd.DataFrame([training_loss(n) for n in range(5)])
train_stats.to_pickle(train_loss_path)
def generate_example_validation_loss(config, output_dir, n_iter):
def validation_series(n):
return pd.Series({
'mean_acc': np.random.random(),
'mean_loss': np.random.random(),
'model_file': "foobar.npz",
'model_iteration': n
})
validation_loss_fp = config['experiment']['validation_loss']
validation_loss_path = os.path.join(output_dir, validation_loss_fp)
validation_df = pd.DataFrame([validation_series(n) for n in range(5)])
validation_df.to_pickle(validation_loss_path)
def generate_example_predictions(config, output_dir, n_iter, n_classes):
def a_prediction():
return pd.Series({
'y_pred': np.random.randint(n_classes),
'target': np.random.randint(n_classes)
})
predictions_format = config['experiment']['predictions_format']
predictions_fp = predictions_format.format("00010")
predictions_path = os.path.join(output_dir, predictions_fp)
predictions_df = pd.DataFrame([a_prediction() for n in range(5)])
predictions_df.to_pickle(predictions_path)
@pytest.fixture
def available_datasets():
return ["rwc", 'uiowa', 'philharmonia']
@pytest.fixture
def pre_existing_experiment(config_with_workspace, available_datasets):
"""Create some template existing experiment data."""
experiment_name = "testexperiment"
model_dir = config_with_workspace['paths']['model_dir']
experiment_dir = os.path.join(model_dir, experiment_name)
n_iter = 5
n_classes = 12
for dataset in available_datasets:
dataset_dir = os.path.join(experiment_dir, dataset)
boltons.fileutils.mkdir_p(dataset_dir)
generate_example_training_loss(config, dataset_dir, n_iter)
generate_example_validation_loss(config, dataset_dir, n_iter)
generate_example_predictions(config, dataset_dir, n_iter, n_classes)
return experiment_name
@pytest.mark.slowtest
def test_extract_features(module_workspace, tiny_feats):
for idx, obs in tiny_feats.to_df().iterrows():
assert "cqt" in obs
assert os.path.exists(obs.cqt)
@pytest.mark.runme
@pytest.mark.slowtest
@pytest.mark.parametrize("model_name",
["cqt_MF_n16",
# "wcqt",
"cqt_M2_n8",
"hcqt_MH_n8"],
ids=["cqt_MF_n16",
"cqt_M2_n8",
"hcqt_MH_n8"])
def test_train_simple_model(model_name, module_workspace, workspace,
tiny_feats_csv):
thisconfig = copy.deepcopy(config)
thisconfig.data['training']['iteration_write_frequency'] = 2
thisconfig.data['training']['max_iterations'] = 10
thisconfig.data['training']['batch_size'] = 12
thisconfig.data['training']['max_files_per_class'] = 1
thisconfig.data['paths']['model_dir'] = workspace
thisconfig.data['paths']['feature_dir'] = module_workspace
# The features get loaded by tiny_feats_csv anyway
thisconfig.data['features']['cqt']['skip_existing'] = True
experiment_name = "testexperiment"
hold_out = "rwc"
driver = hcnn.driver.Driver(thisconfig,
model_name=model_name,
experiment_name=experiment_name,
dataset=tiny_feats_csv, load_features=True)
driver.setup_partitions(hold_out)
result = driver.train_model()
assert result is True
# Expected files this should generate
new_config = os.path.join(workspace, experiment_name, "config.yaml")
train_loss_fp = os.path.join(workspace, experiment_name, hold_out,
"training_loss.pkl")
assert os.path.exists(new_config)
assert os.path.exists(train_loss_fp)
@pytest.mark.slowtest
@pytest.mark.parametrize("model_name",
["cqt_MF_n16",
# "wcqt",
"cqt_M2_n8",
"hcqt_MH_n8"],
ids=["cqt_MF_n16",
"cqt_M2_n8",
"hcqt_MH_n8"])
def test_find_best_model(config_with_workspace, model_name, workspace):
experiment_name = "testexperiment"
hold_out = "rwc"
driver = hcnn.driver.Driver(config_with_workspace, model_name=model_name,
experiment_name=experiment_name,
load_features=True)
driver.setup_partitions(hold_out)
result = driver.train_model()
assert result is True
# Create a vastly reduced validation dataframe so it'll take less long.
validation_size = 3
driver.valid_set.df = driver.valid_set.df.sample(n=validation_size,
replace=True)
assert len(driver.valid_set.df) == validation_size
driver.test_set.df = driver.test_set.df.sample(n=validation_size,
replace=True)
results_df = driver.find_best_model()
# check that the results_df is ordered by iteration.
assert all(results_df["model_iteration"] ==
sorted(results_df["model_iteration"]))
# Get the best param
param_iter = driver.select_best_iteration(results_df)
assert param_iter is not None
# load it again to test the reloading thing.
# Just making sure this runs through
results_df2 = driver.find_best_model()
assert all(results_df == results_df2)
# Shrink the dataset so this doesn't take forever.
driver.dataset.df = driver.dataset.df.sample(n=10, replace=True)
predictions_df = driver.predict(param_iter)
assert not predictions_df.empty
predictions_df_path = os.path.join(
workspace, experiment_name, hold_out,
"model_{}_predictions.pkl".format(param_iter))
assert os.path.exists(predictions_df_path)
def test_collect_results(config_with_workspace, pre_existing_experiment,
available_datasets, workspace):
driver = hcnn.driver.Driver(config_with_workspace,
experiment_name=pre_existing_experiment,
load_features=False,
skip_load_dataset=True)
destination_dir = os.path.join(workspace, "results")
result = driver.collect_results(destination_dir)
assert result is True
new_experiment_dir = os.path.join(destination_dir, pre_existing_experiment)
assert os.path.isdir(new_experiment_dir)
for dataset in available_datasets:
dataset_results = os.path.join(new_experiment_dir, dataset)
assert os.path.isdir(dataset_results)
training_loss_fp = os.path.join(dataset_results, "training_loss.pkl")
assert os.path.isfile(training_loss_fp)
training_loss_df = pd.read_pickle(training_loss_fp)
assert [x in training_loss_df.columns for x in ['iteration', 'loss']]
validation_loss_fp = os.path.join(dataset_results,
"validation_loss.pkl")
assert os.path.isfile(validation_loss_fp)
validation_loss_df = pd.read_pickle(validation_loss_fp)
assert [x in validation_loss_df.columns
for x in ['mean_acc', 'mean_loss', 'model_file',
'model_iteration']]
prediction_glob = os.path.join(dataset_results, "*predictions.pkl")
assert len(prediction_glob) > 0
prediction_file = glob.glob(prediction_glob)[0]
prediction_df = pd.read_pickle(prediction_file)
assert [x in prediction_df.columns for x in ['y_pred', 'y_true']]
# Finally, collect_results should create an overall analysis of the
# three-fold validation, and put it in
overall_results_fp = os.path.join(
new_experiment_dir, "experiment_results.json")
assert os.path.isfile(overall_results_fp)
with open(overall_results_fp, 'r') as fh:
result_data = json.load(fh)
for dataset in available_datasets:
assert dataset in result_data
assert 'mean_accuracy' in result_data[dataset]
assert 'mean_precision' in result_data[dataset]
assert 'mean_recall' in result_data[dataset]
assert 'mean_f1' in result_data[dataset]
assert 'class_precision' in result_data[dataset]
assert 'class_recall' in result_data[dataset]
assert 'class_f1' in result_data[dataset]
assert 'sample_weight' in result_data[dataset]
| isc |
tleeuwenburg/ecostats | ecostats/utils.py | 1 | 2107 | '''
Walk a directory, looking for Python files and pulling out the imported
packages.
Designed to count package imports to find heavily-imported packages.
'''
import os
from collections import Counter
import pandas
def yield_relevant(abspath):
'''
Walk down from a directory, yield all the Python files
'''
gen = os.walk(abspath)
for dirpath, dirnames, filenames in gen:
relevant = [(dirpath, f) for f in filenames if '.py' in f[-3:]]
for r in relevant:
yield r
def yield_imports(path, fname, silent=True):
'''
Go through a Python file, yield every line with an import statement,
unless it's commented or documented out
'''
try:
f = open(path + '/' + fname).readlines()
importlines = [l.strip() for l in f if 'import' in l]
importlines = [l for l in importlines if l[0] != '#']
for il in importlines:
yield il.strip()
except:
if not silent:
print("Couldn't read %s/%s" % (path, fname))
def yield_packages(importline):
'''
For a line with an import statement, yield all the package names
'''
parts = importline.split(' ')
invalid = ['from', '*', 'import', 'as']
for p in parts:
p = p.strip()
if p in invalid:
continue
if p[:3] == '"""':
continue
if p[:1] == '"':
continue
if p[:2] == '!=':
continue
yield p
def yield_all_packages(relevant):
'''
Given a relevant file, get all the importing lines of code and extract
the package names
'''
for entry in relevant:
importlines = yield_imports(*entry)
for il in importlines:
packages = yield_packages(il)
for p in packages:
yield p
def package_stats(packages):
'''
Given the name, count pairs, stash these into a pandas dataframe
for easier analysis
'''
c = Counter(packages)
df = pandas.Series(c)
df = df.reset_index(0)
df.columns = ['pname', 'pcount']
df = df.drop(0) # drop the total total
return df
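if __name__ == '__main__':
    # Illustrative usage sketch (assumes it is run from the root of a project
    # containing Python files): walk the current directory, count imported
    # package names, and print the most heavily imported ones.
    relevant = yield_relevant(os.path.abspath('.'))
    packages = yield_all_packages(relevant)
    stats = package_stats(packages)
    print(stats.sort_values('pcount', ascending=False).head(10))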
| apache-2.0 |
kdheepak89/mpld3 | visualize_tests.py | 4 | 8059 | """
Visualize Test Plots
This script will go through all the plots in the ``mpld3/test_plots``
directory, and save them as D3js to a single HTML file for inspection.
"""
import os
import glob
import sys
import gc
import traceback
import itertools
import json
import contextlib
import matplotlib
matplotlib.use('Agg') # don't display plots
import matplotlib.pyplot as plt
import mpld3
from mpld3 import urls
from mpld3._display import NumpyEncoder
from mpld3.mpld3renderer import MPLD3Renderer
from mpld3.mplexporter import Exporter
plt.rcParams['figure.figsize'] = (6, 4.5)
plt.rcParams['savefig.dpi'] = 80
TEMPLATE = """
<html>
<head>
<script type="text/javascript" src={d3_url}></script>
<script type="text/javascript" src={mpld3_url}></script>
<style type="text/css">
.left_col {{
float: left;
width: 50%;
}}
.right_col {{
margin-left: 50%;
width: 50%;
}}
.fig {{
height: 500px;
}}
{extra_css}
</style>
</head>
<body>
<div id="wrap">
<div class="left_col">
{left_col}
</div>
<div class="right_col">
{right_col}
</div>
</div>
<script>
{js_commands}
</script>
</body>
</html>
"""
MPLD3_TEMPLATE = """
<div class="fig" id="fig{figid:03d}"></div>
"""
JS_TEMPLATE = """
!function(mpld3){{
{extra_js}
mpld3.draw_figure("fig{figid:03d}", {figure_json});
}}(mpld3);
"""
@contextlib.contextmanager
def mpld3_noshow():
"""context manager to use mpld3 with show() disabled"""
import mpld3
_show = mpld3.show
mpld3.show = lambda *args, **kwargs: None
yield mpld3
mpld3.show = _show
@contextlib.contextmanager
def use_dir(dirname=None):
"""context manager to temporarily change the working directory"""
cwd = os.getcwd()
if dirname is None:
dirname = cwd
os.chdir(dirname)
yield
os.chdir(cwd)
class ExecFile(object):
"""
Class to execute plotting files, and extract the mpl and mpld3 figures.
"""
def __init__(self, filename, execute=True, pngdir='_pngs'):
self.filename = filename
if execute:
self.execute_file()
if not os.path.exists(pngdir):
os.makedirs(pngdir)
basename = os.path.splitext(os.path.basename(filename))[0]
self.pngfmt = os.path.join(pngdir, basename + "_{0:2d}.png")
def execute_file(self):
"""
Execute the file, catching matplotlib figures
"""
dirname, fname = os.path.split(self.filename)
print('plotting {0}'.format(fname))
# close any currently open figures
plt.close('all')
with mpld3_noshow() as mpld3:
with use_dir(dirname):
try:
# execute file, forcing __name__ == '__main__'
exec(open(os.path.basename(self.filename)).read(),
{'plt': plt, 'mpld3': mpld3, '__name__': '__main__'})
gcf = matplotlib._pylab_helpers.Gcf
fig_mgr_list = gcf.get_all_fig_managers()
self.figlist = sorted([manager.canvas.figure
for manager in fig_mgr_list],
key=lambda fig: fig.number)
except:
print(80 * '_')
print('{0} is not compiling:'.format(fname))
traceback.print_exc()
print(80 * '_')
finally:
ncol = gc.collect()
def iter_png(self):
for fig in self.figlist:
fig_png = self.pngfmt.format(fig.number)
fig.savefig(fig_png)
yield fig_png
def iter_json(self):
for fig in self.figlist:
renderer = MPLD3Renderer()
Exporter(renderer, close_mpl=False).run(fig)
fig, fig_json, extra_css, extra_js = renderer.finished_figures[0]
yield (json.dumps(fig_json, cls=NumpyEncoder), extra_js, extra_css)
def combine_testplots(wildcard='mpld3/test_plots/*.py',
outfile='_test_plots.html',
pngdir='_pngs',
d3_url=None, mpld3_url=None):
"""Generate figures from the plots and save to an HTML file
Parameters
----------
wildcard : string or list
a regexp or list of regexps matching files to test
outfile : string
the path at which the output HTML will be saved
d3_url : string
the URL of the d3 library to use. If not specified, a standard web
address will be used.
mpld3_url : string
the URL of the mpld3 library to use. If not specified, a standard web
address will be used.
"""
if isinstance(wildcard, str):
filenames = glob.glob(wildcard)
else:
filenames = itertools.chain(*(glob.glob(w) for w in wildcard))
fig_png = []
fig_json = []
for filename in filenames:
result = ExecFile(filename, pngdir=pngdir)
fig_png.extend(result.iter_png())
fig_json.extend(result.iter_json())
left_col = [MPLD3_TEMPLATE.format(figid=i)
for i in range(len(fig_json))]
js_commands = [JS_TEMPLATE.format(figid=figid,
figure_json=figjson,
extra_js=figjs)
for figid, (figjson, figjs, _) in enumerate(fig_json)]
right_col = ['<div class="fig"><img src="{0}"></div>\n'.format(fig)
for fig in fig_png]
extra_css = [tup[2] for tup in fig_json]
print("writing results to {0}".format(outfile))
with open(outfile, 'w') as f:
f.write(TEMPLATE.format(left_col="".join(left_col),
right_col="".join(right_col),
d3_url=json.dumps(d3_url),
mpld3_url=json.dumps(mpld3_url),
js_commands="".join(js_commands),
extra_css="".join(extra_css)))
def run_main():
import argparse
parser = argparse.ArgumentParser(description=("Run files and convert "
"output to D3"))
parser.add_argument("files", nargs='*', type=str)
parser.add_argument("-d", "--d3-url",
help="location of d3 library",
type=str, default=None)
parser.add_argument("-m", "--mpld3-url",
help="location of the mpld3 library",
type=str, default=None)
parser.add_argument("-o", "--output",
help="output filename",
type=str, default='_test_plots.html')
parser.add_argument("-j", "--minjs", action="store_true")
parser.add_argument("-l", "--local", action="store_true")
parser.add_argument("-n", "--nolaunch", action="store_true")
args = parser.parse_args()
if len(args.files) == 0:
wildcard = ['mpld3/test_plots/*.py', 'examples/*.py']
else:
wildcard = args.files
if args.d3_url is None:
args.d3_url = urls.D3_URL
if args.mpld3_url is None:
args.mpld3_url = urls.MPLD3_URL
if args.local:
args.d3_url = urls.D3_LOCAL
if args.minjs:
args.mpld3_url = urls.MPLD3MIN_LOCAL
else:
args.mpld3_url = urls.MPLD3_LOCAL
else:
if args.minjs:
args.mpld3_url = urls.MPLD3MIN_URL
print("d3 url: {0}".format(args.d3_url))
print("mpld3 url: {0}".format(args.mpld3_url))
combine_testplots(wildcard=wildcard,
outfile=args.output,
d3_url=args.d3_url,
mpld3_url=args.mpld3_url)
return args.output, args.nolaunch
if __name__ == '__main__':
outfile, nolaunch = run_main()
if not nolaunch:
# Open local file (works on OSX; maybe not on other systems)
import webbrowser
webbrowser.open_new('file://localhost' + os.path.abspath(outfile))
| bsd-3-clause |
jonparrott/gcloud-python | bigquery/docs/snippets.py | 2 | 109699 | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testable usage examples for Google BigQuery API wrapper
Each example function takes a ``client`` argument (which must be an instance
of :class:`google.cloud.bigquery.client.Client`) and uses it to perform a task
with the API.
To facilitate running the examples as system tests, each example is also passed
a ``to_delete`` list; the function adds to the list any objects created which
need to be deleted during teardown.
"""
import os
import time
import mock
import pytest
import six
try:
import pandas
except (ImportError, AttributeError):
pandas = None
try:
import pyarrow
except (ImportError, AttributeError):
pyarrow = None
from google.api_core import datetime_helpers
from google.api_core.exceptions import InternalServerError
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import TooManyRequests
from google.cloud import bigquery
from google.cloud import storage
from test_utils.retry import RetryErrors
ORIGINAL_FRIENDLY_NAME = 'Original friendly name'
ORIGINAL_DESCRIPTION = 'Original description'
LOCALLY_CHANGED_FRIENDLY_NAME = 'Locally-changed friendly name'
LOCALLY_CHANGED_DESCRIPTION = 'Locally-changed description'
UPDATED_FRIENDLY_NAME = 'Updated friendly name'
UPDATED_DESCRIPTION = 'Updated description'
SCHEMA = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
ROWS = [
('Phred Phlyntstone', 32),
('Bharney Rhubble', 33),
('Wylma Phlyntstone', 29),
('Bhettye Rhubble', 27),
]
QUERY = (
'SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` '
'WHERE state = "TX"')
retry_429 = RetryErrors(TooManyRequests)
retry_storage_errors = RetryErrors(
(TooManyRequests, InternalServerError, ServiceUnavailable))
@pytest.fixture(scope='module')
def client():
return bigquery.Client()
@pytest.fixture
def to_delete(client):
doomed = []
yield doomed
for item in doomed:
if isinstance(item, (bigquery.Dataset, bigquery.DatasetReference)):
retry_429(client.delete_dataset)(item, delete_contents=True)
elif isinstance(item, storage.Bucket):
retry_storage_errors(item.delete)()
else:
retry_429(item.delete)()
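# A minimal sketch (not one of the official samples) of how the ``client`` /
# ``to_delete`` pattern described in the module docstring could be driven
# without pytest: run an example, then tear down whatever it created. The
# dataset ID below is a made-up placeholder.
def _manual_example_run_sketch():
    client = bigquery.Client()
    doomed = []
    try:
        dataset = bigquery.Dataset(
            client.dataset('sketch_dataset_{}'.format(_millis())))
        doomed.append(client.create_dataset(dataset))  # API request
    finally:
        for item in doomed:
            retry_429(client.delete_dataset)(item, delete_contents=True)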
def _millis():
return int(time.time() * 1000)
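# Wraps objects whose cleanup method is ``close()`` rather than ``delete()`` so
# the ``to_delete`` fixture can tear them down through a uniform interface.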
class _CloseOnDelete(object):
def __init__(self, wrapped):
self._wrapped = wrapped
def delete(self):
self._wrapped.close()
def test_create_client_default_credentials():
"""Create a BigQuery client with Application Default Credentials"""
# [START bigquery_client_default_credentials]
from google.cloud import bigquery
# If you don't specify credentials when constructing the client, the
# client library will look for credentials in the environment.
client = bigquery.Client()
# [END bigquery_client_default_credentials]
assert client is not None
def test_create_client_json_credentials():
"""Create a BigQuery client with Application Default Credentials"""
with open(os.environ['GOOGLE_APPLICATION_CREDENTIALS']) as creds_file:
creds_file_data = creds_file.read()
open_mock = mock.mock_open(read_data=creds_file_data)
with mock.patch('io.open', open_mock):
# [START bigquery_client_json_credentials]
from google.cloud import bigquery
# Explicitly use service account credentials by specifying the private
# key file. All clients in google-cloud-python have this helper.
client = bigquery.Client.from_service_account_json(
'path/to/service_account.json')
# [END bigquery_client_json_credentials]
assert client is not None
def test_list_datasets(client):
"""List datasets for a project."""
# [START bigquery_list_datasets]
# from google.cloud import bigquery
# client = bigquery.Client()
datasets = list(client.list_datasets())
project = client.project
if datasets:
print('Datasets in project {}:'.format(project))
for dataset in datasets: # API request(s)
print('\t{}'.format(dataset.dataset_id))
else:
print('{} project does not contain any datasets.'.format(project))
# [END bigquery_list_datasets]
def test_list_datasets_by_label(client, to_delete):
dataset_id = 'list_datasets_by_label_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.labels = {'color': 'green'}
dataset = client.create_dataset(dataset) # API request
to_delete.append(dataset)
# [START bigquery_list_datasets_by_label]
# from google.cloud import bigquery
# client = bigquery.Client()
# The following label filter example will find datasets with an
# arbitrary 'color' label set to 'green'
label_filter = 'labels.color:green'
datasets = list(client.list_datasets(filter=label_filter))
if datasets:
print('Datasets filtered by {}:'.format(label_filter))
for dataset in datasets: # API request(s)
print('\t{}'.format(dataset.dataset_id))
else:
print('No datasets found with this filter.')
# [END bigquery_list_datasets_by_label]
found = set([dataset.dataset_id for dataset in datasets])
assert dataset_id in found
def test_create_dataset(client, to_delete):
"""Create a dataset."""
dataset_id = 'create_dataset_{}'.format(_millis())
# [START bigquery_create_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# Create a DatasetReference using a chosen dataset ID.
# The project defaults to the Client's project if not specified.
dataset_ref = client.dataset(dataset_id)
# Construct a full Dataset object to send to the API.
dataset = bigquery.Dataset(dataset_ref)
# Specify the geographic location where the dataset should reside.
dataset.location = 'US'
# Send the dataset to the API for creation.
# Raises google.api_core.exceptions.AlreadyExists if the Dataset already
# exists within the project.
dataset = client.create_dataset(dataset) # API request
# [END bigquery_create_dataset]
to_delete.append(dataset)
def test_get_dataset_information(client, to_delete):
"""View information about a dataset."""
dataset_id = 'get_dataset_{}'.format(_millis())
dataset_labels = {'color': 'green'}
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.description = ORIGINAL_DESCRIPTION
dataset.labels = dataset_labels
dataset = client.create_dataset(dataset) # API request
to_delete.append(dataset)
# [START bigquery_get_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
dataset = client.get_dataset(dataset_ref) # API request
# View dataset properties
print('Dataset ID: {}'.format(dataset_id))
print('Description: {}'.format(dataset.description))
print('Labels:')
labels = dataset.labels
if labels:
for label, value in labels.items():
print('\t{}: {}'.format(label, value))
else:
print("\tDataset has no labels defined.")
# View tables in dataset
print('Tables:')
tables = list(client.list_tables(dataset_ref)) # API request(s)
if tables:
for table in tables:
print('\t{}'.format(table.table_id))
else:
print('\tThis dataset does not contain any tables.')
# [END bigquery_get_dataset]
assert dataset.description == ORIGINAL_DESCRIPTION
assert dataset.labels == dataset_labels
assert tables == []
# [START bigquery_dataset_exists]
def dataset_exists(client, dataset_reference):
"""Return if a dataset exists.
Args:
client (google.cloud.bigquery.client.Client):
A client to connect to the BigQuery API.
dataset_reference (google.cloud.bigquery.dataset.DatasetReference):
A reference to the dataset to look for.
Returns:
bool: ``True`` if the dataset exists, ``False`` otherwise.
"""
from google.cloud.exceptions import NotFound
try:
client.get_dataset(dataset_reference)
return True
except NotFound:
return False
# [END bigquery_dataset_exists]
def test_dataset_exists(client, to_delete):
"""Determine if a dataset exists."""
DATASET_ID = 'get_table_dataset_{}'.format(_millis())
dataset_ref = client.dataset(DATASET_ID)
dataset = bigquery.Dataset(dataset_ref)
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
assert dataset_exists(client, dataset_ref)
assert not dataset_exists(client, client.dataset('i_dont_exist'))
@pytest.mark.skip(reason=(
'update_dataset() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5588'))
def test_update_dataset_description(client, to_delete):
"""Update a dataset's description."""
dataset_id = 'update_dataset_description_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.description = 'Original description.'
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_update_dataset_description]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# dataset = client.get_dataset(dataset_ref) # API request
assert dataset.description == 'Original description.'
dataset.description = 'Updated description.'
dataset = client.update_dataset(dataset, ['description']) # API request
assert dataset.description == 'Updated description.'
# [END bigquery_update_dataset_description]
@pytest.mark.skip(reason=(
'update_dataset() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5588'))
def test_update_dataset_default_table_expiration(client, to_delete):
"""Update a dataset's default table expiration."""
dataset_id = 'update_dataset_default_expiration_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_update_dataset_expiration]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# dataset = client.get_dataset(dataset_ref) # API request
assert dataset.default_table_expiration_ms is None
one_day_ms = 24 * 60 * 60 * 1000 # in milliseconds
dataset.default_table_expiration_ms = one_day_ms
dataset = client.update_dataset(
dataset, ['default_table_expiration_ms']) # API request
assert dataset.default_table_expiration_ms == one_day_ms
# [END bigquery_update_dataset_expiration]
@pytest.mark.skip(reason=(
'update_dataset() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5588'))
def test_manage_dataset_labels(client, to_delete):
dataset_id = 'label_dataset_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_label_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# dataset = client.get_dataset(dataset_ref) # API request
assert dataset.labels == {}
labels = {'color': 'green'}
dataset.labels = labels
dataset = client.update_dataset(dataset, ['labels']) # API request
assert dataset.labels == labels
# [END bigquery_label_dataset]
# [START bigquery_get_dataset_labels]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
dataset = client.get_dataset(dataset_ref) # API request
# View dataset labels
print('Dataset ID: {}'.format(dataset_id))
print('Labels:')
if dataset.labels:
for label, value in dataset.labels.items():
print('\t{}: {}'.format(label, value))
else:
print("\tDataset has no labels defined.")
# [END bigquery_get_dataset_labels]
assert dataset.labels == labels
# [START bigquery_delete_label_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# dataset = client.get_dataset(dataset_ref) # API request
# This example dataset starts with one label
assert dataset.labels == {'color': 'green'}
# To delete a label from a dataset, set its value to None
dataset.labels['color'] = None
dataset = client.update_dataset(dataset, ['labels']) # API request
assert dataset.labels == {}
# [END bigquery_delete_label_dataset]
@pytest.mark.skip(reason=(
'update_dataset() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5588'))
def test_update_dataset_access(client, to_delete):
"""Update a dataset's access controls."""
dataset_id = 'update_dataset_access_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_update_dataset_access]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset = client.get_dataset(client.dataset('my_dataset'))
entry = bigquery.AccessEntry(
role='READER',
entity_type='userByEmail',
entity_id='[email protected]')
assert entry not in dataset.access_entries
entries = list(dataset.access_entries)
entries.append(entry)
dataset.access_entries = entries
dataset = client.update_dataset(dataset, ['access_entries']) # API request
assert entry in dataset.access_entries
# [END bigquery_update_dataset_access]
def test_delete_dataset(client):
"""Delete a dataset."""
from google.cloud.exceptions import NotFound
dataset1_id = 'delete_dataset_{}'.format(_millis())
dataset1 = bigquery.Dataset(client.dataset(dataset1_id))
client.create_dataset(dataset1)
dataset2_id = 'delete_dataset_with_tables{}'.format(_millis())
dataset2 = bigquery.Dataset(client.dataset(dataset2_id))
client.create_dataset(dataset2)
table = bigquery.Table(dataset2.table('new_table'))
client.create_table(table)
# [START bigquery_delete_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# Delete a dataset that does not contain any tables
# dataset1_id = 'my_empty_dataset'
dataset1_ref = client.dataset(dataset1_id)
client.delete_dataset(dataset1_ref) # API request
print('Dataset {} deleted.'.format(dataset1_id))
# Use the delete_contents parameter to delete a dataset and its contents
# dataset2_id = 'my_dataset_with_tables'
dataset2_ref = client.dataset(dataset2_id)
client.delete_dataset(dataset2_ref, delete_contents=True) # API request
print('Dataset {} deleted.'.format(dataset2_id))
# [END bigquery_delete_dataset]
for dataset in [dataset1, dataset2]:
with pytest.raises(NotFound):
client.get_dataset(dataset) # API request
def test_list_tables(client, to_delete):
"""List tables within a dataset."""
dataset_id = 'list_tables_dataset_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = client.create_dataset(bigquery.Dataset(dataset_ref))
to_delete.append(dataset)
# [START bigquery_list_tables]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
tables = list(client.list_tables(dataset_ref)) # API request(s)
assert len(tables) == 0
table_ref = dataset.table('my_table')
table = bigquery.Table(table_ref)
client.create_table(table) # API request
tables = list(client.list_tables(dataset)) # API request(s)
assert len(tables) == 1
assert tables[0].table_id == 'my_table'
# [END bigquery_list_tables]
def test_create_table(client, to_delete):
"""Create a table."""
dataset_id = 'create_table_dataset_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_create_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
table_ref = dataset_ref.table('my_table')
table = bigquery.Table(table_ref, schema=schema)
table = client.create_table(table) # API request
assert table.table_id == 'my_table'
# [END bigquery_create_table]
def test_create_table_nested_repeated_schema(client, to_delete):
dataset_id = 'create_table_nested_repeated_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_nested_repeated_schema]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
schema = [
bigquery.SchemaField('id', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('first_name', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('last_name', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('dob', 'DATE', mode='NULLABLE'),
bigquery.SchemaField('addresses', 'RECORD', mode='REPEATED', fields=[
bigquery.SchemaField('status', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('address', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('city', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('state', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('zip', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('numberOfYears', 'STRING', mode='NULLABLE'),
]),
]
table_ref = dataset_ref.table('my_table')
table = bigquery.Table(table_ref, schema=schema)
table = client.create_table(table) # API request
print('Created table {}'.format(table.full_table_id))
# [END bigquery_nested_repeated_schema]
def test_create_table_cmek(client, to_delete):
dataset_id = 'create_table_cmek_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_create_table_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
table_ref = client.dataset(dataset_id).table('my_table')
table = bigquery.Table(table_ref)
# Set the encryption key to use for the table.
# TODO: Replace this key with a key you have created in Cloud KMS.
kms_key_name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test')
table.encryption_configuration = bigquery.EncryptionConfiguration(
kms_key_name=kms_key_name)
table = client.create_table(table) # API request
assert table.encryption_configuration.kms_key_name == kms_key_name
# [END bigquery_create_table_cmek]
def test_create_partitioned_table(client, to_delete):
dataset_id = 'create_table_partitioned_{}'.format(_millis())
dataset_ref = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset_ref)
to_delete.append(dataset)
# [START bigquery_create_table_partitioned]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
table_ref = dataset_ref.table('my_partitioned_table')
schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING'),
bigquery.SchemaField('date', 'DATE')
]
table = bigquery.Table(table_ref, schema=schema)
table.time_partitioning = bigquery.TimePartitioning(
type_=bigquery.TimePartitioningType.DAY,
field='date', # name of column to use for partitioning
expiration_ms=7776000000) # 90 days
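    # 7776000000 ms = 90 days * 24 h * 60 min * 60 s * 1000 ms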
table = client.create_table(table)
print('Created table {}, partitioned on column {}'.format(
table.table_id, table.time_partitioning.field))
# [END bigquery_create_table_partitioned]
assert table.time_partitioning.type_ == 'DAY'
assert table.time_partitioning.field == 'date'
assert table.time_partitioning.expiration_ms == 7776000000
def test_load_and_query_partitioned_table(client, to_delete):
dataset_id = 'load_partitioned_table_dataset_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_partitioned]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
table_id = 'us_states_by_date'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING'),
bigquery.SchemaField('date', 'DATE')
]
job_config.skip_leading_rows = 1
job_config.time_partitioning = bigquery.TimePartitioning(
type_=bigquery.TimePartitioningType.DAY,
field='date', # name of column to use for partitioning
expiration_ms=7776000000) # 90 days
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table(table_id),
job_config=job_config) # API request
assert load_job.job_type == 'load'
load_job.result() # Waits for table load to complete.
table = client.get_table(dataset_ref.table(table_id))
print("Loaded {} rows to table {}".format(table.num_rows, table_id))
# [END bigquery_load_table_partitioned]
assert table.num_rows == 50
project_id = client.project
# [START bigquery_query_partitioned_table]
import datetime
# from google.cloud import bigquery
# client = bigquery.Client()
# project_id = 'my-project'
# dataset_id = 'my_dataset'
table_id = 'us_states_by_date'
sql_template = """
SELECT *
FROM `{}.{}.{}`
WHERE date BETWEEN @start_date AND @end_date
"""
sql = sql_template.format(project_id, dataset_id, table_id)
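    # The @start_date and @end_date placeholders in the SQL are bound by the
    # query parameters configured below.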
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = [
bigquery.ScalarQueryParameter(
'start_date',
'DATE',
datetime.date(1800, 1, 1)
),
bigquery.ScalarQueryParameter(
'end_date',
'DATE',
datetime.date(1899, 12, 31)
)
]
query_job = client.query(
sql,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request
rows = list(query_job)
print("{} states were admitted to the US in the 1800s".format(len(rows)))
# [END bigquery_query_partitioned_table]
assert len(rows) == 29
def test_get_table_information(client, to_delete):
"""Show a table's properties."""
dataset_id = 'show_table_dataset_{}'.format(_millis())
table_id = 'show_table_table_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table.description = ORIGINAL_DESCRIPTION
table = client.create_table(table)
# [START bigquery_get_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref) # API Request
# View table properties
print(table.schema)
print(table.description)
print(table.num_rows)
# [END bigquery_get_table]
assert table.schema == SCHEMA
assert table.description == ORIGINAL_DESCRIPTION
assert table.num_rows == 0
# [START bigquery_table_exists]
def table_exists(client, table_reference):
"""Return if a table exists.
Args:
client (google.cloud.bigquery.client.Client):
A client to connect to the BigQuery API.
table_reference (google.cloud.bigquery.table.TableReference):
A reference to the table to look for.
Returns:
bool: ``True`` if the table exists, ``False`` otherwise.
"""
from google.cloud.exceptions import NotFound
try:
client.get_table(table_reference)
return True
except NotFound:
return False
# [END bigquery_table_exists]
def test_table_exists(client, to_delete):
"""Determine if a table exists."""
DATASET_ID = 'get_table_dataset_{}'.format(_millis())
TABLE_ID = 'get_table_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(DATASET_ID))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table_ref = dataset.table(TABLE_ID)
table = bigquery.Table(table_ref, schema=SCHEMA)
table = client.create_table(table)
assert table_exists(client, table_ref)
assert not table_exists(client, dataset.table('i_dont_exist'))
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_manage_table_labels(client, to_delete):
dataset_id = 'label_table_dataset_{}'.format(_millis())
table_id = 'label_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table = client.create_table(table)
# [START bigquery_label_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('my_table')
# table = client.get_table(table_ref) # API request
assert table.labels == {}
labels = {'color': 'green'}
table.labels = labels
table = client.update_table(table, ['labels']) # API request
assert table.labels == labels
# [END bigquery_label_table]
# [START bigquery_get_table_labels]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref) # API Request
# View table labels
print('Table ID: {}'.format(table_id))
print('Labels:')
if table.labels:
for label, value in table.labels.items():
print('\t{}: {}'.format(label, value))
else:
print("\tTable has no labels defined.")
# [END bigquery_get_table_labels]
assert table.labels == labels
# [START bigquery_delete_label_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('my_table')
# table = client.get_table(table_ref) # API request
# This example table starts with one label
assert table.labels == {'color': 'green'}
# To delete a label from a table, set its value to None
table.labels['color'] = None
table = client.update_table(table, ['labels']) # API request
assert table.labels == {}
# [END bigquery_delete_label_table]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_update_table_description(client, to_delete):
"""Update a table's description."""
dataset_id = 'update_table_description_dataset_{}'.format(_millis())
table_id = 'update_table_description_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table.description = 'Original description.'
table = client.create_table(table)
# [START bigquery_update_table_description]
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('my_table')
# table = client.get_table(table_ref) # API request
assert table.description == 'Original description.'
table.description = 'Updated description.'
table = client.update_table(table, ['description']) # API request
assert table.description == 'Updated description.'
# [END bigquery_update_table_description]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_update_table_expiration(client, to_delete):
"""Update a table's expiration time."""
dataset_id = 'update_table_expiration_dataset_{}'.format(_millis())
table_id = 'update_table_expiration_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table = client.create_table(table)
# [START bigquery_update_table_expiration]
import datetime
import pytz
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('my_table')
# table = client.get_table(table_ref) # API request
assert table.expires is None
# set table to expire 5 days from now
expiration = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=5)
table.expires = expiration
table = client.update_table(table, ['expires']) # API request
# expiration is stored in milliseconds
margin = datetime.timedelta(microseconds=1000)
assert expiration - margin <= table.expires <= expiration + margin
# [END bigquery_update_table_expiration]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_add_empty_column(client, to_delete):
"""Adds an empty column to an existing table."""
dataset_id = 'add_empty_column_dataset_{}'.format(_millis())
table_id = 'add_empty_column_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table = client.create_table(table)
# [START bigquery_add_empty_column]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
table_ref = client.dataset(dataset_id).table(table_id)
table = client.get_table(table_ref) # API request
original_schema = table.schema
new_schema = original_schema[:] # creates a copy of the schema
new_schema.append(bigquery.SchemaField('phone', 'STRING'))
table.schema = new_schema
table = client.update_table(table, ['schema']) # API request
assert len(table.schema) == len(original_schema) + 1 == len(new_schema)
# [END bigquery_add_empty_column]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_relax_column(client, to_delete):
"""Updates a schema field from required to nullable."""
dataset_id = 'relax_column_dataset_{}'.format(_millis())
table_id = 'relax_column_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_relax_column]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
original_schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
table_ref = client.dataset(dataset_id).table(table_id)
table = bigquery.Table(table_ref, schema=original_schema)
table = client.create_table(table)
assert all(field.mode == 'REQUIRED' for field in table.schema)
# SchemaField properties cannot be edited after initialization.
# To make changes, construct new SchemaField objects.
relaxed_schema = [
bigquery.SchemaField('full_name', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('age', 'INTEGER', mode='NULLABLE'),
]
table.schema = relaxed_schema
table = client.update_table(table, ['schema'])
assert all(field.mode == 'NULLABLE' for field in table.schema)
# [END bigquery_relax_column]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_update_table_cmek(client, to_delete):
"""Patch a table's metadata."""
dataset_id = 'update_table_cmek_{}'.format(_millis())
table_id = 'update_table_cmek_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id))
original_kms_key_name = (
'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test'))
table.encryption_configuration = bigquery.EncryptionConfiguration(
kms_key_name=original_kms_key_name)
table = client.create_table(table)
# [START bigquery_update_table_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
assert table.encryption_configuration.kms_key_name == original_kms_key_name
# Set a new encryption key to use for the destination.
# TODO: Replace this key with a key you have created in KMS.
updated_kms_key_name = (
'projects/cloud-samples-tests/locations/us-central1/'
'keyRings/test/cryptoKeys/otherkey')
table.encryption_configuration = bigquery.EncryptionConfiguration(
kms_key_name=updated_kms_key_name)
table = client.update_table(
table, ['encryption_configuration']) # API request
assert table.encryption_configuration.kms_key_name == updated_kms_key_name
assert original_kms_key_name != updated_kms_key_name
# [END bigquery_update_table_cmek]
def test_browse_table_data(client, to_delete, capsys):
"""Retreive selected row data from a table."""
# [START bigquery_browse_table]
# from google.cloud import bigquery
# client = bigquery.Client()
dataset_ref = client.dataset('samples', project='bigquery-public-data')
table_ref = dataset_ref.table('shakespeare')
table = client.get_table(table_ref) # API call
# Load all rows from a table
rows = client.list_rows(table)
assert len(list(rows)) == table.num_rows
# Load the first 10 rows
rows = client.list_rows(table, max_results=10)
assert len(list(rows)) == 10
# Specify selected fields to limit the results to certain columns
fields = table.schema[:2] # first two columns
rows = client.list_rows(table, selected_fields=fields, max_results=10)
assert len(rows.schema) == 2
assert len(list(rows)) == 10
# Use the start index to load an arbitrary portion of the table
rows = client.list_rows(table, start_index=10, max_results=10)
# Print row data in tabular format
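    # '{!s:<16}' converts each value to str and left-aligns it in a
    # 16-character column; the format string repeats once per field.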
format_string = '{!s:<16} ' * len(rows.schema)
field_names = [field.name for field in rows.schema]
print(format_string.format(*field_names)) # prints column headers
for row in rows:
print(format_string.format(*row)) # prints row data
# [END bigquery_browse_table]
out, err = capsys.readouterr()
out = list(filter(bool, out.split('\n'))) # list of non-blank lines
assert len(out) == 11
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_manage_views(client, to_delete):
project = client.project
source_dataset_id = 'source_dataset_{}'.format(_millis())
source_dataset_ref = client.dataset(source_dataset_id)
source_dataset = bigquery.Dataset(source_dataset_ref)
source_dataset = client.create_dataset(source_dataset)
to_delete.append(source_dataset)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
job_config.skip_leading_rows = 1
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
source_table_id = 'us_states'
load_job = client.load_table_from_uri(
uri, source_dataset.table(source_table_id), job_config=job_config)
load_job.result()
shared_dataset_id = 'shared_dataset_{}'.format(_millis())
shared_dataset_ref = client.dataset(shared_dataset_id)
shared_dataset = bigquery.Dataset(shared_dataset_ref)
shared_dataset = client.create_dataset(shared_dataset)
to_delete.append(shared_dataset)
# [START bigquery_create_view]
# from google.cloud import bigquery
# client = bigquery.Client()
# project = 'my-project'
# source_dataset_id = 'my_source_dataset'
# source_table_id = 'us_states'
# shared_dataset_ref = client.dataset('my_shared_dataset')
# This example shows how to create a shared view of a source table of
# US States. The source table contains all 50 states, while the view will
# contain only states with names starting with 'W'.
view_ref = shared_dataset_ref.table('my_shared_view')
view = bigquery.Table(view_ref)
sql_template = (
'SELECT name, post_abbr FROM `{}.{}.{}` WHERE name LIKE "W%"')
view.view_query = sql_template.format(
project, source_dataset_id, source_table_id)
view = client.create_table(view) # API request
print('Successfully created view at {}'.format(view.full_table_id))
# [END bigquery_create_view]
# [START bigquery_update_view_query]
# from google.cloud import bigquery
# client = bigquery.Client()
# project = 'my-project'
# source_dataset_id = 'my_source_dataset'
# source_table_id = 'us_states'
# shared_dataset_ref = client.dataset('my_shared_dataset')
# This example shows how to update a shared view of a source table of
# US States. The view's query will be updated to contain only states with
# names starting with 'M'.
view_ref = shared_dataset_ref.table('my_shared_view')
view = bigquery.Table(view_ref)
sql_template = (
'SELECT name, post_abbr FROM `{}.{}.{}` WHERE name LIKE "M%"')
view.view_query = sql_template.format(
project, source_dataset_id, source_table_id)
view = client.update_table(view, ['view_query']) # API request
# [END bigquery_update_view_query]
# [START bigquery_get_view]
# from google.cloud import bigquery
# client = bigquery.Client()
# shared_dataset_id = 'my_shared_dataset'
view_ref = client.dataset(shared_dataset_id).table('my_shared_view')
view = client.get_table(view_ref) # API Request
# Display view properties
print('View at {}'.format(view.full_table_id))
print('View Query:\n{}'.format(view.view_query))
# [END bigquery_get_view]
assert view.view_query is not None
analyst_group_email = '[email protected]'
# [START bigquery_grant_view_access]
# from google.cloud import bigquery
# client = bigquery.Client()
# Assign access controls to the dataset containing the view
# shared_dataset_id = 'my_shared_dataset'
# analyst_group_email = '[email protected]'
shared_dataset = client.get_dataset(
client.dataset(shared_dataset_id)) # API request
access_entries = shared_dataset.access_entries
access_entries.append(
bigquery.AccessEntry('READER', 'groupByEmail', analyst_group_email)
)
shared_dataset.access_entries = access_entries
shared_dataset = client.update_dataset(
shared_dataset, ['access_entries']) # API request
# Authorize the view to access the source dataset
# project = 'my-project'
# source_dataset_id = 'my_source_dataset'
source_dataset = client.get_dataset(
client.dataset(source_dataset_id)) # API request
view_reference = {
'projectId': project,
'datasetId': shared_dataset_id,
'tableId': 'my_shared_view',
}
access_entries = source_dataset.access_entries
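    # An access entry with a ``None`` role and entity type ``'view'`` is what
    # authorizes the view to read the source dataset.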
access_entries.append(
bigquery.AccessEntry(None, 'view', view_reference)
)
source_dataset.access_entries = access_entries
source_dataset = client.update_dataset(
source_dataset, ['access_entries']) # API request
# [END bigquery_grant_view_access]
def test_table_insert_rows(client, to_delete):
"""Insert / fetch table data."""
dataset_id = 'table_insert_rows_dataset_{}'.format(_millis())
table_id = 'table_insert_rows_table_{}'.format(_millis())
    dataset = bigquery.Dataset(client.dataset(dataset_id))
    dataset.location = 'US'  # location must be set before the dataset is created
    dataset = client.create_dataset(dataset)
    to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table = client.create_table(table)
# [START bigquery_table_insert_rows]
# TODO(developer): Uncomment the lines below and replace with your values.
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset' # replace with your dataset ID
# For this sample, the table must already exist and have a defined schema
# table_id = 'my_table' # replace with your table ID
# table_ref = client.dataset(dataset_id).table(table_id)
# table = client.get_table(table_ref) # API request
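    # Each tuple supplies values in the table's schema order: (full_name, age).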
rows_to_insert = [
(u'Phred Phlyntstone', 32),
(u'Wylma Phlyntstone', 29),
]
errors = client.insert_rows(table, rows_to_insert) # API request
assert errors == []
# [END bigquery_table_insert_rows]
def test_load_table_from_file(client, to_delete):
"""Upload table data from a CSV file."""
dataset_id = 'load_table_from_file_dataset_{}'.format(_millis())
table_id = 'load_table_from_file_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.location = 'US'
client.create_dataset(dataset)
to_delete.append(dataset)
snippets_dir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(
snippets_dir, '..', '..', 'bigquery', 'tests', 'data', 'people.csv')
# [START bigquery_load_from_file]
# from google.cloud import bigquery
# client = bigquery.Client()
# filename = '/path/to/file.csv'
# dataset_id = 'my_dataset'
# table_id = 'my_table'
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.CSV
job_config.skip_leading_rows = 1
job_config.autodetect = True
with open(filename, 'rb') as source_file:
job = client.load_table_from_file(
source_file,
table_ref,
location='US', # Must match the destination dataset location.
job_config=job_config) # API request
job.result() # Waits for table load to complete.
print('Loaded {} rows into {}:{}.'.format(
job.output_rows, dataset_id, table_id))
# [END bigquery_load_from_file]
table = client.get_table(table_ref)
rows = list(client.list_rows(table)) # API request
assert len(rows) == 2
# Order is not preserved, so compare individually
row1 = bigquery.Row(('Wylma Phlyntstone', 29), {'full_name': 0, 'age': 1})
assert row1 in rows
row2 = bigquery.Row(('Phred Phlyntstone', 32), {'full_name': 0, 'age': 1})
assert row2 in rows
def test_load_table_from_uri_csv(client, to_delete, capsys):
dataset_id = 'load_table_from_uri_csv_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_csv]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
job_config.skip_leading_rows = 1
# The source format defaults to CSV, so the line below is optional.
job_config.source_format = bigquery.SourceFormat.CSV
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_csv]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_json(client, to_delete, capsys):
dataset_id = 'load_table_from_uri_json_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.location = 'US'
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_json]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
location='US', # Location must match that of the destination dataset.
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_json]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_cmek(client, to_delete):
dataset_id = 'load_table_from_uri_cmek_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.location = 'US'
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_json_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.autodetect = True
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
# Set the encryption key to use for the destination.
# TODO: Replace this key with a key you have created in KMS.
kms_key_name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test')
encryption_config = bigquery.EncryptionConfiguration(
kms_key_name=kms_key_name)
job_config.destination_encryption_configuration = encryption_config
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
location='US', # Location must match that of the destination dataset.
job_config=job_config) # API request
assert load_job.job_type == 'load'
load_job.result() # Waits for table load to complete.
assert load_job.state == 'DONE'
table = client.get_table(dataset_ref.table('us_states'))
assert table.encryption_configuration.kms_key_name == kms_key_name
# [END bigquery_load_table_gcs_json_cmek]
def test_load_table_from_uri_parquet(client, to_delete, capsys):
dataset_id = 'load_table_from_uri_parquet_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_parquet]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.PARQUET
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.parquet'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_parquet]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_orc(client, to_delete, capsys):
dataset_id = 'load_table_from_uri_orc_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_orc]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.ORC
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.orc'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_orc]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_autodetect(client, to_delete, capsys):
"""Load table from a GCS URI using various formats and auto-detected schema
Each file format has its own tested load from URI sample. Because most of
the code is common for autodetect, append, and truncate, this sample
includes snippets for all supported formats but only calls a single load
job.
This code snippet is made up of shared code, then format-specific code,
followed by more shared code. Note that only the last format in the
format-specific code section will be tested in this test.
"""
dataset_id = 'load_table_from_uri_auto_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# Shared code
# [START bigquery_load_table_gcs_csv_autodetect]
# [START bigquery_load_table_gcs_json_autodetect]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.autodetect = True
# [END bigquery_load_table_gcs_csv_autodetect]
# [END bigquery_load_table_gcs_json_autodetect]
# Format-specific code
# [START bigquery_load_table_gcs_csv_autodetect]
job_config.skip_leading_rows = 1
# The source format defaults to CSV, so the line below is optional.
job_config.source_format = bigquery.SourceFormat.CSV
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
# [END bigquery_load_table_gcs_csv_autodetect]
# unset csv-specific attribute
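    # (reaching into the private ``_properties`` dict is a shortcut for these
    # combined snippets; a standalone script would simply build a fresh
    # LoadJobConfig for the JSON variant)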
del job_config._properties['load']['skipLeadingRows']
# [START bigquery_load_table_gcs_json_autodetect]
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
# [END bigquery_load_table_gcs_json_autodetect]
# Shared code
# [START bigquery_load_table_gcs_csv_autodetect]
# [START bigquery_load_table_gcs_json_autodetect]
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_csv_autodetect]
# [END bigquery_load_table_gcs_json_autodetect]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_truncate(client, to_delete, capsys):
"""Replaces table data with data from a GCS URI using various formats
Each file format has its own tested load from URI sample. Because most of
the code is common for autodetect, append, and truncate, this sample
includes snippets for all supported formats but only calls a single load
job.
This code snippet is made up of shared code, then format-specific code,
followed by more shared code. Note that only the last format in the
format-specific code section will be tested in this test.
"""
dataset_id = 'load_table_from_uri_trunc_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
table_ref = dataset.table('us_states')
body = six.BytesIO(b'Washington,WA')
client.load_table_from_file(
body, table_ref, job_config=job_config).result()
# Shared code
# [START bigquery_load_table_gcs_csv_truncate]
# [START bigquery_load_table_gcs_json_truncate]
# [START bigquery_load_table_gcs_parquet_truncate]
# [START bigquery_load_table_gcs_orc_truncate]
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('existing_table')
previous_rows = client.get_table(table_ref).num_rows
assert previous_rows > 0
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
# [END bigquery_load_table_gcs_csv_truncate]
# [END bigquery_load_table_gcs_json_truncate]
# [END bigquery_load_table_gcs_parquet_truncate]
# [END bigquery_load_table_gcs_orc_truncate]
# Format-specific code
# [START bigquery_load_table_gcs_csv_truncate]
job_config.skip_leading_rows = 1
# The source format defaults to CSV, so the line below is optional.
job_config.source_format = bigquery.SourceFormat.CSV
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
# [END bigquery_load_table_gcs_csv_truncate]
# unset csv-specific attribute
del job_config._properties['load']['skipLeadingRows']
# [START bigquery_load_table_gcs_json_truncate]
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
# [END bigquery_load_table_gcs_json_truncate]
# [START bigquery_load_table_gcs_parquet_truncate]
job_config.source_format = bigquery.SourceFormat.PARQUET
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.parquet'
# [END bigquery_load_table_gcs_parquet_truncate]
# [START bigquery_load_table_gcs_orc_truncate]
job_config.source_format = bigquery.SourceFormat.ORC
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.orc'
# [END bigquery_load_table_gcs_orc_truncate]
# Shared code
# [START bigquery_load_table_gcs_csv_truncate]
# [START bigquery_load_table_gcs_json_truncate]
# [START bigquery_load_table_gcs_parquet_truncate]
# [START bigquery_load_table_gcs_orc_truncate]
load_job = client.load_table_from_uri(
uri,
table_ref,
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(table_ref)
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_csv_truncate]
# [END bigquery_load_table_gcs_json_truncate]
# [END bigquery_load_table_gcs_parquet_truncate]
# [END bigquery_load_table_gcs_orc_truncate]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_add_column(client, to_delete):
dataset_id = 'load_table_add_column_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
snippets_dir = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(
snippets_dir, '..', '..', 'bigquery', 'tests', 'data', 'people.csv')
table_ref = dataset_ref.table('my_table')
old_schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
]
table = client.create_table(bigquery.Table(table_ref, schema=old_schema))
# [START bigquery_add_column_load_append]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# filepath = 'path/to/your_file.csv'
# Retrieves the destination table and checks the length of the schema
table_id = 'my_table'
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
print("Table {} contains {} columns.".format(table_id, len(table.schema)))
# Configures the load job to append the data to the destination table,
# allowing field addition
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
job_config.schema_update_options = [
bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION,
]
# In this example, the existing table contains only the 'full_name' column.
# 'REQUIRED' fields cannot be added to an existing schema, so the
# additional column must be 'NULLABLE'.
job_config.schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='NULLABLE'),
]
job_config.source_format = bigquery.SourceFormat.CSV
job_config.skip_leading_rows = 1
with open(filepath, 'rb') as source_file:
job = client.load_table_from_file(
source_file,
table_ref,
location='US', # Must match the destination dataset location.
job_config=job_config) # API request
job.result() # Waits for table load to complete.
print('Loaded {} rows into {}:{}.'.format(
job.output_rows, dataset_id, table_ref.table_id))
# Checks the updated length of the schema
table = client.get_table(table)
print("Table {} now contains {} columns.".format(
table_id, len(table.schema)))
# [END bigquery_add_column_load_append]
assert len(table.schema) == 2
assert table.num_rows > 0
def test_load_table_relax_column(client, to_delete):
dataset_id = 'load_table_relax_column_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
snippets_dir = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(
snippets_dir, '..', '..', 'bigquery', 'tests', 'data', 'people.csv')
table_ref = dataset_ref.table('my_table')
old_schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
bigquery.SchemaField('favorite_color', 'STRING', mode='REQUIRED'),
]
table = client.create_table(bigquery.Table(table_ref, schema=old_schema))
# [START bigquery_relax_column_load_append]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# filepath = 'path/to/your_file.csv'
# Retrieves the destination table and checks the number of required fields
table_id = 'my_table'
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
original_required_fields = sum(
field.mode == 'REQUIRED' for field in table.schema)
# In this example, the existing table has 3 required fields.
print("{} fields in the schema are required.".format(
original_required_fields))
# Configures the load job to append the data to a destination table,
# allowing field relaxation
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
job_config.schema_update_options = [
bigquery.SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
# In this example, the existing table contains three required fields
# ('full_name', 'age', and 'favorite_color'), while the data to load
# contains only the first two fields.
job_config.schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
job_config.source_format = bigquery.SourceFormat.CSV
job_config.skip_leading_rows = 1
with open(filepath, 'rb') as source_file:
job = client.load_table_from_file(
source_file,
table_ref,
location='US', # Must match the destination dataset location.
job_config=job_config) # API request
job.result() # Waits for table load to complete.
print('Loaded {} rows into {}:{}.'.format(
job.output_rows, dataset_id, table_ref.table_id))
# Checks the updated number of required fields
table = client.get_table(table)
current_required_fields = sum(
field.mode == 'REQUIRED' for field in table.schema)
print("{} fields in the schema are now required.".format(
current_required_fields))
# [END bigquery_relax_column_load_append]
assert original_required_fields - current_required_fields == 1
assert len(table.schema) == 3
assert table.schema[2].mode == 'NULLABLE'
assert table.num_rows > 0
def test_copy_table(client, to_delete):
dataset_id = 'copy_table_dataset_{}'.format(_millis())
dest_dataset = bigquery.Dataset(client.dataset(dataset_id))
dest_dataset.location = 'US'
dest_dataset = client.create_dataset(dest_dataset)
to_delete.append(dest_dataset)
# [START bigquery_copy_table]
# from google.cloud import bigquery
# client = bigquery.Client()
source_dataset = client.dataset('samples', project='bigquery-public-data')
source_table_ref = source_dataset.table('shakespeare')
# dataset_id = 'my_dataset'
dest_table_ref = client.dataset(dataset_id).table('destination_table')
job = client.copy_table(
source_table_ref,
dest_table_ref,
# Location must match that of the source and destination tables.
location='US') # API request
job.result() # Waits for job to complete.
assert job.state == 'DONE'
dest_table = client.get_table(dest_table_ref) # API request
assert dest_table.num_rows > 0
# [END bigquery_copy_table]
def test_copy_table_multiple_source(client, to_delete):
dest_dataset_id = 'dest_dataset_{}'.format(_millis())
dest_dataset = bigquery.Dataset(client.dataset(dest_dataset_id))
dest_dataset.location = 'US'
dest_dataset = client.create_dataset(dest_dataset)
to_delete.append(dest_dataset)
source_dataset_id = 'source_dataset_{}'.format(_millis())
source_dataset = bigquery.Dataset(client.dataset(source_dataset_id))
source_dataset.location = 'US'
source_dataset = client.create_dataset(source_dataset)
to_delete.append(source_dataset)
schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
table_data = {'table1': b'Washington,WA', 'table2': b'California,CA'}
for table_id, data in table_data.items():
table_ref = source_dataset.table(table_id)
job_config = bigquery.LoadJobConfig()
job_config.schema = schema
body = six.BytesIO(data)
client.load_table_from_file(
body,
table_ref,
# Location must match that of the destination dataset.
location='US',
job_config=job_config).result()
# [START bigquery_copy_table_multiple_source]
# from google.cloud import bigquery
# client = bigquery.Client()
# source_dataset_id = 'my_source_dataset'
# dest_dataset_id = 'my_destination_dataset'
table1_ref = client.dataset(source_dataset_id).table('table1')
table2_ref = client.dataset(source_dataset_id).table('table2')
dest_table_ref = client.dataset(dest_dataset_id).table('destination_table')
job = client.copy_table(
[table1_ref, table2_ref],
dest_table_ref,
# Location must match that of the source and destination tables.
location='US') # API request
job.result() # Waits for job to complete.
assert job.state == 'DONE'
dest_table = client.get_table(dest_table_ref) # API request
assert dest_table.num_rows > 0
# [END bigquery_copy_table_multiple_source]
assert dest_table.num_rows == 2
def test_copy_table_cmek(client, to_delete):
dataset_id = 'copy_table_cmek_{}'.format(_millis())
dest_dataset = bigquery.Dataset(client.dataset(dataset_id))
dest_dataset.location = 'US'
dest_dataset = client.create_dataset(dest_dataset)
to_delete.append(dest_dataset)
# [START bigquery_copy_table_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
source_dataset = bigquery.DatasetReference(
'bigquery-public-data', 'samples')
source_table_ref = source_dataset.table('shakespeare')
# dataset_id = 'my_dataset'
dest_dataset_ref = client.dataset(dataset_id)
dest_table_ref = dest_dataset_ref.table('destination_table')
# Set the encryption key to use for the destination.
# TODO: Replace this key with a key you have created in KMS.
kms_key_name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test')
encryption_config = bigquery.EncryptionConfiguration(
kms_key_name=kms_key_name)
job_config = bigquery.CopyJobConfig()
job_config.destination_encryption_configuration = encryption_config
job = client.copy_table(
source_table_ref,
dest_table_ref,
# Location must match that of the source and destination tables.
location='US',
job_config=job_config) # API request
job.result() # Waits for job to complete.
assert job.state == 'DONE'
dest_table = client.get_table(dest_table_ref)
assert dest_table.encryption_configuration.kms_key_name == kms_key_name
# [END bigquery_copy_table_cmek]
def test_extract_table(client, to_delete):
bucket_name = 'extract_shakespeare_{}'.format(_millis())
storage_client = storage.Client()
bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
to_delete.append(bucket)
# [START bigquery_extract_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# bucket_name = 'my-bucket'
project = 'bigquery-public-data'
dataset_id = 'samples'
table_id = 'shakespeare'
destination_uri = 'gs://{}/{}'.format(bucket_name, 'shakespeare.csv')
dataset_ref = client.dataset(dataset_id, project=project)
table_ref = dataset_ref.table(table_id)
extract_job = client.extract_table(
table_ref,
destination_uri,
# Location must match that of the source table.
location='US') # API request
extract_job.result() # Waits for job to complete.
print('Exported {}:{}.{} to {}'.format(
project, dataset_id, table_id, destination_uri))
# [END bigquery_extract_table]
blob = retry_storage_errors(bucket.get_blob)('shakespeare.csv')
assert blob.exists
assert blob.size > 0
to_delete.insert(0, blob)
def test_extract_table_json(client, to_delete):
bucket_name = 'extract_shakespeare_json_{}'.format(_millis())
storage_client = storage.Client()
bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
to_delete.append(bucket)
# [START bigquery_extract_table_json]
# from google.cloud import bigquery
# client = bigquery.Client()
# bucket_name = 'my-bucket'
destination_uri = 'gs://{}/{}'.format(bucket_name, 'shakespeare.json')
dataset_ref = client.dataset('samples', project='bigquery-public-data')
table_ref = dataset_ref.table('shakespeare')
job_config = bigquery.job.ExtractJobConfig()
job_config.destination_format = (
bigquery.DestinationFormat.NEWLINE_DELIMITED_JSON)
extract_job = client.extract_table(
table_ref,
destination_uri,
job_config=job_config,
# Location must match that of the source table.
location='US') # API request
extract_job.result() # Waits for job to complete.
# [END bigquery_extract_table_json]
blob = retry_storage_errors(bucket.get_blob)('shakespeare.json')
assert blob.exists
assert blob.size > 0
to_delete.insert(0, blob)
def test_extract_table_compressed(client, to_delete):
bucket_name = 'extract_shakespeare_compress_{}'.format(_millis())
storage_client = storage.Client()
bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
to_delete.append(bucket)
# [START bigquery_extract_table_compressed]
# from google.cloud import bigquery
# client = bigquery.Client()
# bucket_name = 'my-bucket'
destination_uri = 'gs://{}/{}'.format(bucket_name, 'shakespeare.csv.gz')
dataset_ref = client.dataset('samples', project='bigquery-public-data')
table_ref = dataset_ref.table('shakespeare')
job_config = bigquery.job.ExtractJobConfig()
job_config.compression = bigquery.Compression.GZIP
extract_job = client.extract_table(
table_ref,
destination_uri,
# Location must match that of the source table.
location='US',
job_config=job_config) # API request
extract_job.result() # Waits for job to complete.
# [END bigquery_extract_table_compressed]
blob = retry_storage_errors(bucket.get_blob)('shakespeare.csv.gz')
assert blob.exists
assert blob.size > 0
to_delete.insert(0, blob)
def test_delete_table(client, to_delete):
"""Delete a table."""
from google.cloud.exceptions import NotFound
dataset_id = 'delete_table_dataset_{}'.format(_millis())
table_id = 'delete_table_table_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table_ref = dataset.table(table_id)
table = bigquery.Table(table_ref, schema=SCHEMA)
client.create_table(table)
# [START bigquery_delete_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
table_ref = client.dataset(dataset_id).table(table_id)
client.delete_table(table_ref) # API request
print('Table {}:{} deleted.'.format(dataset_id, table_id))
# [END bigquery_delete_table]
with pytest.raises(NotFound):
client.get_table(table) # API request
def test_undelete_table(client, to_delete):
dataset_id = 'undelete_table_dataset_{}'.format(_millis())
table_id = 'undelete_table_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
client.create_table(table)
# [START bigquery_undelete_table]
# TODO(developer): Uncomment the lines below and replace with your values.
# import time
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset' # Replace with your dataset ID.
# table_id = 'my_table' # Replace with your table ID.
table_ref = client.dataset(dataset_id).table(table_id)
# TODO(developer): Choose an appropriate snapshot point as epoch
# milliseconds. For this example, we choose the current time as we're about
# to delete the table immediately afterwards.
snapshot_epoch = int(time.time() * 1000)
# [END bigquery_undelete_table]
# Due to the very short lifecycle of the table, ensure we're not picking a time
# prior to the table creation due to time drift between backend and client.
table = client.get_table(table_ref)
created_epoch = datetime_helpers.to_microseconds(table.created)
if created_epoch > snapshot_epoch:
snapshot_epoch = created_epoch
# [START bigquery_undelete_table]
# "Accidentally" delete the table.
client.delete_table(table_ref) # API request
# Construct the restore-from table ID using a snapshot decorator.
snapshot_table_id = '{}@{}'.format(table_id, snapshot_epoch)
source_table_ref = client.dataset(dataset_id).table(snapshot_table_id)
# Choose a new table ID for the recovered table data.
recovered_table_id = '{}_recovered'.format(table_id)
dest_table_ref = client.dataset(dataset_id).table(recovered_table_id)
# Construct and run a copy job.
job = client.copy_table(
source_table_ref,
dest_table_ref,
# Location must match that of the source and destination tables.
location='US') # API request
job.result() # Waits for job to complete.
print('Copied data from deleted table {} to {}'.format(
table_id, recovered_table_id))
# [END bigquery_undelete_table]
def test_client_query(client):
"""Run a simple query."""
# [START bigquery_query]
# from google.cloud import bigquery
# client = bigquery.Client()
query = (
'SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` '
'WHERE state = "TX" '
'LIMIT 100')
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US') # API request - starts the query
for row in query_job: # API request - fetches results
# Row values can be accessed by field name or index
assert row[0] == row.name == row['name']
print(row)
# [END bigquery_query]
def test_client_query_legacy_sql(client):
"""Run a query with Legacy SQL explicitly set"""
# [START bigquery_query_legacy]
# from google.cloud import bigquery
# client = bigquery.Client()
query = (
'SELECT name FROM [bigquery-public-data:usa_names.usa_1910_2013] '
'WHERE state = "TX" '
'LIMIT 100')
# Set use_legacy_sql to True to use legacy SQL syntax.
job_config = bigquery.QueryJobConfig()
job_config.use_legacy_sql = True
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results.
for row in query_job: # API request - fetches results
print(row)
# [END bigquery_query_legacy]
def test_manage_job(client):
sql = """
SELECT corpus
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY corpus;
"""
location = 'us'
job = client.query(sql, location=location)
job_id = job.job_id
# [START bigquery_cancel_job]
# TODO(developer): Uncomment the lines below and replace with your values.
# from google.cloud import bigquery
# client = bigquery.Client()
# job_id = 'bq-job-123x456-123y123z123c' # replace with your job ID
# location = 'us' # replace with your location
job = client.cancel_job(job_id, location=location)
# [END bigquery_cancel_job]
# [START bigquery_get_job]
# TODO(developer): Uncomment the lines below and replace with your values.
# from google.cloud import bigquery
# client = bigquery.Client()
# job_id = 'bq-job-123x456-123y123z123c' # replace with your job ID
# location = 'us' # replace with your location
job = client.get_job(job_id, location=location) # API request
# Print selected job properties
print('Details for job {} running in {}:'.format(job_id, location))
print('\tType: {}\n\tState: {}\n\tCreated: {}'.format(
job.job_type, job.state, job.created))
# [END bigquery_get_job]
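# Editor's illustrative sketch (not part of the original Google samples): a
# non-blocking alternative to job.result() that polls the job state with
# client.get_job(), as used in the snippet above. The helper name and the
# timeout value are assumptions made for illustration only.
def wait_for_job_example(client, job_id, location='US', timeout_seconds=300):
    import time  # already imported at module scope; repeated to keep the sketch self-contained
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        job = client.get_job(job_id, location=location)  # API request
        if job.state == 'DONE':
            return job
        time.sleep(1)
    raise RuntimeError('Job {} did not reach DONE state in time'.format(job_id))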
def test_client_query_destination_table(client, to_delete):
"""Run a query"""
dataset_id = 'query_destination_table_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
to_delete.append(dataset_ref)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
client.create_dataset(dataset)
# [START bigquery_query_destination_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'your_dataset_id'
job_config = bigquery.QueryJobConfig()
# Set the destination table
table_ref = client.dataset(dataset_id).table('your_table_id')
job_config.destination = table_ref
sql = """
SELECT corpus
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY corpus;
"""
# Start the query, passing in the extra configuration.
query_job = client.query(
sql,
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config) # API request - starts the query
query_job.result() # Waits for the query to finish
print('Query results loaded to table {}'.format(table_ref.path))
# [END bigquery_query_destination_table]
def test_client_query_destination_table_legacy(client, to_delete):
dataset_id = 'query_destination_table_legacy_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
to_delete.append(dataset_ref)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
client.create_dataset(dataset)
# [START bigquery_query_legacy_large_results]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'your_dataset_id'
job_config = bigquery.QueryJobConfig()
# Set use_legacy_sql to True to use legacy SQL syntax.
job_config.use_legacy_sql = True
# Set the destination table
table_ref = client.dataset(dataset_id).table('your_table_id')
job_config.destination = table_ref
job_config.allow_large_results = True
sql = """
SELECT corpus
FROM [bigquery-public-data:samples.shakespeare]
GROUP BY corpus;
"""
# Start the query, passing in the extra configuration.
query_job = client.query(
sql,
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config) # API request - starts the query
query_job.result() # Waits for the query to finish
print('Query results loaded to table {}'.format(table_ref.path))
# [END bigquery_query_legacy_large_results]
def test_client_query_destination_table_cmek(client, to_delete):
"""Run a query"""
dataset_id = 'query_destination_table_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
to_delete.append(dataset_ref)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
client.create_dataset(dataset)
# [START bigquery_query_destination_table_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
# Set the destination table. Here, dataset_id is a string, such as:
# dataset_id = 'your_dataset_id'
table_ref = client.dataset(dataset_id).table('your_table_id')
job_config.destination = table_ref
# Set the encryption key to use for the destination.
# TODO: Replace this key with a key you have created in KMS.
kms_key_name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test')
encryption_config = bigquery.EncryptionConfiguration(
kms_key_name=kms_key_name)
job_config.destination_encryption_configuration = encryption_config
# Start the query, passing in the extra configuration.
query_job = client.query(
'SELECT 17 AS my_col;',
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config) # API request - starts the query
query_job.result()
# The destination table is written using the encryption configuration.
table = client.get_table(table_ref)
assert table.encryption_configuration.kms_key_name == kms_key_name
# [END bigquery_query_destination_table_cmek]
def test_client_query_batch(client, to_delete):
# [START bigquery_query_batch]
# from google.cloud import bigquery
# client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
# Run at batch priority, which won't count toward concurrent rate limit.
job_config.priority = bigquery.QueryPriority.BATCH
sql = """
SELECT corpus
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY corpus;
"""
# Location must match that of the dataset(s) referenced in the query.
location = 'US'
# API request - starts the query
query_job = client.query(sql, location=location, job_config=job_config)
# Check on the progress by getting the job's updated state. Once the state
# is `DONE`, the results are ready.
query_job = client.get_job(
query_job.job_id, location=location) # API request - fetches job
print('Job {} is currently in state {}'.format(
query_job.job_id, query_job.state))
# [END bigquery_query_batch]
def test_client_query_relax_column(client, to_delete):
dataset_id = 'query_relax_column_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table_ref = dataset_ref.table('my_table')
schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
table = client.create_table(
bigquery.Table(table_ref, schema=schema))
# [START bigquery_relax_column_query_append]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# Retrieves the destination table and checks the number of required fields
table_id = 'my_table'
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
original_required_fields = sum(
field.mode == 'REQUIRED' for field in table.schema)
# In this example, the existing table has 2 required fields
print("{} fields in the schema are required.".format(
original_required_fields))
# Configures the query to append the results to a destination table,
# allowing field relaxation
job_config = bigquery.QueryJobConfig()
job_config.schema_update_options = [
bigquery.SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
job_config.destination = table_ref
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
query_job = client.query(
# In this example, the existing table contains 'full_name' and 'age' as
# required columns, but the query results will omit the second column.
'SELECT "Beyonce" as full_name;',
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config
) # API request - starts the query
query_job.result() # Waits for the query to finish
print("Query job {} complete.".format(query_job.job_id))
# Checks the updated number of required fields
table = client.get_table(table)
current_required_fields = sum(
field.mode == 'REQUIRED' for field in table.schema)
print("{} fields in the schema are now required.".format(
current_required_fields))
# [END bigquery_relax_column_query_append]
assert original_required_fields - current_required_fields > 0
assert len(table.schema) == 2
assert table.schema[1].mode == 'NULLABLE'
assert table.num_rows > 0
def test_client_query_add_column(client, to_delete):
dataset_id = 'query_add_column_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table_ref = dataset_ref.table('my_table')
schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
table = client.create_table(bigquery.Table(table_ref, schema=schema))
# [START bigquery_add_column_query_append]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# Retrieves the destination table and checks the length of the schema
table_id = 'my_table'
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
print("Table {} contains {} columns.".format(table_id, len(table.schema)))
# Configures the query to append the results to a destination table,
# allowing field addition
job_config = bigquery.QueryJobConfig()
job_config.schema_update_options = [
bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION,
]
job_config.destination = table_ref
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
query_job = client.query(
# In this example, the existing table contains only the 'full_name' and
# 'age' columns, while the results of this query will contain an
# additional 'favorite_color' column.
'SELECT "Timmy" as full_name, 85 as age, "Blue" as favorite_color;',
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config
) # API request - starts the query
query_job.result() # Waits for the query to finish
print("Query job {} complete.".format(query_job.job_id))
# Checks the updated length of the schema
table = client.get_table(table)
print("Table {} now contains {} columns.".format(
table_id, len(table.schema)))
# [END bigquery_add_column_query_append]
assert len(table.schema) == 3
assert table.num_rows > 0
def test_client_query_w_named_params(client, capsys):
"""Run a query using named query parameters"""
# [START bigquery_query_params_named]
# from google.cloud import bigquery
# client = bigquery.Client()
query = """
SELECT word, word_count
FROM `bigquery-public-data.samples.shakespeare`
WHERE corpus = @corpus
AND word_count >= @min_word_count
ORDER BY word_count DESC;
"""
query_params = [
bigquery.ScalarQueryParameter('corpus', 'STRING', 'romeoandjuliet'),
bigquery.ScalarQueryParameter('min_word_count', 'INT64', 250)
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print('{}: \t{}'.format(row.word, row.word_count))
assert query_job.state == 'DONE'
# [END bigquery_query_params_named]
out, _ = capsys.readouterr()
assert 'the' in out
def test_client_query_w_positional_params(client, capsys):
"""Run a query using query parameters"""
# [START bigquery_query_params_positional]
# from google.cloud import bigquery
# client = bigquery.Client()
query = """
SELECT word, word_count
FROM `bigquery-public-data.samples.shakespeare`
WHERE corpus = ?
AND word_count >= ?
ORDER BY word_count DESC;
"""
# Set the name to None to use positional parameters.
# Note that you cannot mix named and positional parameters.
query_params = [
bigquery.ScalarQueryParameter(None, 'STRING', 'romeoandjuliet'),
bigquery.ScalarQueryParameter(None, 'INT64', 250)
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print('{}: \t{}'.format(row.word, row.word_count))
assert query_job.state == 'DONE'
# [END bigquery_query_params_positional]
out, _ = capsys.readouterr()
assert 'the' in out
def test_client_query_w_timestamp_params(client, capsys):
"""Run a query using query parameters"""
# [START bigquery_query_params_timestamps]
# from google.cloud import bigquery
# client = bigquery.Client()
import datetime
import pytz
query = 'SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);'
query_params = [
bigquery.ScalarQueryParameter(
'ts_value',
'TIMESTAMP',
datetime.datetime(2016, 12, 7, 8, 0, tzinfo=pytz.UTC))
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print(row)
assert query_job.state == 'DONE'
# [END bigquery_query_params_timestamps]
out, _ = capsys.readouterr()
assert '2016, 12, 7, 9, 0' in out
def test_client_query_w_array_params(client, capsys):
"""Run a query using array query parameters"""
# [START bigquery_query_params_arrays]
# from google.cloud import bigquery
# client = bigquery.Client()
query = """
SELECT name, sum(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_2013`
WHERE gender = @gender
AND state IN UNNEST(@states)
GROUP BY name
ORDER BY count DESC
LIMIT 10;
"""
query_params = [
bigquery.ScalarQueryParameter('gender', 'STRING', 'M'),
bigquery.ArrayQueryParameter(
'states', 'STRING', ['WA', 'WI', 'WV', 'WY'])
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print('{}: \t{}'.format(row.name, row.count))
assert query_job.state == 'DONE'
# [END bigquery_query_params_arrays]
out, _ = capsys.readouterr()
assert 'James' in out
def test_client_query_w_struct_params(client, capsys):
"""Run a query using struct query parameters"""
# [START bigquery_query_params_structs]
# from google.cloud import bigquery
# client = bigquery.Client()
query = 'SELECT @struct_value AS s;'
query_params = [
bigquery.StructQueryParameter(
'struct_value',
bigquery.ScalarQueryParameter('x', 'INT64', 1),
bigquery.ScalarQueryParameter('y', 'STRING', 'foo')
)
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print(row.s)
assert query_job.state == 'DONE'
# [END bigquery_query_params_structs]
out, _ = capsys.readouterr()
assert '1' in out
assert 'foo' in out
def test_client_query_dry_run(client):
"""Run a dry run query"""
# [START bigquery_query_dry_run]
# from google.cloud import bigquery
# client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
job_config.dry_run = True
job_config.use_query_cache = False
query_job = client.query(
('SELECT name, COUNT(*) as name_count '
'FROM `bigquery-public-data.usa_names.usa_1910_2013` '
"WHERE state = 'WA' "
'GROUP BY name'),
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request
# A dry run query completes immediately.
assert query_job.state == 'DONE'
assert query_job.dry_run
print("This query will process {} bytes.".format(
query_job.total_bytes_processed))
# [END bigquery_query_dry_run]
assert query_job.total_bytes_processed > 0
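# Editor's illustrative sketch (not part of the original Google samples):
# combining a dry run with a *hypothetical* on-demand price per TiB to get a
# rough cost estimate before actually running a query. The price constant is
# an assumption, not a value returned by the API.
def estimate_query_cost_example(client, sql, usd_per_tib=5.0):
    job_config = bigquery.QueryJobConfig()
    job_config.dry_run = True
    job_config.use_query_cache = False
    job = client.query(sql, job_config=job_config)  # API request
    tib_processed = job.total_bytes_processed / float(1024 ** 4)
    return tib_processed * usd_per_tib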
def test_query_no_cache(client):
# [START bigquery_query_no_cache]
# from google.cloud import bigquery
# client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
job_config.use_query_cache = False
sql = """
SELECT corpus
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY corpus;
"""
query_job = client.query(
sql,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request
# Print the results.
for row in query_job: # API request - fetches results
print(row)
# [END bigquery_query_no_cache]
def test_query_external_gcs_temporary_table(client):
# [START bigquery_query_external_gcs_temp]
# from google.cloud import bigquery
# client = bigquery.Client()
# Configure the external data source and query job
external_config = bigquery.ExternalConfig('CSV')
external_config.source_uris = [
'gs://cloud-samples-data/bigquery/us-states/us-states.csv',
]
external_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
external_config.options.skip_leading_rows = 1 # optionally skip header row
table_id = 'us_states'
job_config = bigquery.QueryJobConfig()
job_config.table_definitions = {table_id: external_config}
# Example query to find states starting with 'W'
sql = 'SELECT * FROM `{}` WHERE name LIKE "W%"'.format(table_id)
query_job = client.query(sql, job_config=job_config) # API request
w_states = list(query_job) # Waits for query to finish
print('There are {} states with names starting with W.'.format(
len(w_states)))
# [END bigquery_query_external_gcs_temp]
assert len(w_states) == 4
def test_query_external_gcs_permanent_table(client, to_delete):
dataset_id = 'query_external_gcs_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_query_external_gcs_perm]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# Configure the external data source
dataset_ref = client.dataset(dataset_id)
table_id = 'us_states'
schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
table = bigquery.Table(dataset_ref.table(table_id), schema=schema)
external_config = bigquery.ExternalConfig('CSV')
external_config.source_uris = [
'gs://cloud-samples-data/bigquery/us-states/us-states.csv',
]
external_config.options.skip_leading_rows = 1 # optionally skip header row
table.external_data_configuration = external_config
# Create a permanent table linked to the GCS file
table = client.create_table(table) # API request
# Example query to find states starting with 'W'
sql = 'SELECT * FROM `{}.{}` WHERE name LIKE "W%"'.format(
dataset_id, table_id)
query_job = client.query(sql) # API request
w_states = list(query_job) # Waits for query to finish
print('There are {} states with names starting with W.'.format(
len(w_states)))
# [END bigquery_query_external_gcs_perm]
assert len(w_states) == 4
def test_query_external_sheets_temporary_table(client):
# [START bigquery_query_external_sheets_temp]
# [START bigquery_auth_drive_scope]
import google.auth
# from google.cloud import bigquery
# Create credentials with Drive & BigQuery API scopes
# Both APIs must be enabled for your project before running this code
credentials, project = google.auth.default(scopes=[
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/bigquery',
])
client = bigquery.Client(credentials=credentials, project=project)
# [END bigquery_auth_drive_scope]
# Configure the external data source and query job
external_config = bigquery.ExternalConfig('GOOGLE_SHEETS')
# Use a shareable link or grant viewing access to the email address you
# used to authenticate with BigQuery (this example Sheet is public)
sheet_url = (
'https://docs.google.com/spreadsheets'
'/d/1i_QCL-7HcSyUZmIbP9E6lO_T5u3HnpLe7dnpHaijg_E/edit?usp=sharing')
external_config.source_uris = [sheet_url]
external_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
external_config.options.skip_leading_rows = 1 # optionally skip header row
table_id = 'us_states'
job_config = bigquery.QueryJobConfig()
job_config.table_definitions = {table_id: external_config}
# Example query to find states starting with 'W'
sql = 'SELECT * FROM `{}` WHERE name LIKE "W%"'.format(table_id)
query_job = client.query(sql, job_config=job_config) # API request
w_states = list(query_job) # Waits for query to finish
print('There are {} states with names starting with W.'.format(
len(w_states)))
# [END bigquery_query_external_sheets_temp]
assert len(w_states) == 4
def test_query_external_sheets_permanent_table(client, to_delete):
dataset_id = 'query_external_sheets_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_query_external_sheets_perm]
import google.auth
# from google.cloud import bigquery
# dataset_id = 'my_dataset'
# Create credentials with Drive & BigQuery API scopes
# Both APIs must be enabled for your project before running this code
credentials, project = google.auth.default(scopes=[
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/bigquery',
])
client = bigquery.Client(credentials=credentials, project=project)
# Configure the external data source
dataset_ref = client.dataset(dataset_id)
table_id = 'us_states'
schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
table = bigquery.Table(dataset_ref.table(table_id), schema=schema)
external_config = bigquery.ExternalConfig('GOOGLE_SHEETS')
# Use a shareable link or grant viewing access to the email address you
# used to authenticate with BigQuery (this example Sheet is public)
sheet_url = (
'https://docs.google.com/spreadsheets'
'/d/1i_QCL-7HcSyUZmIbP9E6lO_T5u3HnpLe7dnpHaijg_E/edit?usp=sharing')
external_config.source_uris = [sheet_url]
external_config.options.skip_leading_rows = 1 # optionally skip header row
table.external_data_configuration = external_config
# Create a permanent table linked to the Sheets file
table = client.create_table(table) # API request
# Example query to find states starting with 'W'
sql = 'SELECT * FROM `{}.{}` WHERE name LIKE "W%"'.format(
dataset_id, table_id)
query_job = client.query(sql) # API request
w_states = list(query_job) # Waits for query to finish
print('There are {} states with names starting with W.'.format(
len(w_states)))
# [END bigquery_query_external_sheets_perm]
assert len(w_states) == 4
def test_ddl_create_view(client, to_delete, capsys):
"""Create a view via a DDL query."""
project = client.project
dataset_id = 'ddl_view_{}'.format(_millis())
table_id = 'new_view'
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_ddl_create_view]
# from google.cloud import bigquery
# project = 'my-project'
# dataset_id = 'my_dataset'
# table_id = 'new_view'
# client = bigquery.Client(project=project)
sql = """
CREATE VIEW `{}.{}.{}`
OPTIONS(
expiration_timestamp=TIMESTAMP_ADD(
CURRENT_TIMESTAMP(), INTERVAL 48 HOUR),
friendly_name="new_view",
description="a view that expires in 2 days",
labels=[("org_unit", "development")]
)
AS SELECT name, state, year, number
FROM `bigquery-public-data.usa_names.usa_1910_current`
WHERE state LIKE 'W%'
""".format(project, dataset_id, table_id)
job = client.query(sql) # API request.
job.result() # Waits for the query to finish.
print('Created new view "{}.{}.{}".'.format(
job.destination.project,
job.destination.dataset_id,
job.destination.table_id))
# [END bigquery_ddl_create_view]
out, _ = capsys.readouterr()
assert 'Created new view "{}.{}.{}".'.format(
project, dataset_id, table_id) in out
# Test that listing query result rows succeeds so that generic query
# processing tools work with DDL statements.
rows = list(job)
assert len(rows) == 0
if pandas is not None:
df = job.to_dataframe()
assert len(df) == 0
def test_client_list_jobs(client):
"""List jobs for a project."""
# [START bigquery_list_jobs]
# TODO(developer): Uncomment the lines below and replace with your values.
# from google.cloud import bigquery
# project = 'my_project' # replace with your project ID
# client = bigquery.Client(project=project)
import datetime
# List the 10 most recent jobs in reverse chronological order.
# Omit the max_results parameter to list jobs from the past 6 months.
print("Last 10 jobs:")
for job in client.list_jobs(max_results=10): # API request(s)
print(job.job_id)
# The following are examples of additional optional parameters:
# Use min_creation_time and/or max_creation_time to specify a time window.
print("Jobs from the last ten minutes:")
ten_mins_ago = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
for job in client.list_jobs(min_creation_time=ten_mins_ago):
print(job.job_id)
# Use all_users to include jobs run by all users in the project.
print("Last 10 jobs run by all users:")
for job in client.list_jobs(max_results=10, all_users=True):
print("{} run by user: {}".format(job.job_id, job.user_email))
# Use state_filter to filter by job state.
print("Jobs currently running:")
for job in client.list_jobs(state_filter='RUNNING'):
print(job.job_id)
# [END bigquery_list_jobs]
@pytest.mark.skipif(pandas is None, reason='Requires `pandas`')
def test_query_results_as_dataframe(client):
# [START bigquery_query_results_dataframe]
# from google.cloud import bigquery
# client = bigquery.Client()
sql = """
SELECT name, SUM(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_current`
GROUP BY name
ORDER BY count DESC
LIMIT 10
"""
df = client.query(sql).to_dataframe()
# [END bigquery_query_results_dataframe]
assert isinstance(df, pandas.DataFrame)
assert len(list(df)) == 2 # verify the number of columns
assert len(df) == 10 # verify the number of rows
@pytest.mark.skipif(pandas is None, reason='Requires `pandas`')
def test_list_rows_as_dataframe(client):
# [START bigquery_list_rows_dataframe]
# from google.cloud import bigquery
# client = bigquery.Client()
dataset_ref = client.dataset('samples', project='bigquery-public-data')
table_ref = dataset_ref.table('shakespeare')
table = client.get_table(table_ref)
df = client.list_rows(table).to_dataframe()
# [END bigquery_list_rows_dataframe]
assert isinstance(df, pandas.DataFrame)
assert len(list(df)) == len(table.schema) # verify the number of columns
assert len(df) == table.num_rows # verify the number of rows
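# Editor's illustrative sketch (not part of the original Google samples):
# list_rows() also accepts selected_fields and max_results, which avoids
# materializing the whole table as the snippet above does. The helper name and
# the row/field limits are assumptions made for illustration only.
def list_rows_subset_example(client, table, n=10):
    rows = client.list_rows(
        table,
        selected_fields=table.schema[:2],  # only the first two columns
        max_results=n)  # API request
    return [dict(row) for row in rows]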
@pytest.mark.skipif(pandas is None, reason='Requires `pandas`')
@pytest.mark.skipif(pyarrow is None, reason='Requires `pyarrow`')
def test_load_table_from_dataframe(client, to_delete):
dataset_id = 'load_table_from_dataframe_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_dataframe]
# from google.cloud import bigquery
# import pandas
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table('monty_python')
records = [
{'title': 'The Meaning of Life', 'release_year': 1983},
{'title': 'Monty Python and the Holy Grail', 'release_year': 1975},
{'title': 'Life of Brian', 'release_year': 1979},
{
'title': 'And Now for Something Completely Different',
'release_year': 1971
},
]
# Optionally set explicit indices.
# If indices are not specified, a column will be created for the default
# indices created by pandas.
index = ['Q24980', 'Q25043', 'Q24953', 'Q16403']
dataframe = pandas.DataFrame(
records, index=pandas.Index(index, name='wikidata_id'))
job = client.load_table_from_dataframe(dataframe, table_ref, location='US')
job.result() # Waits for table load to complete.
assert job.state == 'DONE'
table = client.get_table(table_ref)
assert table.num_rows == 4
# [END bigquery_load_table_dataframe]
column_names = [field.name for field in table.schema]
assert sorted(column_names) == ['release_year', 'title', 'wikidata_id']
if __name__ == '__main__':
pytest.main()
| apache-2.0 |
stefanodoni/mtperf | main.py | 2 | 18296 | #!/usr/bin/python3
import os
import argparse
import csv
import sqlite3
import sqlalchemy as sqlal
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from database import DBConstants
from datasets.BenchmarkDataset import BenchmarkDataset
from graph_plotters.HTModelPlotter import HTModelPlotter
from parsers.SarParser import SarParser
from parsers.PCMParser import PCMParser
from parsers.BenchmarkParser import BenchmarkParser
from parsers.PerfParser import PerfParser
from parsers.SysConfigParser import SysConfigParser
from statistics.HTLinearModel import HTLinearModel
from config.SUTConfig import SUTConfig
import config.BenchmarkAnalysisConfig as bac
parser = argparse.ArgumentParser(description='HTperf tool: parse, aggregate, select and plot data.')
parser.add_argument('benchmarkdirpath', metavar='benchmarkdirpath', help='path to directory containing n benchmark report directories, each one containing the csv report files')
parser.add_argument('reportdirpath', metavar='reportdirpath', help='path to directory in which the tool generates the reports')
parser.add_argument('-pcm', help='indicates if a pcm.csv file must be parsed', dest='pcm', action='store_true')
parser.add_argument('-sysconfig', help='indicates if a sysConfig.csv file must be parsed', dest='sysconfig', action='store_true')
parser.add_argument('--chart-no-legend', help='do not include legend in charts', action='store_true', default=False)
parser.add_argument('--chart-no-model', help='do not include regression model in charts', action='store_true')
parser.add_argument('--chart-xmax', help='max value of the throughput axis in charts', type=int, default=None)
parser.add_argument('--chart-umax', help='max value of the utilization axis in charts', type=int, default=None)
parser.add_argument('--chart-line-p-max', help='max value of the extrapolation line for productivity in charts', type=int, default=None)
parser.add_argument('--chart-line-u-max', help='max value of the extrapolation line for utilization in charts', type=int, default=None)
args = parser.parse_args()
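# Example invocation (editor's note; the paths and values below are
# hypothetical and only illustrate how the flags defined above combine):
#
#   ./main.py ./benchmark-reports ./output-reports -pcm -sysconfig \
#       --chart-xmax 150000 --chart-line-p-max 160000 --chart-no-legend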
# Settings
using_pcm = args.pcm
using_sysconfig = args.sysconfig
# Get the chosen output dir and create it if necessary
OUTPUT_DIR = os.path.join(args.reportdirpath, '')
os.makedirs(os.path.dirname(OUTPUT_DIR), exist_ok=True)
# Set path and file names
path_to_tests = args.benchmarkdirpath
test_names = [name for name in os.listdir(path_to_tests) if not os.path.isfile(path_to_tests + "/" + name)]
test_names.sort()
test_numbers = [i + 1 for i in range(len(test_names))]
# benchmark_detailed_file = "/benchmark-detailed.csv"
benchmark_file = "/benchmark.csv"
sar_file = "/sar.csv"
pcm_file = "/pcm.csv"
perf_file = "/perf.csv"
sysconfig_file = "/sysConfig.csv"
# Create output directory
for test in test_names:
os.makedirs(os.path.dirname(OUTPUT_DIR + test + '/'), exist_ok=True)
# Create DB file and empty it
open(DBConstants.DB_NAME, 'w').close()
# Data structures
system_config = {}
benchmark_dataframes = {}
benchmark_SUTconfigs = {}
sar_dataframes = {}
pcm_dataframes = {}
perf_dataframes = {}
benchmark_datasets = {}
ht_linear_models = {}
# ======================= DATA IMPORT =============================
if not using_sysconfig:
my_sut_config = SUTConfig()
my_sut_config.set_manual()
for test in test_names:
# benchmark_detailed_dataframe = BenchmarkParser().parse(benchmark_detailed_file, "detailed") # Only if using the detailed version of benchmark report file
benchmark_dataframes[test] = BenchmarkParser().parse(path_to_tests + '/' + test + benchmark_file)
sar_dataframes[test] = SarParser().parse(path_to_tests + '/' + test + sar_file)
perf_dataframes[test] = PerfParser().parse(path_to_tests + '/' + test + perf_file)
if using_sysconfig:
print("Setting SysConfig file of test: " + test)
system_config = SysConfigParser().parse(path_to_tests + '/' + test + sysconfig_file)
benchmark_SUTconfigs[test] = SUTConfig()
benchmark_SUTconfigs[test].set(system_config)
if using_pcm:
pcm_dataframes[test] = PCMParser().parse(path_to_tests + '/' + test + pcm_file)
# ======================= PERSIST DATA IN SQLITE ====================
conn = sqlite3.connect(DBConstants.DB_NAME)
c = conn.cursor()
for test in test_names:
#benchmark_detailed_dataframe.to_sql(DBConstants.BENCHMARK_DETAILED_TABLE, conn)
benchmark_dataframes[test].to_sql(DBConstants.BENCHMARK_TABLE, conn, if_exists='append')
sar_dataframes[test].to_sql(DBConstants.SAR_TABLE, conn, if_exists='append')
perf_dataframes[test].to_sql(DBConstants.PERF_TABLE, conn, if_exists='append')
if using_pcm:
pcm_dataframes[test].to_sql(DBConstants.PCM_TABLE, conn, if_exists='append')
conn.commit()
# c.execute("DROP TABLE IF EXISTS " + DBConstants.BENCHMARK_DETAILED_TABLE)
# Query to show table fields: PRAGMA table_info(tablename)
# for row in c.execute("PRAGMA table_info(perf)"):
# print(row)
# for row in c.execute("SELECT * FROM " + DBConstants.PERF_TABLE):
# print(row)
# c.execute("SELECT * FROM prova")
# print(c.fetchone())
#print(pd.read_sql_query("SELECT * FROM " + DBConstants.BENCHMARK_TABLE, conn))
#print(pd.read_sql_query("SELECT * FROM benchmark WHERE \"Timestamp Start\" < \"2015-10-11 08:14:18\"", conn))
# c.execute("DROP TABLE IF EXISTS prova")
# c.execute("CREATE TABLE prova (c1, c2, asd TEXT)")
# c.execute("INSERT INTO prova VALUES (5,3,4)")
for test in test_names:
benchmark_datasets[test] = BenchmarkDataset().create(benchmark_dataframes[test], conn, OUTPUT_DIR, test, using_pcm)
conn.close()
# Alternative to sqlite3: SQLAlchemy in order to use pd.read_sql_table
#engine = sqlal.create_engine('sqlite:///htperf.db')
#print(pd.read_sql_table('benchmark', engine))
#print(pd.read_sql_query("SELECT * FROM benchmark WHERE \"Timestamp Start\" <= \"2015-10-11 08:14:18\"", engine))
# ======================= STATISTICS =====================================
for test in test_names:
if using_sysconfig:
ht_linear_models[test] = HTLinearModel().estimate(benchmark_datasets[test], OUTPUT_DIR, test, benchmark_SUTconfigs[test])
else:
ht_linear_models[test] = HTLinearModel().estimate(benchmark_datasets[test], OUTPUT_DIR, test, my_sut_config)
### Full Dump of benchmark, perf and models data to CSV
for test in test_names:
benchmark_datasets[test]['perf-stats']['mean'].to_csv("mtperf-perf-dump-" + test + ".csv", sep=";")
benchmark_datasets[test]['runs']['XavgTot'].to_csv("mtperf-bench-dump-" + test + ".csv", sep=";")
ht_linear_models[test].Sys_mean_real_IPC.to_csv("mtperf-models-realIPC-dump-" + test + ".csv", sep=";")
# ======================= PLOT GRAPHS =====================================
# colors = ['#E12727', '#504FAF', '#088DA5', '#FE9900', '#12AD2A'] #281D46
colors = ['#0041CC', '#FF0000', '#E6C700', '#FF00BF', '#00CC22']
colors_second_ax = ['#f0f465', '#9cec5b', '#50c5b7', '#6184d8', '#533a71']
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
# First plot scatter and standard points in order to determine the maximum X value
for test, color in zip(test_names, colors):
color = (colors[0] if len(test_names) == 1 else color)
plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_utilization, 0, 0, color, (None if len(test_names) > 1 else "CPU Utilization \\%"), False, False, 0, 100)
if not args.chart_no_model:
# Then use the x_max value to plot the linear-regression (lr) lines
for test, color in zip(test_names, colors):
color = (colors[1] if len(test_names) == 1 else color)
plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_utilization, 0, 0, color, (test if len(test_names) > 1 else "Utilization Law"), False, False, 0, 100)
plotter.gen_graph("U-vs-X", "",
#"Utilization estimate (over the first " + str(bac.NUM_SAMPLES) + " samples)" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
{0: 'Throughput (req/sec)'}, {0: 'Utilization \\%'}, X_axis_max=args.chart_xmax, include_legend=not args.chart_no_legend)
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
# First plot scatter and standard points in order to determine the maximum X value
for test, color in zip(test_names, colors):
color = (colors[0] if len(test_names) == 1 else color)
plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_productivity, 1, 0, color, (None if len(test_names) > 1 else "Productivity"), False, True)#, 0, 100)
# Then use the x_max value to plot the linear-regression (lr) lines
for test, color in zip(test_names, colors):
color = (colors[1] if len(test_names) == 1 else color)
plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_productivity, 1, 0, color, (test if len(test_names) > 1 else "Linear Regression"), False, True)#, 0, 100)
plotter.gen_graph("P-vs-X", "",
#"Productivity estimate (over the first " + str(bac.NUM_SAMPLES) + " samples)" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
{0: 'Throughput (req/sec)'}, {0: 'Productivity \\%'}, None, None, True)
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
# First plot scatter and standard points in order to determine the maximum X value
for test, color in zip(test_names, colors):
plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_utilization, 0, 0,
(colors[0] if len(test_names) == 1 else color), (None if len(test_names) > 1 else "Utilization"),
False, False)#, 0, 100)
plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_productivity, 0, 0,
(colors[1] if len(test_names) == 1 else color), (None if len(test_names) > 1 else "Productivity"),
False, True)#, 0, 100)
if not args.chart_no_model:
# Then use the x_max value to plot the linear-regression (lr) lines
for test, color in zip(test_names, colors):
plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_utilization, 0, 0,
(colors[0] if len(test_names) == 1 else color),
(test if len(test_names) > 1 else "Utilization Law"), False, False, x_line_max=args.chart_line_u_max)#, 0, 100)#, False)
plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_productivity, 0, 0,
(colors[1] if len(test_names) == 1 else color),
(test if len(test_names) > 1 else "Extrapolated Prod."), False, True, x_line_max=args.chart_line_p_max)
plotter.gen_graph("U,P-vs-X", "",
#"Utilization estimate (over the first " + str(bac.NUM_SAMPLES) + " samples)" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
{0: 'Throughput (req/sec)'}, {0: 'Utilization \\%, Productivity \\%'}, X_axis_max=args.chart_xmax, legend_inside_graph=True, include_legend=not args.chart_no_legend)
## plotter = HTModelPlotter().init(OUTPUT_DIR, 2)
# # First plot scatter and standard points in order to determinate the maximum X value
# for test, color in zip(test_names, colors):
# plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], benchmark_datasets[test]['runs']['RavgTot'], 0, 0, color, test + '\nTot Avg Response Time (ms)')
# plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_atd, 1, 0, color, test + '\nTot Avg Thread Concurrency', False, False, 1, 2)
#
# plotter.gen_graph("R,atc-vs-X", bac.TITLE, {0: 'Throughput', 1: 'Throughput'}, {0: 'Tot Avg Response Time (ms)', 1: 'Tot Avg Thread Concurrency'})
#plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
## First plot scatter and standard points in order to determinate the maximum X value
#for test, color in zip(test_names, colors):
# color = (colors[0] if len(test_names) == 1 else color)
# plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_atd, 0, 0, color, (test if len(test_names) > 1 else "ATC"), False, False, 1, 2)
#
#plotter.gen_graph("Atc-vs-X", "",
# #"Average Thread Concurrency trend" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
# {0: 'Throughput'}, {0: 'Average Thread Concurrency'}, None, None, True)
#
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
# First plot scatter and standard points in order to determine the maximum X value
for test, color in zip(test_names, colors):
color = (colors[0] if len(test_names) == 1 else color)
plotter.plot_scatter(ht_linear_models[test].Sys_mean_utilization, benchmark_datasets[test]['runs']['RavgTot'], 0, 0, color, (test if len(test_names) > 1 else "Response Time (ms)"))
plotter.gen_graph("R-vs-U", "",
#"Response Time vs. Utilization" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
{0: 'Utilization \\%'}, {0: 'Response Time (ms)'}, X_axis_max=args.chart_umax, include_legend=not args.chart_no_legend)
#
#plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
## First plot scatter and standard points in order to determinate the maximum X value
#for test, color in zip(test_names, colors):
# color = (colors[0] if len(test_names) == 1 else color)
# plotter.plot_scatter(ht_linear_models[test].Sys_mean_productivity, benchmark_datasets[test]['runs']['RavgTot'], 0, 0, color, (test if len(test_names) > 1 else "Response Time (ms)"), True)
#
#plotter.gen_graph("R-vs-P", "",
# #"Response Time vs. Productivity" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
# {0: 'Productivity'}, {0: 'Response Time (ms)'}, None, 140, True)
#
#plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
## First plot scatter and standard points in order to determinate the maximum X value
#for test, color in zip(test_names, colors):
# if using_sysconfig:
# my_sut_config = benchmark_SUTconfigs[test]
#
# color = (colors[0] if len(test_names) == 1 else color)
# plotter.plot_scatter( ht_linear_models[test].Sys_mean_active_frequency, 0, 0,
# color, (test if len(test_names) > 1 else "AFREQ (GHz)"),
# False, False, 0, (my_sut_config.CPU_MAX_FREQUENCY_ALL_CORES_BUSY + 600000000))
#
#plotter.gen_graph("AFREQ-vs-X", "",
# #"Active Frequency trend" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
# {0: 'Throughput'}, {0: 'Active Frequency (GHz)'}, None, None, True, "lower right")
#
benchX = pd.Series([15633,30742,45689,60752,75282,90151,105483,120570,136335,148312])
#afreq = pd.Series([1.2863893771,1.7623052723,2.1674793625,2.4566290458,2.6498259159,2.7822519266,2.8569867656,2.896732531,2.9050008713,2.8996203862])
#core_busy_time = pd.Series([ 0.112894609, 0.2221528827, 0.3224394861, 0.4312730359, 0.539689001, 0.6395914782, 0.7470188007, 0.8404833952, 0.9391003009, 1])
instr = pd.Series([188.7400993175, 368.113962475, 542.7210293267, 718.3456908025, 892.9922278983, 1061.2639747475, 1246.3635704375, 1423.1804586467, 1610.9732021967, 1754.9474657242])
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
for test, color in zip(test_names, colors):
color = (colors[0] if len(test_names) == 1 else color)
plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_active_frequency, 0, 0, color, "test chart", False, False, 0, None)
if not args.chart_no_model:
plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_active_frequency, 0, 0, color, "AFREQ", False, False, 0, None)
plotter.gen_graph("AFREQ-vs-X", "", {0: 'Throughput'}, {0: 'Active Frequency (GHz)'}, X_axis_max=args.chart_xmax, include_legend=not args.chart_no_legend)
for test in test_names:
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_active_frequency, 0, 0, colors[0] , "test chart", False, False, 0, None)
if not args.chart_no_model:
plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_active_frequency, 0, 0, colors[1], "AFREQ", False, False, 0, None)
plotter.gen_graph(test + "-AFREQ-vs-X", "", {0: 'Throughput'}, {0: 'Active Frequency (GHz)'}, X_axis_max=args.chart_xmax, include_legend=not args.chart_no_legend)
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], benchmark_datasets[test]['perf-stats']['mean']['CPU0_cpu_clk_unhalted_thread_any'] , 0, 0, colors[0] , "test chart", False, False, 0, None)
plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], benchmark_datasets[test]['perf-stats']['mean']['CPU0_cpu_clk_unhalted_thread_any'], 0, 0, colors[1], "Core unhalted cycles", False, False, 0, None)
plotter.gen_graph(test + "-CUC-vs-X", "", {0: 'Throughput'}, {0: 'Core Unhalted Cycles'}, None, None, True, "lower right", False)
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_real_IPC, 0, 0, colors[0], "chart", False, False, 0, None)
plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_real_IPC, 0, 0, colors[1], "Instructions per cycle", False, False, 0, None)
plotter.gen_graph(test + "-IPC-vs-X", "", {0: 'Throughput'}, {0: 'Instructions per cycle'}, None, None, True, "lower right", False)
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
plotter.plot_scatter(benchX, instr, 0, 0, colors[0] , "test chart", False, False, 0, None)
if not args.chart_no_model:
plotter.plot_lin_regr(benchX, instr, 0, 0, colors[1], "Retired instructions (Millions/sec)", False, False, 0, None)
#plotter.gen_graph("INSTR-vs-X", "", {0: 'Throughput'}, {0: 'Retired instructions (Millions/sec)'}, None, None, True, "lower right", False)
plotter.gen_graph("INSTR-vs-X", "", {0: 'Throughput'}, {0: 'Retired instructions (Millions/sec)'}, X_axis_max=args.chart_xmax, include_legend=not args.chart_no_legend)
| gpl-2.0 |
glorizen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/text.py | 69 | 55366 | """
Classes for including text in a figure.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import artist
from artist import Artist
from cbook import is_string_like, maxdict
from font_manager import FontProperties
from patches import bbox_artist, YAArrow, FancyBboxPatch, \
FancyArrowPatch, Rectangle
import transforms as mtransforms
from transforms import Affine2D, Bbox
from lines import Line2D
import matplotlib.nxutils as nxutils
def _process_text_args(override, fontdict=None, **kwargs):
    "Return an override dict. See :func:`~pyplot.text` docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
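# Illustrative behaviour of the helper above (editor's example, not a doctest
# shipped with matplotlib; dict key order may vary):
#
#     >>> _process_text_args({}, fontdict={'fontsize': 12}, color='red')
#     {'fontsize': 12, 'color': 'red'}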
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
"""
Return the text angle as float.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
if rotation in ('horizontal', None):
angle = 0.
elif rotation == 'vertical':
angle = 90.
else:
angle = float(rotation)
return angle%360
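# --- Illustrative note (added commentary, not part of the original module):
# get_rotation() normalises both keyword and numeric input to [0, 360), e.g.
#     get_rotation('horizontal') -> 0.0
#     get_rotation('vertical')   -> 90.0
#     get_rotation(450)          -> 90.0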
# these are not available for the object inspector until after the
# class is build so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Text'] = """
========================== =========================================================================
Property Value
========================== =========================================================================
alpha float
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a pad in points
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family [ 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties instance
horizontalalignment or ha [ 'center' | 'right' | 'left' ]
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string eg, ['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
    rotation                   [ angle in degrees | 'vertical' | 'horizontal' ]
size or fontsize [ size in points | relative size eg 'smaller', 'x-large' ]
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
variant [ 'normal' | 'small-caps' ]
verticalalignment or va [ 'center' | 'top' | 'bottom' | 'baseline' ]
visible [True | False]
weight or fontweight [ 'normal' | 'bold' | 'heavy' | 'light' | 'ultrabold' | 'ultralight']
x float
y float
zorder any number
========================== =========================================================================
"""
# TODO : This function may move into the Text class as a method. As a
#        matter of fact, the information from the _get_textbox function
#        should be available during the Text._get_layout() call, which is
#        called within _get_textbox. So it would be better to make this
#        function a method, with some refactoring of the _get_layout method.
def _get_textbox(text, renderer):
"""
    Calculate the bounding box of the text. Unlike the
    :meth:`matplotlib.text.Text.get_extents` method, the bbox size of
    the text before rotation is calculated.
"""
projected_xs = []
projected_ys = []
theta = text.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(-theta)
for t, wh, x, y in text._get_layout(renderer)[1]:
w, h = wh
xt1, yt1 = tr.transform_point((x, y))
xt2, yt2 = xt1+w, yt1+h
projected_xs.extend([xt1, xt2])
projected_ys.extend([yt1, yt2])
xt_box, yt_box = min(projected_xs), min(projected_ys)
w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box
tr = mtransforms.Affine2D().rotate(theta)
x_box, y_box = tr.transform_point((xt_box, yt_box))
return x_box, y_box, w_box, h_box
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
def __str__(self):
return "Text(%g,%g,%s)"%(self._y,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='bottom',
horizontalalignment='left',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
**kwargs
):
"""
Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
with string *text*.
Valid kwargs are
%(Text)s
"""
Artist.__init__(self)
self.cached = maxdict(5)
self._x, self._y = x, y
if color is None: color = rcParams['text.color']
if fontproperties is None: fontproperties=FontProperties()
elif is_string_like(fontproperties): fontproperties=FontProperties(fontproperties)
self.set_text(text)
self.set_color(color)
self._verticalalignment = verticalalignment
self._horizontalalignment = horizontalalignment
self._multialignment = multialignment
self._rotation = rotation
self._fontproperties = fontproperties
self._bbox = None
self._bbox_patch = None # a FancyBboxPatch instance
self._renderer = None
if linespacing is None:
linespacing = 1.2 # Maybe use rcParam later.
self._linespacing = linespacing
self.update(kwargs)
#self.set_bbox(dict(pad=0))
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible() or self._renderer is None:
return False,{}
l,b,w,h = self.get_window_extent().bounds
r = l+w
t = b+h
xyverts = (l,b), (l, t), (r, t), (r, b)
x, y = mouseevent.x, mouseevent.y
inside = nxutils.pnpoly(x, y, xyverts)
return inside,{}
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_position()
return self.get_transform().transform_point((x,y))
def _get_multialignment(self):
if self._multialignment is not None: return self._multialignment
else: return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
def _get_layout(self, renderer):
key = self.get_prop_tup()
if key in self.cached: return self.cached[key]
horizLayout = []
thisx, thisy = 0.0, 0.0
xmin, ymin = 0.0, 0.0
width, height = 0.0, 0.0
lines = self._text.split('\n')
whs = np.zeros((len(lines), 2))
horizLayout = np.zeros((len(lines), 4))
# Find full vertical extent of font,
# including ascenders and descenders:
tmp, heightt, bl = renderer.get_text_width_height_descent(
'lp', self._fontproperties, ismath=False)
offsety = heightt * self._linespacing
baseline = None
for i, line in enumerate(lines):
clean_line, ismath = self.is_math_text(line)
w, h, d = renderer.get_text_width_height_descent(
clean_line, self._fontproperties, ismath=ismath)
if baseline is None:
baseline = h - d
whs[i] = w, h
horizLayout[i] = thisx, thisy, w, h
thisy -= offsety
width = max(width, w)
ymin = horizLayout[-1][1]
ymax = horizLayout[0][1] + horizLayout[0][3]
height = ymax-ymin
xmax = xmin + width
# get the rotation matrix
M = Affine2D().rotate_deg(self.get_rotation())
offsetLayout = np.zeros((len(lines), 2))
offsetLayout[:] = horizLayout[:, 0:2]
# now offset the individual text lines within the box
        if len(lines)>1: # do the multiline alignment
malign = self._get_multialignment()
if malign == 'center':
offsetLayout[:, 0] += width/2.0 - horizLayout[:, 2] / 2.0
elif malign == 'right':
offsetLayout[:, 0] += width - horizLayout[:, 2]
# the corners of the unrotated bounding box
cornersHoriz = np.array(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
np.float_)
# now rotate the bbox
cornersRotated = M.transform(cornersHoriz)
txs = cornersRotated[:, 0]
tys = cornersRotated[:, 1]
# compute the bounds of the rotated box
xmin, xmax = txs.min(), txs.max()
ymin, ymax = tys.min(), tys.max()
width = xmax - xmin
height = ymax - ymin
        # Now move the box to the target position: offset the display bbox by alignment
halign = self._horizontalalignment
valign = self._verticalalignment
# compute the text location in display coords and the offsets
# necessary to align the bbox with that location
if halign=='center': offsetx = (xmin + width/2.0)
elif halign=='right': offsetx = (xmin + width)
else: offsetx = xmin
if valign=='center': offsety = (ymin + height/2.0)
elif valign=='top': offsety = (ymin + height)
elif valign=='baseline': offsety = (ymin + height) - baseline
else: offsety = ymin
xmin -= offsetx
ymin -= offsety
bbox = Bbox.from_bounds(xmin, ymin, width, height)
# now rotate the positions around the first x,y position
xys = M.transform(offsetLayout)
xys -= (offsetx, offsety)
xs, ys = xys[:, 0], xys[:, 1]
ret = bbox, zip(lines, whs, xs, ys)
self.cached[key] = ret
return ret
def set_bbox(self, rectprops):
"""
Draw a bounding box around self. rectprops are any settable
properties for a rectangle, eg facecolor='red', alpha=0.5.
t.set_bbox(dict(facecolor='red', alpha=0.5))
If rectprops has "boxstyle" key. A FancyBboxPatch
is initialized with rectprops and will be drawn. The mutation
scale of the FancyBboxPath is set to the fontsize.
ACCEPTS: rectangle prop dict
"""
# The self._bbox_patch object is created only if rectprops has
# boxstyle key. Otherwise, self._bbox will be set to the
# rectprops and the bbox will be drawn using bbox_artist
# function. This is to keep the backward compatibility.
if rectprops is not None and "boxstyle" in rectprops:
props = rectprops.copy()
boxstyle = props.pop("boxstyle")
bbox_transmuter = props.pop("bbox_transmuter", None)
self._bbox_patch = FancyBboxPatch((0., 0.),
1., 1.,
boxstyle=boxstyle,
bbox_transmuter=bbox_transmuter,
transform=mtransforms.IdentityTransform(),
**props)
self._bbox = None
else:
self._bbox_patch = None
self._bbox = rectprops
def get_bbox_patch(self):
"""
        Return the bbox Patch object. Returns None if the
        FancyBboxPatch is not made.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
# For arrow_patch, use textbox as patchA by default.
if not isinstance(self.arrow_patch, FancyArrowPatch):
return
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
#self._bbox_patch.draw(renderer)
else:
props = self._bbox
if props is None: props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = self.get_window_extent(renderer)
l,b,w,h = bbox.bounds
l-=pad/2.
b-=pad/2.
w+=pad
h+=pad
r = Rectangle(xy=(l,b),
width=w,
height=h,
)
r.set_transform(mtransforms.IdentityTransform())
r.set_clip_on( False )
r.update(props)
self.arrow_patch.set_patchA(r)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBoxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def draw(self, renderer):
"""
Draws the :class:`Text` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible(): return
if self._text=='': return
bbox, info = self._get_layout(renderer)
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
canvasw, canvash = renderer.get_canvas_width_height()
# draw the FancyBboxPatch
if self._bbox_patch:
self._draw_bbox(renderer, posx, posy)
gc = renderer.new_gc()
gc.set_foreground(self._color)
gc.set_alpha(self._alpha)
gc.set_url(self._url)
if self.get_clip_on():
gc.set_clip_rectangle(self.clipbox)
if self._bbox:
bbox_artist(self, renderer, self._bbox)
angle = self.get_rotation()
if rcParams['text.usetex']:
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_tex(gc, x, y, clean_line,
self._fontproperties, angle)
return
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_text(gc, x, y, clean_line,
self._fontproperties, angle,
ismath=ismath)
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
        return self.get_fontproperties()
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
x, y = self.get_position()
return (x, y, self._text, self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties), self._rotation,
self.figure.dpi, id(self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
        'alias for :meth:`get_verticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
#return _unit_box
if not self.get_visible(): return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self._text == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx,ty,0,0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info = self._get_layout(self._renderer)
x, y = self.get_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
ACCEPTS: any matplotlib color
"""
if self._bbox is None:
self._bbox = dict(facecolor=color, edgecolor=color)
else:
self._bbox.update(dict(facecolor=color))
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' % str(legal))
self._horizontalalignment = align
def set_ma(self, align):
        'alias for set_multialignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
        Set the alignment for multiple lines layout. The layout of the
        bounding box of all the lines is determined by the horizontalalignment
        and verticalalignment properties, but the multiline text within that
        box can be aligned independently using this property.
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
            raise ValueError('Multiline alignment must be one of %s' % str(legal))
self._multialignment = align
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
"""
self._fontproperties.set_family(fontname)
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of %s' % str(legal))
self._verticalalignment = align
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '%s' % (s,)
def is_math_text(self, s):
"""
Returns True if the given string *s* contains any mathtext.
"""
# Did we find an even number of non-escaped dollar signs?
# If so, treat is as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
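    # --- Illustrative sketch (added commentary, not part of the original
    # class): the mathtext decision above reduces to counting unescaped
    # dollar signs -- an even, non-zero count marks the string as mathtext.
    # A standalone helper mirroring that rule (hypothetical name) would be:
    #     def looks_like_mathtext(s):
    #         dollar_count = s.count(r'$') - s.count(r'\$')
    #         return dollar_count > 0 and dollar_count % 2 == 0
    #     looks_like_mathtext(r'$x^2$')        -> True
    #     looks_like_mathtext(r'cost is \$5')  -> False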
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
artist.kwdocd['Text'] = artist.kwdoc(Text)
Text.__init__.im_func.__doc__ = cbook.dedent(Text.__init__.__doc__) % artist.kwdocd
class TextWithDash(Text):
"""
This is basically a :class:`~matplotlib.text.Text` with a dash
(drawn with a :class:`~matplotlib.lines.Line2D`) before/after
it. It is intended to be a drop-in replacement for
:class:`~matplotlib.text.Text`, and should behave identically to
it when *dashlength* = 0.0.
The dash always comes between the point specified by
:meth:`~matplotlib.text.Text.set_position` and the text. When a
dash exists, the text alignment arguments (*horizontalalignment*,
*verticalalignment*) are ignored.
*dashlength* is the length of the dash in canvas units.
(default = 0.0).
*dashdirection* is one of 0 or 1, where 0 draws the dash after the
text and 1 before. (default = 0).
*dashrotation* specifies the rotation of the dash, and should
generally stay *None*. In this case
:meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
:meth:`~matplotlib.text.Text.get_rotation`. (I.e., the dash takes
its rotation from the text's rotation). Because the text center is
projected onto the dash, major deviations in the rotation cause
what may be considered visually unappealing results.
(default = *None*)
*dashpad* is a padding length to add (or subtract) space
between the text and the dash, in canvas units.
(default = 3)
*dashpush* "pushes" the dash and text away from the point
specified by :meth:`~matplotlib.text.Text.set_position` by the
amount in canvas units. (default = 0)
.. note::
The alignment of the two objects is based on the bounding box
of the :class:`~matplotlib.text.Text`, as obtained by
:meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
turn, appears to depend on the font metrics as given by the
rendering backend. Hence the quality of the "centering" of the
label text with respect to the dash varies depending on the
backend used.
.. note::
I'm not sure that I got the
:meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
or whether that's sufficient for providing the object bounding
box.
"""
__name__ = 'textwithdash'
def __str__(self):
return "TextWithDash(%g,%g,%s)"%(self._x,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='center',
horizontalalignment='center',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
dashlength=0.0,
dashdirection=0,
dashrotation=None,
dashpad=3,
dashpush=0,
):
Text.__init__(self, x=x, y=y, text=text, color=color,
verticalalignment=verticalalignment,
horizontalalignment=horizontalalignment,
multialignment=multialignment,
fontproperties=fontproperties,
rotation=rotation,
linespacing=linespacing)
# The position (x,y) values for text and dashline
# are bogus as given in the instantiation; they will
# be set correctly by update_coords() in draw()
self.dashline = Line2D(xdata=(x, x),
ydata=(y, y),
color='k',
linestyle='-')
self._dashx = float(x)
self._dashy = float(y)
self._dashlength = dashlength
self._dashdirection = dashdirection
self._dashrotation = dashrotation
self._dashpad = dashpad
self._dashpush = dashpush
#self.set_bbox(dict(pad=0))
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._dashx))
y = float(self.convert_yunits(self._dashy))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
props = [p for p in Text.get_prop_tup(self)]
props.extend([self._x, self._y, self._dashlength, self._dashdirection, self._dashrotation, self._dashpad, self._dashpush])
return tuple(props)
def draw(self, renderer):
"""
Draw the :class:`TextWithDash` object to the given *renderer*.
"""
self.update_coords(renderer)
Text.draw(self, renderer)
if self.get_dashlength() > 0.0:
self.dashline.draw(renderer)
def update_coords(self, renderer):
"""
Computes the actual *x*, *y* coordinates for text based on the
input *x*, *y* and the *dashlength*. Since the rotation is
with respect to the actual canvas's coordinates we need to map
back and forth.
"""
dashx, dashy = self.get_position()
dashlength = self.get_dashlength()
# Shortcircuit this process if we don't have a dash
if dashlength == 0.0:
self._x, self._y = dashx, dashy
return
dashrotation = self.get_dashrotation()
dashdirection = self.get_dashdirection()
dashpad = self.get_dashpad()
dashpush = self.get_dashpush()
angle = get_rotation(dashrotation)
theta = np.pi*(angle/180.0+dashdirection-1)
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
transform = self.get_transform()
# Compute the dash end points
# The 'c' prefix is for canvas coordinates
cxy = transform.transform_point((dashx, dashy))
cd = np.array([cos_theta, sin_theta])
c1 = cxy+dashpush*cd
c2 = cxy+(dashpush+dashlength)*cd
inverse = transform.inverted()
(x1, y1) = inverse.transform_point(tuple(c1))
(x2, y2) = inverse.transform_point(tuple(c2))
self.dashline.set_data((x1, x2), (y1, y2))
# We now need to extend this vector out to
# the center of the text area.
# The basic problem here is that we're "rotating"
# two separate objects but want it to appear as
# if they're rotated together.
# This is made non-trivial because of the
# interaction between text rotation and alignment -
# text alignment is based on the bbox after rotation.
# We reset/force both alignments to 'center'
# so we can do something relatively reasonable.
# There's probably a better way to do this by
# embedding all this in the object's transformations,
# but I don't grok the transformation stuff
# well enough yet.
we = Text.get_window_extent(self, renderer=renderer)
w, h = we.width, we.height
# Watch for zeros
if sin_theta == 0.0:
dx = w
dy = 0.0
elif cos_theta == 0.0:
dx = 0.0
dy = h
else:
tan_theta = sin_theta/cos_theta
dx = w
dy = w*tan_theta
if dy > h or dy < -h:
dy = h
dx = h/tan_theta
cwd = np.array([dx, dy])/2
cwd *= 1+dashpad/np.sqrt(np.dot(cwd,cwd))
cw = c2+(dashdirection*2-1)*cwd
newx, newy = inverse.transform_point(tuple(cw))
self._x, self._y = newx, newy
# Now set the window extent
# I'm not at all sure this is the right way to do this.
we = Text.get_window_extent(self, renderer=renderer)
self._twd_window_extent = we.frozen()
self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
# Finally, make text align center
Text.set_horizontalalignment(self, 'center')
Text.set_verticalalignment(self, 'center')
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
'''
self.update_coords(renderer)
if self.get_dashlength() == 0.0:
return Text.get_window_extent(self, renderer=renderer)
else:
return self._twd_window_extent
def get_dashlength(self):
"""
Get the length of the dash.
"""
return self._dashlength
def set_dashlength(self, dl):
"""
Set the length of the dash.
ACCEPTS: float (canvas units)
"""
self._dashlength = dl
def get_dashdirection(self):
"""
        Get the direction of the dash. 1 is before the text and 0 is after.
"""
return self._dashdirection
def set_dashdirection(self, dd):
"""
        Set the direction of the dash relative to the text.
1 is before the text and 0 is after. The default
is 0, which is what you'd want for the typical
case of ticks below and on the left of the figure.
ACCEPTS: int (1 is before, 0 is after)
"""
self._dashdirection = dd
def get_dashrotation(self):
"""
Get the rotation of the dash in degrees.
"""
        if self._dashrotation is None:
return self.get_rotation()
else:
return self._dashrotation
def set_dashrotation(self, dr):
"""
Set the rotation of the dash, in degrees
ACCEPTS: float (degrees)
"""
self._dashrotation = dr
def get_dashpad(self):
"""
Get the extra spacing between the dash and the text, in canvas units.
"""
return self._dashpad
def set_dashpad(self, dp):
"""
Set the "pad" of the TextWithDash, which is the extra spacing
between the dash and the text, in canvas units.
ACCEPTS: float (canvas units)
"""
self._dashpad = dp
def get_dashpush(self):
"""
Get the extra spacing between the dash and the specified text
position, in canvas units.
"""
return self._dashpush
def set_dashpush(self, dp):
"""
Set the "push" of the TextWithDash, which
is the extra spacing between the beginning
of the dash and the specified position.
ACCEPTS: float (canvas units)
"""
self._dashpush = dp
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the :class:`TextWithDash`.
ACCEPTS: (x, y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashx = float(x)
def set_y(self, y):
"""
Set the *y* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashy = float(y)
def set_transform(self, t):
"""
Set the :class:`matplotlib.transforms.Transform` instance used
by this artist.
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Text.set_transform(self, t)
self.dashline.set_transform(t)
def get_figure(self):
'return the figure instance the artist belongs to'
return self.figure
def set_figure(self, fig):
"""
Set the figure instance the artist belong to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
Text.set_figure(self, fig)
self.dashline.set_figure(fig)
artist.kwdocd['TextWithDash'] = artist.kwdoc(TextWithDash)
class Annotation(Text):
"""
A :class:`~matplotlib.text.Text` class to make annotating things
in the figure, such as :class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes`,
:class:`~matplotlib.patches.Rectangle`, etc., easier.
"""
def __str__(self):
return "Annotation(%g,%g,%s)"%(self.xy[0],self.xy[1],repr(self._text))
def __init__(self, s, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
**kwargs):
"""
Annotate the *x*, *y* point *xy* with text *s* at *x*, *y*
location *xytext*. (If *xytext* = *None*, defaults to *xy*,
and if *textcoords* = *None*, defaults to *xycoords*).
*arrowprops*, if not *None*, is a dictionary of line properties
(see :class:`matplotlib.lines.Line2D`) for the arrow that connects
annotation to the point.
If the dictionary has a key *arrowstyle*, a FancyArrowPatch
instance is created with the given dictionary and is
        drawn. Otherwise, a YAArrow patch instance is created and
        drawn. Valid keys for YAArrow are
========= =============================================================
Key Description
========= =============================================================
width the width of the arrow in points
frac the fraction of the arrow length occupied by the head
headwidth the width of the base of the arrow head in points
shrink oftentimes it is convenient to have the arrowtip
and base a bit away from the text and point being
annotated. If *d* is the distance between the text and
annotated point, shrink will shorten the arrow so the tip
                  and base are shrink percent of the distance *d* away from the
endpoints. ie, ``shrink=0.05 is 5%%``
? any key for :class:`matplotlib.patches.polygon`
========= =============================================================
Valid keys for FancyArrowPatch are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*xycoords* and *textcoords* are strings that indicate the
coordinates of *xy* and *xytext*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
        'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
        'axes fraction'   0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
If a 'points' or 'pixels' option is specified, values will be
added to the bottom-left and if negative, values will be
subtracted from the top-right. Eg::
# 10 points to the right of the left border of the axes and
# 5 points below the top border
xy=(10,-5), xycoords='axes points'
Additional kwargs are Text properties:
%(Text)s
"""
if xytext is None:
xytext = xy
if textcoords is None:
textcoords = xycoords
# we'll draw ourself after the artist we annotate by default
x,y = self.xytext = xytext
Text.__init__(self, x, y, s, **kwargs)
self.xy = xy
self.xycoords = xycoords
self.textcoords = textcoords
self.arrowprops = arrowprops
self.arrow = None
if arrowprops and arrowprops.has_key("arrowstyle"):
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1,1),
**arrowprops)
else:
self.arrow_patch = None
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def contains(self,event):
t,tinfo = Text.contains(self,event)
if self.arrow is not None:
a,ainfo=self.arrow.contains(event)
t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t,tinfo
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def _get_xy(self, x, y, s):
if s=='data':
trans = self.axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s=='offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi/72.
y *= dpi/72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s=='polar':
theta, r = x, y
x = r*np.cos(theta)
y = r*np.sin(theta)
trans = self.axes.transData
return trans.transform_point((x,y))
elif s=='figure points':
#points from the lower left corner of the figure
dpi = self.figure.dpi
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
x *= dpi/72.
y *= dpi/72.
if x<0:
x = r + x
if y<0:
y = t + y
return x,y
elif s=='figure pixels':
#pixels from the lower left corner of the figure
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
if y<0:
y = t + y
return x, y
elif s=='figure fraction':
#(0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x,y))
elif s=='axes points':
#points from the lower left corner of the axes
dpi = self.figure.dpi
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x*dpi/72.
else:
x = l + x*dpi/72.
if y<0:
y = t + y*dpi/72.
else:
y = b + y*dpi/72.
return x, y
elif s=='axes pixels':
#pixels from the lower left corner of the axes
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
else:
x = l + x
if y<0:
y = t + y
else:
y = b + y
return x, y
elif s=='axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = self.axes.transAxes
return trans.transform_point((x, y))
def update_positions(self, renderer):
x, y = self.xytext
self._x, self._y = self._get_xy(x, y, self.textcoords)
x, y = self.xy
x, y = self._get_xy(x, y, self.xycoords)
ox0, oy0 = self._x, self._y
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
l,b,w,h = self.get_window_extent(renderer).bounds
r = l+w
t = b+h
xc = 0.5*(l+r)
yc = 0.5*(b+t)
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# Otherwise, fallback to YAArrow.
#if d.has_key("arrowstyle"):
if self.arrow_patch:
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
bbox = self.get_window_extent(renderer)
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
                # Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1,oy1))
mutation_scale = d.pop("mutation_scale", self.get_size())
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
if self._bbox_patch:
patchA = d.pop("patchA", self._bbox_patch)
self.arrow_patch.set_patchA(patchA)
else:
patchA = d.pop("patchA", self._bbox)
self.arrow_patch.set_patchA(patchA)
else:
# pick the x,y corner of the text bbox closest to point
# annotated
dsu = [(abs(val-x0), val) for val in l, r, xc]
dsu.sort()
_, x = dsu[0]
dsu = [(abs(val-y0), val) for val in b, t, yc]
dsu.sort()
_, y = dsu[0]
shrink = d.pop('shrink', 0.0)
theta = math.atan2(y-y0, x-x0)
r = math.sqrt((y-y0)**2. + (x-x0)**2.)
dx = shrink*r*math.cos(theta)
dy = shrink*r*math.sin(theta)
width = d.pop('width', 4)
headwidth = d.pop('headwidth', 12)
frac = d.pop('frac', 0.1)
self.arrow = YAArrow(self.figure, (x0+dx,y0+dy), (x-dx, y-dy),
width=width, headwidth=headwidth, frac=frac,
**d)
self.arrow.set_clip_box(self.get_clip_box())
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
self.update_positions(renderer)
self.update_bbox_position_size(renderer)
if self.arrow is not None:
if self.arrow.figure is None and self.figure is not None:
self.arrow.figure = self.figure
self.arrow.draw(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
Text.draw(self, renderer)
artist.kwdocd['Annotation'] = Annotation.__init__.__doc__
| agpl-3.0 |
Hojalab/mplh5canvas | setup.py | 4 | 1650 | #!/usr/bin/env python
from setuptools import setup, find_packages
from distutils.version import LooseVersion
import os
os.environ['MPLCONFIGDIR'] = "."
# temporarily redirect configuration directory
# to prevent matplotlib import testing for
# writeable directory outside of sandbox
from matplotlib import __version__ as mpl_version
import sys
if LooseVersion(mpl_version) < LooseVersion("0.99.1.1"):
print "The HTML5 Canvas Backend requires matplotlib 0.99.1.1 or newer. " \
"Your version (%s) appears older than this. Unable to continue..." % (mpl_version,)
sys.exit(0)
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
INSTALL = open(os.path.join(here, 'INSTALL.rst')).read()
setup (
name="mplh5canvas",
version="0.7",
author="Simon Ratcliffe, Ludwig Schwardt",
author_email="[email protected], [email protected]",
url="http://code.google.com/p/mplh5canvas/",
description="A matplotlib backend based on HTML5 Canvas.",
long_description=README + "\n\n" + INSTALL,
license="BSD",
classifiers=["Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages = find_packages(),
scripts = [],
install_requires = ['matplotlib', 'mod_pywebsocket'],
zip_safe = False,
)
| bsd-3-clause |
rmcgibbo/scipy | scipy/signal/wavelets.py | 23 | 10483 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The computed filter coefficients of length ``2*p``.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of low-pass filter.
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
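# --- Illustrative usage sketch (added commentary, not in the original source).
# The high-pass filter returned by qmf() has the same length as, and is
# orthogonal to, the low-pass input.
def _demo_qmf():
    h = daub(2)
    g = qmf(h)
    assert len(g) == len(h)
    assert np.allclose(np.dot(h, g), 0.0)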
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
    and slices for quick reuse. Then it inserts vectors into the final
    vector at the end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=np.float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {}
bitdic['0'] = v / sm
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
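# --- Illustrative usage sketch (added commentary, not in the original source).
# For a filter with N+1 coefficients, cascade() samples phi and psi at
# N * 2**J dyadic points.
def _demo_cascade():
    x, phi, psi = cascade(daub(2), J=5)
    assert len(x) == len(phi) == len(psi) == 3 * 2**5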
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of w.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
The complete version of the Morlet wavelet, with a correction
term to improve admissibility. For w greater than 5, the
correction term is negligible.
    Note that the energy of the returned wavelet is not normalised
according to s.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where r is the sampling rate.
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
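# --- Illustrative usage sketch (added commentary, not in the original source).
# The Morlet wavelet is complex-valued; its real and imaginary parts are a
# cosine- and sine-modulated Gaussian respectively.
def _demo_morlet():
    wav = morlet(200, w=5.0, s=1.0)
    assert len(wav) == 200
    assert np.iscomplexobj(wav)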
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-x^2/2 a^2)``,
where ``A = 2/sqrt(3a)pi^1/4``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(width,length)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
>>> length = min(10 * width[ii], len(data))
>>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
... width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
| bsd-3-clause |
gciteam6/xgboost | src/data/base.py | 1 | 4968 | # Built-in modules
from copy import deepcopy
import csv
from os import pardir, path, makedirs
import datetime
# Third-party modules
import numpy as np
import pandas as pd
import bloscpack as bp
PROJECT_ROOT_PATH = path.join(path.dirname(__file__), pardir, pardir)
RAW_DATA_BASEPATH = path.join(PROJECT_ROOT_PATH, "data/raw")
INTERIM_DATA_BASEPATH = path.join(PROJECT_ROOT_PATH, "data/interim")
PROCESSED_DATA_BASEPATH = path.join(PROJECT_ROOT_PATH, "data/processed")
DATETIME_FORMAT = "(?P<year>\d{4})(?P<month>\d{1,2})(?P<day>\d{1,2})(?P<hour>\d{2})(?P<minute>\d{2})"
TRAIN_DATE_RANGE = (
pd.to_datetime("2012-01-01 00:10:00"),
pd.to_datetime("2016-01-01 00:00:00")
)
TEST_DATE_RANGE = (
pd.to_datetime("2016-01-01 00:10:00"),
pd.to_datetime("2017-04-01 00:00:00")
)
KWARGS_READ_CSV_BASE = {
"sep": "\t",
"header": 0,
"na_values": ['', ' ']
}
KWARGS_TO_CSV_BASE = {
"sep": "\t"
}
KWARGS_OUTER_MERGE = {
"how": "outer",
"left_index": True,
"right_index": True
}
LABEL_LAT_HOUR, LABEL_LAT_MINUTE = "lat1", "lat2"
LABEL_LNG_HOUR, LABEL_LNG_MINUTE = "lng1", "lng2"
LABEL_LAT_DECIMAL, LABEL_LNG_DECIMAL = "lat_dec", "lng_dec"
class PathHandlerBase(object):
def __init__(self):
self.PROJECT_ROOT_PATH = PROJECT_ROOT_PATH
self.RAW_DATA_BASEPATH = RAW_DATA_BASEPATH
self.INTERIM_DATA_BASEPATH = INTERIM_DATA_BASEPATH
self.PROCESSED_DATA_BASEPATH = PROCESSED_DATA_BASEPATH
self.path = path
@staticmethod
def gen_abspath(relpath):
abspath = path.abspath(relpath)
makedirs(path.dirname(abspath), exist_ok=True)
return abspath
class BloscpackMixin:
@staticmethod
def read_blp(serialized_filepath):
return bp.unpack_ndarray_file(serialized_filepath)
@staticmethod
def to_blp(ndarray: np.array, serialized_filepath):
bp.pack_ndarray_file(ndarray, serialized_filepath)
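# --- Illustrative usage sketch (added commentary, not in the original file).
# Round-trip an ndarray through the BloscpackMixin helpers; the temporary
# path used here is purely hypothetical.
def _demo_blp_roundtrip():
    import os
    import tempfile
    arr = np.arange(6, dtype=np.float64)
    tmp_path = os.path.join(tempfile.mkdtemp(), "demo.blp")
    BloscpackMixin.to_blp(arr, tmp_path)
    assert np.array_equal(BloscpackMixin.read_blp(tmp_path), arr)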
class DataFrameHandlerBase(PathHandlerBase):
def __init__(self):
super().__init__()
self.DATETIME_FORMAT = DATETIME_FORMAT
self.TRAIN_DATE_RANGE = TRAIN_DATE_RANGE
self.TEST_DATE_RANGE = TEST_DATE_RANGE
self.KWARGS_READ_CSV_BASE = KWARGS_READ_CSV_BASE
self.KWARGS_TO_CSV_BASE = KWARGS_TO_CSV_BASE
self.KWARGS_OUTER_MERGE = KWARGS_OUTER_MERGE
def gen_read_csv_kwargs(self, kwargs_to_add: dict):
ret_dict = deepcopy(self.KWARGS_READ_CSV_BASE)
if kwargs_to_add is not None:
ret_dict.update(kwargs_to_add)
return ret_dict
def gen_to_csv_kwargs(self, kwargs_to_add: dict):
ret_dict = deepcopy(self.KWARGS_TO_CSV_BASE)
if kwargs_to_add is not None:
ret_dict.update(kwargs_to_add)
return ret_dict
def parse_datetime(self, df):
return pd.to_datetime(df.str.extract(self.DATETIME_FORMAT, expand=False))
@staticmethod
def gen_datetime_index(start, end, freq_min: int = 10):
return pd.date_range(start, end, freq=pd.offsets.Minute(freq_min))
@staticmethod
def gen_norm_datetime(year, month, day, hour, minute, second):
return datetime.datetime(year, month, day) + \
datetime.timedelta(hours=hour, minutes=minute, seconds=second)
@staticmethod
def add_annotations_to_column_names(df, attribute_name, location_name):
return [
'_'.join([
str(column_name), attribute_name, location_name
]) for column_name in df.columns
]
class LocationHandlerBase(DataFrameHandlerBase):
def __init__(self, master_filepath, **kwargs_location):
super().__init__()
self.location = pd.read_csv(
master_filepath, **self.gen_read_csv_kwargs(kwargs_location)
)
self.location[LABEL_LAT_DECIMAL] = self.location.apply(
lambda df: self.cast_60_to_10(df[LABEL_LAT_HOUR], df[LABEL_LAT_MINUTE]), axis=1
)
self.location[LABEL_LNG_DECIMAL] = self.location.apply(
lambda df: self.cast_60_to_10(df[LABEL_LNG_HOUR], df[LABEL_LNG_MINUTE]), axis=1
)
def get_near_observation_points(self, lat_mid, lng_mid, half_grid_size):
lat_max, lat_min = lat_mid + half_grid_size, lat_mid - half_grid_size
lng_max, lng_min = lng_mid + half_grid_size, lng_mid - half_grid_size
lat_within_mesh = self.location[LABEL_LAT_DECIMAL].apply(
            lambda lat: lat_min <= lat <= lat_max
)
lng_within_mesh = self.location[LABEL_LNG_DECIMAL].apply(
            lambda lng: lng_min <= lng <= lng_max
)
flg_within_mesh = [is_lat and ls_lng for (is_lat, ls_lng) in zip(lat_within_mesh, lng_within_mesh)]
return self.location.loc[flg_within_mesh, :]
@staticmethod
def cast_60_to_10(hour, minute, second=0):
return hour + (minute / 60) + (second / 3600)
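# --- Illustrative usage sketch (added commentary, not in the original file).
# cast_60_to_10 converts a sexagesimal coordinate to decimal degrees,
# e.g. 35 degrees 30 minutes -> 35.5.
def _demo_cast_60_to_10():
    assert LocationHandlerBase.cast_60_to_10(35, 30) == 35.5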
if __name__ == '__main__':
print("Here is src/data/base.py !")
| mit |
rs2/bokeh | bokeh/models/sources.py | 2 | 23015 | from __future__ import absolute_import
import warnings
from ..core.has_props import abstract
from ..core.properties import Any, Bool, ColumnData, Dict, Enum, Instance, Int, JSON, List, Seq, String
from ..model import Model
from ..util.dependencies import import_optional
from ..util.warnings import BokehUserWarning
from .callbacks import Callback
from .filters import Filter
pd = import_optional('pandas')
@abstract
class DataSource(Model):
''' A base class for data source types.
'''
selected = Dict(String, Dict(String, Any), default={
'0d': {'glyph': None, 'indices': []},
'1d': {'indices': []},
'2d': {'indices': {}}
}, help="""
A dict to indicate selected indices on different dimensions of this DataSource. Keys are:
.. code-block:: python
# selection information for line and patch glyphs
'0d' : {
# the glyph that was selected
'glyph': None
# array with the [smallest] index of the segment of the line that was hit
'indices': []
}
# selection for most (point-like) glyphs, except lines and patches
'1d': {
# indices of the points included in the selection
indices: []
}
# selection information for multiline and patches glyphs
'2d': {
# mapping of indices of the multiglyph to array of glyph indices that were hit
# e.g. {3: [5, 6], 4: [5]}
indices: {}
}
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
""")
@abstract
class ColumnarDataSource(DataSource):
''' A base class for data source types, which can be mapped onto
a columnar format.
'''
column_names = List(String, help="""
A list of names for all the columns in this DataSource.
""")
class ColumnDataSource(ColumnarDataSource):
''' Maps names of columns to sequences or arrays.
The ``ColumnDataSource`` is a fundamental data structure of Bokeh. Most
plots, data tables, etc. will be driven by a ``ColumnDataSource``.
If the ColumnDataSource initializer is called with a single argument, it
can be any of the following:
* A Python ``dict`` that maps string names to sequences of values, e.g.
lists, arrays, etc.
.. code-block:: python
data = {'x': [1,2,3,4], 'y': np.ndarray([10.0, 20.0, 30.0, 40.0])}
source = ColumnDataSource(data)
* A Pandas ``DataFrame`` object
.. code-block:: python
source = ColumnDataSource(df)
In this case the CDS will have columns corresponding to the columns of
the ``DataFrame``. If the ``DataFrame`` has a named index column, then
CDS will also have a column with this name. However, if the index name
(or any subname of a ``MultiIndex``) is ``None``, then the CDS will have
a column generically named ``index`` for the index.
* A Pandas ``GroupBy`` object
.. code-block:: python
group = df.groupby(('colA', 'ColB'))
In this case the CDS will have columns corresponding to the result of
calling ``group.describe()``. The ``describe`` method generates columns
for statistical measures such as ``mean`` and ``count`` for all the
non-grouped original columns. The CDS columns are formed by joining
original column names with the computed measure. For example, if a
``DataFrame`` has columns ``'year'`` and ``'mpg'``. Then passing
``df.groupby('year')`` to a CDS will result in columns such as
``'mpg_mean'``
If the ``GroupBy.describe`` result has a named index column, then
CDS will also have a column with this name. However, if the index name
(or any subname of a ``MultiIndex``) is ``None``, then the CDS will have
a column generically named ``index`` for the index.
Note this capability to adapt ``GroupBy`` objects may only work with
Pandas ``>=0.20.0``.
.. note::
There is an implicit assumption that all the columns in a given
``ColumnDataSource`` all have the same length at all times. For this
reason, it is usually preferable to update the ``.data`` property
of a data source "all at once".
'''
data = ColumnData(String, Seq(Any), help="""
Mapping of column names to sequences of data. The data can be, e.g.,
Python lists or tuples, NumPy arrays, etc.
""").asserts(lambda _, data: len(set(len(x) for x in data.values())) <= 1,
lambda obj, name, data: warnings.warn(
"ColumnDataSource's columns must be of the same length. " +
"Current lengths: %s" % ", ".join(sorted(str((k, len(v))) for k, v in data.items())), BokehUserWarning))
def __init__(self, *args, **kw):
''' If called with a single argument that is a dict or
pandas.DataFrame, treat that implicitly as the "data" attribute.
'''
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
if pd and isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
elif pd and isinstance(raw_data, pd.core.groupby.GroupBy):
raw_data = self._data_from_groupby(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
super(ColumnDataSource, self).__init__(**kw)
self.column_names[:] = list(raw_data.keys())
self.data.update(raw_data)
@staticmethod
def _data_from_df(df):
''' Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
_df = df.copy()
index = _df.index
tmp_data = {c: v.values for c, v in _df.iteritems()}
new_data = {}
for k, v in tmp_data.items():
if isinstance(k, tuple):
k = "_".join(k)
new_data[k] = v
if index.name:
new_data[index.name] = index.values
elif index.names:
try:
new_data["_".join(index.names)] = index.values
except TypeError:
new_data["index"] = index.values
else:
new_data["index"] = index.values
return new_data
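# Illustrative sketch (added for clarity, not part of the original source):
# tuple column names coming from a pandas MultiIndex are flattened with
# underscores, and an unnamed index becomes an "index" column.
#
#     df = pd.DataFrame({('adj_close', 'AAPL'): [1.0, 2.0]})
#     sorted(ColumnDataSource._data_from_df(df))
#     # -> ['adj_close_AAPL', 'index']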
@staticmethod
def _data_from_groupby(group):
''' Create a ``dict`` of columns from a Pandas GroupBy,
suitable for creating a ColumnDataSource.
The data generated is the result of running ``describe``
on the group.
Args:
group (GroupBy) : data to convert
Returns:
dict[str, np.array]
'''
return ColumnDataSource._data_from_df(group.describe())
@classmethod
def from_df(cls, data):
''' Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data)
@classmethod
def from_groupby(cls, data):
''' Create a ``dict`` of columns from a Pandas GroupBy,
suitable for creating a ColumnDataSource.
The data generated is the result of running ``describe``
on the group.
Args:
data (Groupby) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data.describe())
def to_df(self):
''' Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
'''
if not pd:
raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
if self.column_names:
return pd.DataFrame(self.data, columns=self.column_names)
else:
return pd.DataFrame(self.data)
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.column_names.append(name)
self.data[name] = data
return name
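# Usage sketch (added for clarity, not part of the original source): when no
# name is supplied, add() picks the next free "Series N" name.
#
#     source = ColumnDataSource(data=dict(x=[1, 2, 3]))
#     source.add([10, 20, 30])          # returns "Series 1"
#     source.add([4, 5, 6], name="y")   # returns "y"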
def remove(self, name):
''' Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
'''
try:
self.column_names.remove(name)
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def stream(self, new_data, rollover=None):
''' Efficiently update data source columns with new append-only data.
In cases where it is only necessary to append new data, this method
can efficiently send only the new data, instead of requiring the
entire data set to be re-sent.
Args:
new_data (dict[str, seq]) : a mapping of column names to sequences of
new data to append to each column.
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
# calls internal implementation
self._stream(new_data, rollover)
def _stream(self, new_data, rollover=None, setter=None):
''' Internal implementation to efficiently update data source columns
with new append-only data. The internal implementation adds the ``setter``
parameter. [https://github.com/bokeh/bokeh/issues/6577]
In cases where it is only necessary to append new data, this method
can efficiently send only the new data, instead of requiring the
entire data set to be re-sent.
Args:
new_data (dict[str, seq] or DataFrame or Series) : a mapping of
column names to sequences of new data to append to each column,
a pandas DataFrame, or a pandas Series in case of a single row -
in this case the Series index is used as column names
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
if pd and isinstance(new_data, pd.Series):
new_data = new_data.to_frame().T
if pd and isinstance(new_data, pd.DataFrame):
newkeys = set(new_data.columns)
else:
newkeys = set(new_data.keys())
oldkeys = set(self.data.keys())
if newkeys != oldkeys:
missing = oldkeys - newkeys
extra = newkeys - oldkeys
if missing and extra:
raise ValueError(
"Must stream updates to all existing columns (missing: %s, extra: %s)" % (", ".join(sorted(missing)), ", ".join(sorted(extra)))
)
elif missing:
raise ValueError("Must stream updates to all existing columns (missing: %s)" % ", ".join(sorted(missing)))
else:
raise ValueError("Must stream updates to all existing columns (extra: %s)" % ", ".join(sorted(extra)))
if not (pd and isinstance(new_data, pd.DataFrame)):
import numpy as np
lengths = set()
arr_types = (np.ndarray, pd.Series) if pd else np.ndarray
for k, x in new_data.items():
if isinstance(x, arr_types):
if len(x.shape) != 1:
raise ValueError("stream(...) only supports 1d sequences, got ndarray with size %r" % (x.shape,))
lengths.add(x.shape[0])
else:
lengths.add(len(x))
if len(lengths) > 1:
raise ValueError("All streaming column updates must be the same length")
self.data._stream(self.document, self, new_data, rollover, setter)
def patch(self, patches, setter=None):
''' Efficiently update data source columns at specific locations
If it is only necessary to update a small subset of data in a
ColumnDataSource, this method can be used to efficiently update only
the subset, instead of requiring the entire data set to be sent.
This method should be passed a dictionary that maps column names to
lists of tuples that describe a patch change to apply. To replace
individual items in columns entirely, the tuples should be of the
form:
.. code-block:: python
(index, new_value) # replace a single column value
# or
(slice, new_values) # replace several column values
Values at an index or slice will be replaced with the corresponding
new values.
In the case of columns whose values are other arrays or lists (e.g.
image or patches glyphs), it is also possible to patch "subregions".
In this case the first item of the tuple should be a list or tuple whose first
element is the index of the array item in the CDS patch, and whose
subsequent elements are integer indices or slices into the array item:
.. code-block:: python
# replace the entire 10th column of the 2nd array:
+----------------- index of item in column data source
|
| +--------- row subindex into array item
| |
| | +- column subindex into array item
V V V
([2, slice(None), 10], new_values)
Imagining a list of 2d NumPy arrays, the patch above is roughly
equivalent to:
.. code-block:: python
data = [arr1, arr2, ...] # list of 2d arrays
data[2][:, 10] = new_data
There are some limitations to the kinds of slices and data that can
be accepted.
* Negative ``start``, ``stop``, or ``step`` values for slices will
result in a ``ValueError``.
* In a slice, ``start > stop`` will result in a ``ValueError``
* When patching 1d or 2d subitems, the subitems must be NumPy arrays.
* New values must be supplied as a **flattened one-dimensional array**
of the appropriate size.
Args:
patches (dict[str, list[tuple]]) : lists of patches for each column
Returns:
None
Raises:
ValueError
Example:
The following example shows how to patch entire column elements:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))
patches = {
'foo' : [ (slice(2), [11, 12]) ],
'bar' : [ (0, 101), (2, 301) ],
}
source.patch(patches)
After this operation, the value of the ``source.data`` will be:
.. code-block:: python
dict(foo=[11, 22, 30], bar=[101, 200, 301])
For a more comprehensive example, see :bokeh-tree:`examples/howto/patch_app.py`.
'''
import numpy as np
extra = set(patches.keys()) - set(self.data.keys())
if extra:
raise ValueError("Can only patch existing columns (extra: %s)" % ", ".join(sorted(extra)))
for name, patch in patches.items():
col_len = len(self.data[name])
for ind, value in patch:
# integer index, patch single value of 1d column
if isinstance(ind, int):
if ind > col_len or ind < 0:
raise ValueError("Out-of bounds index (%d) in patch for column: %s" % (ind, name))
# slice index, patch multiple values of 1d column
elif isinstance(ind, slice):
_check_slice(ind)
if ind.stop is not None and ind.stop > col_len:
raise ValueError("Out-of bounds slice index stop (%d) in patch for column: %s" % (ind.stop, name))
# multi-index, patch sub-regions of "n-d" column
elif isinstance(ind, (list, tuple)):
if len(ind) == 0:
raise ValueError("Empty (length zero) patch multi-index")
if len(ind) == 1:
raise ValueError("Patch multi-index must contain more than one subindex")
if not isinstance(ind[0], int):
raise ValueError("Initial patch sub-index may only be integer, got: %s" % ind[0])
if ind[0] > col_len or ind[0] < 0:
raise ValueError("Out-of bounds initial sub-index (%d) in patch for column: %s" % (ind, name))
if not isinstance(self.data[name][ind[0]], np.ndarray):
raise ValueError("Can only sub-patch into columns with NumPy array items")
if len(self.data[name][ind[0]].shape) != (len(ind)-1):
raise ValueError("Shape mismatch between patch slice and sliced data")
elif isinstance(ind[0], slice):
_check_slice(ind[0])
if ind[0].stop is not None and ind[0].stop > col_len:
raise ValueError("Out-of bounds initial slice sub-index stop (%d) in patch for column: %s" % (ind.stop, name))
# Note: bounds of sub-indices after the first are not checked!
for subind in ind[1:]:
if not isinstance(subind, (int, slice)):
raise ValueError("Invalid patch sub-index: %s" % subind)
if isinstance(subind, slice):
_check_slice(subind)
else:
raise ValueError("Invalid patch index: %s" % ind)
self.data._patch(self.document, self, patches, setter)
def _check_slice(s):
if (s.start is not None and s.stop is not None and s.start > s.stop):
raise ValueError("Patch slices must have start < end, got %s" % s)
if (s.start is not None and s.start < 0) or \
(s.stop is not None and s.stop < 0) or \
(s.step is not None and s.step < 0):
raise ValueError("Patch slices must have non-negative (start, stop, step) values, got %s" % s)
class CDSView(Model):
''' A view into a ColumnDataSource that represents a row-wise subset.
'''
filters = List(Instance(Filter), default=[], help="""
List of filters that the view comprises.
""")
source = Instance(ColumnarDataSource, help="""
The ColumnDataSource associated with this view. Used to determine
the length of the columns.
""")
class GeoJSONDataSource(ColumnarDataSource):
'''
'''
geojson = JSON(help="""
GeoJSON that contains features for plotting. Currently GeoJSONDataSource can
only process a FeatureCollection or GeometryCollection.
""")
@abstract
class RemoteSource(ColumnDataSource):
'''
'''
data_url = String(help="""
The URL to the endpoint for the data.
""")
polling_interval = Int(help="""
Polling interval (in milliseconds) for updating the data source.
""")
class AjaxDataSource(RemoteSource):
'''
'''
method = Enum('POST', 'GET', help="HTTP method - GET or POST")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``),
or to replace existing data entirely.
""")
max_size = Int(help="""
Maximum size of the data array to keep after each pull request.
Once the data grows larger than this size, it will be right shifted (discarded from the start).
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in AJAX requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
content_type = String(default='application/json', help="""
Set the "contentType" parameter for the Ajax request.
""")
http_headers = Dict(String, String, help="""
HTTP headers to set for the Ajax request.
""")
| bsd-3-clause |
python-control/python-control | control/nichols.py | 2 | 10971 | """nichols.py
Functions for plotting Black-Nichols charts.
Routines in this module:
nichols.nichols_plot aliased as nichols.nichols
nichols.nichols_grid
"""
# nichols.py - Nichols plot
#
# Contributed by Allan McInnes <[email protected]>
#
# This file contains some standard control system plots: Bode plots,
# Nyquist plots, Nichols plots and pole-zero diagrams
#
# Copyright (c) 2010 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id: freqplot.py 139 2011-03-30 16:19:59Z murrayrm $
import numpy as np
import matplotlib.pyplot as plt
from .ctrlutil import unwrap
from .freqplot import _default_frequency_range
from . import config
__all__ = ['nichols_plot', 'nichols', 'nichols_grid']
# Default parameters values for the nichols module
_nichols_defaults = {
'nichols.grid': True,
}
def nichols_plot(sys_list, omega=None, grid=None):
"""Nichols plot for a system
Plots a Nichols plot for the system over an (optional) frequency range.
Parameters
----------
sys_list : list of LTI, or LTI
List of linear input/output systems (single system is OK)
omega : array_like
Range of frequencies (list or bounds) in rad/sec
grid : boolean, optional
True if the plot should include a Nichols-chart grid. Default is True.
Returns
-------
None
"""
# Get parameter values
grid = config._get_param('nichols', 'grid', grid, True)
# If argument was a singleton, turn it into a list
if not getattr(sys_list, '__iter__', False):
sys_list = (sys_list,)
# Select a default range if none is provided
if omega is None:
omega = _default_frequency_range(sys_list)
for sys in sys_list:
# Get the magnitude and phase of the system
mag_tmp, phase_tmp, omega = sys.frequency_response(omega)
mag = np.squeeze(mag_tmp)
phase = np.squeeze(phase_tmp)
# Convert to Nichols-plot format (phase in degrees,
# and magnitude in dB)
x = unwrap(np.degrees(phase), 360)
y = 20*np.log10(mag)
# Generate the plot
plt.plot(x, y)
plt.xlabel('Phase (deg)')
plt.ylabel('Magnitude (dB)')
plt.title('Nichols Plot')
# Mark the -180 point
plt.plot([-180], [0], 'r+')
# Add grid
if grid:
nichols_grid()
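# Usage sketch (added for clarity, not part of the original module): plotting
# a Nichols chart for a simple second-order transfer function.  Assumes the
# package is importable as ``control``.
#
#     import control
#     G = control.tf([1], [1, 2, 1])   # G(s) = 1 / (s^2 + 2s + 1)
#     control.nichols(G)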
def nichols_grid(cl_mags=None, cl_phases=None, line_style='dotted'):
"""Nichols chart grid
Plots a Nichols chart grid on the current axis, or creates a new chart
if no plot already exists.
Parameters
----------
cl_mags : array-like (dB), optional
Array of closed-loop magnitudes defining the iso-gain lines on a
custom Nichols chart.
cl_phases : array-like (degrees), optional
Array of closed-loop phases defining the iso-phase lines on a custom
Nichols chart. Must be in the range -360 < cl_phases < 0
line_style : string, optional
:doc:`Matplotlib linestyle \
<matplotlib:gallery/lines_bars_and_markers/linestyles>`
"""
# Default chart size
ol_phase_min = -359.99
ol_phase_max = 0.0
ol_mag_min = -40.0
ol_mag_max = default_ol_mag_max = 50.0
# Find bounds of the current dataset, if there is one.
if plt.gcf().gca().has_data():
ol_phase_min, ol_phase_max, ol_mag_min, ol_mag_max = plt.axis()
# M-circle magnitudes.
if cl_mags is None:
# Default chart magnitudes
# The key set of magnitudes are always generated, since this
# guarantees a recognizable Nichols chart grid.
key_cl_mags = np.array([-40.0, -20.0, -12.0, -6.0, -3.0, -1.0, -0.5,
0.0, 0.25, 0.5, 1.0, 3.0, 6.0, 12.0])
# Extend the range of magnitudes if necessary. The extended arange
# will end up empty if no extension is required. Assumes that
# closed-loop magnitudes are approximately aligned with open-loop
# magnitudes beyond the value of np.min(key_cl_mags)
cl_mag_step = -20.0 # dB
extended_cl_mags = np.arange(np.min(key_cl_mags),
ol_mag_min + cl_mag_step, cl_mag_step)
cl_mags = np.concatenate((extended_cl_mags, key_cl_mags))
# N-circle phases (should be in the range -360 to 0)
if cl_phases is None:
# Choose a reasonable set of default phases (denser if the open-loop
# data is restricted to a relatively small range of phases).
key_cl_phases = np.array([-0.25, -45.0, -90.0, -180.0, -270.0,
-325.0, -359.75])
if np.abs(ol_phase_max - ol_phase_min) < 90.0:
other_cl_phases = np.arange(-10.0, -360.0, -10.0)
else:
other_cl_phases = np.arange(-10.0, -360.0, -20.0)
cl_phases = np.concatenate((key_cl_phases, other_cl_phases))
else:
assert ((-360.0 < np.min(cl_phases)) and (np.max(cl_phases) < 0.0))
# Find the M-contours
m = m_circles(cl_mags, phase_min=np.min(cl_phases),
phase_max=np.max(cl_phases))
m_mag = 20*np.log10(np.abs(m))
m_phase = np.mod(np.degrees(np.angle(m)), -360.0) # Unwrap
# Find the N-contours
n = n_circles(cl_phases, mag_min=np.min(cl_mags), mag_max=np.max(cl_mags))
n_mag = 20*np.log10(np.abs(n))
n_phase = np.mod(np.degrees(np.angle(n)), -360.0) # Unwrap
# Plot the contours behind other plot elements.
# The "phase offset" is used to produce copies of the chart that cover
# the entire range of the plotted data, starting from a base chart computed
# over the range -360 < phase < 0. Given the range
# the base chart is computed over, the phase offset should be 0
# for -360 < ol_phase_min < 0.
phase_offset_min = 360.0*np.ceil(ol_phase_min/360.0)
phase_offset_max = 360.0*np.ceil(ol_phase_max/360.0) + 360.0
phase_offsets = np.arange(phase_offset_min, phase_offset_max, 360.0)
for phase_offset in phase_offsets:
# Draw M and N contours
plt.plot(m_phase + phase_offset, m_mag, color='lightgray',
linestyle=line_style, zorder=0)
plt.plot(n_phase + phase_offset, n_mag, color='lightgray',
linestyle=line_style, zorder=0)
# Add magnitude labels
for x, y, m in zip(m_phase[:][-1] + phase_offset, m_mag[:][-1],
cl_mags):
align = 'right' if m < 0.0 else 'left'
plt.text(x, y, str(m) + ' dB', size='small', ha=align,
color='gray')
# Fit axes to generated chart
plt.axis([phase_offset_min - 360.0, phase_offset_max - 360.0,
np.min(cl_mags), np.max([ol_mag_max, default_ol_mag_max])])
#
# Utility functions
#
# This section of the code contains some utility functions for
# generating Nichols plots
#
def closed_loop_contours(Gcl_mags, Gcl_phases):
"""Contours of the function Gcl = Gol/(1+Gol), where
Gol is an open-loop transfer function, and Gcl is a corresponding
closed-loop transfer function.
Parameters
----------
Gcl_mags : array-like
Array of magnitudes of the contours
Gcl_phases : array-like
Array of phases in radians of the contours
Returns
-------
contours : complex array
Array of complex numbers corresponding to the contours.
"""
# Compute the contours in Gcl-space. Since we're given closed-loop
# magnitudes and phases, this is just a case of converting them into
# a complex number.
Gcl = Gcl_mags*np.exp(1.j*Gcl_phases)
# Invert Gcl = Gol/(1+Gol) to map the contours into the open-loop space
return Gcl/(1.0 - Gcl)
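# Derivation note (added for clarity): solving Gcl = Gol / (1 + Gol) for the
# open-loop response gives Gol = Gcl / (1 - Gcl), which is the expression
# returned by closed_loop_contours above.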
def m_circles(mags, phase_min=-359.75, phase_max=-0.25):
"""Constant-magnitude contours of the function Gcl = Gol/(1+Gol), where
Gol is an open-loop transfer function, and Gcl is a corresponding
closed-loop transfer function.
Parameters
----------
mags : array-like
Array of magnitudes in dB of the M-circles
phase_min : degrees
Minimum phase in degrees of the M-circles
phase_max : degrees
Maximum phase in degrees of the M-circles
Returns
-------
contours : complex array
Array of complex numbers corresponding to the contours.
"""
# Convert magnitudes and phase range into a grid suitable for
# building contours
phases = np.radians(np.linspace(phase_min, phase_max, 2000))
Gcl_mags, Gcl_phases = np.meshgrid(10.0**(mags/20.0), phases)
return closed_loop_contours(Gcl_mags, Gcl_phases)
def n_circles(phases, mag_min=-40.0, mag_max=12.0):
"""Constant-phase contours of the function Gcl = Gol/(1+Gol), where
Gol is an open-loop transfer function, and Gcl is a corresponding
closed-loop transfer function.
Parameters
----------
phases : array-like
Array of phases in degrees of the N-circles
mag_min : dB
Minimum magnitude in dB of the N-circles
mag_max : dB
Maximum magnitude in dB of the N-circles
Returns
-------
contours : complex array
Array of complex numbers corresponding to the contours.
"""
# Convert phases and magnitude range into a grid suitable for
# building contours
mags = np.linspace(10**(mag_min/20.0), 10**(mag_max/20.0), 2000)
Gcl_phases, Gcl_mags = np.meshgrid(np.radians(phases), mags)
return closed_loop_contours(Gcl_mags, Gcl_phases)
# Function aliases
nichols = nichols_plot
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28778 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
theoryno3/scikit-learn | sklearn/cluster/spectral.py | 18 | 18027 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
except LinAlgError:
svd_restarts += 1
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
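
# ----------------------------------------------------------------------------
# Illustrative sketch (not part of scikit-learn): a minimal, hypothetical use
# of ``discretize`` on a hand-built, nearly one-hot embedding, matching the
# alternating rotation/partition search described in the Notes above. The
# helper name ``_demo_discretize`` and the toy numbers are assumptions
# introduced here for illustration only.
def _demo_discretize():
    import numpy as np
    # Six samples whose rows already look like (noisy) cluster indicators.
    embedding = np.array([[1.00, 0.02],
                          [0.98, 0.01],
                          [0.99, 0.03],
                          [0.02, 1.00],
                          [0.01, 0.97],
                          [0.03, 0.99]])
    # Expected to place the first three rows in one cluster, the rest in another.
    return discretize(embedding, random_state=0)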
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance, when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
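
# ----------------------------------------------------------------------------
# Illustrative sketch (not part of scikit-learn): a hedged end-to-end call of
# ``spectral_clustering`` with the 'discretize' label-assignment strategy
# described in the docstring. The RBF affinity is built by hand from pairwise
# squared distances of two small Gaussian blobs; the helper name and the
# gamma value of 0.5 are assumptions made for illustration only.
def _demo_spectral_clustering():
    import numpy as np
    rng = np.random.RandomState(0)
    # Two blobs of ten points each, far apart in the plane.
    X = np.vstack([rng.randn(10, 2) + [5.0, 5.0],
                   rng.randn(10, 2) - [5.0, 5.0]])
    # Symmetric RBF affinity: exp(-gamma * squared euclidean distance).
    sq_dists = ((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1)
    affinity = np.exp(-0.5 * sq_dists)
    return spectral_clustering(affinity, n_clusters=2, random_state=0,
                               assign_labels='discretize')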
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance, when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
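
# ----------------------------------------------------------------------------
# Illustrative sketch (not part of scikit-learn): it follows the Notes in the
# class docstring -- turning a precomputed *distance* matrix into a similarity
# matrix with the heat kernel and clustering it with
# ``SpectralClustering(affinity='precomputed')``. The helper name and the
# bandwidth choice ``delta`` are assumptions made for illustration only.
def _demo_precomputed_affinity():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(8, 2), rng.randn(8, 2) + [6.0, 0.0]])
    # Pairwise euclidean distances, then the Gaussian (RBF, heat) kernel.
    dist = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1))
    delta = dist.std()  # a crude, data-driven bandwidth
    similarity = np.exp(-dist ** 2 / (2.0 * delta ** 2))
    model = SpectralClustering(n_clusters=2, affinity='precomputed',
                               random_state=0)
    return model.fit(similarity).labels_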
| bsd-3-clause |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/core/plot/progress.py | 1 | 5019 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.plot.progress Contains the ProgressPlotter class, used for creating plots of the progress
# of different phases of a SKIRT simulation as a function of time.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
# Import the relevant PTS classes and modules
from .plotter import Plotter
from ..basics.map import Map
from ..tools.logging import log
from ..tools import filesystem as fs
# -----------------------------------------------------------------
full_phase_names = {"stellar": "stellar emission phase",
"spectra": "calculation of dust emission spectra",
"dust": "dust emission phase"}
# -----------------------------------------------------------------
class ProgressPlotter(Plotter):
"""
    This class creates plots of the progress of the different phases of a SKIRT simulation as a function of time.
"""
def __init__(self):
"""
The constructor ...
"""
# Call the constructor of the base class
super(ProgressPlotter, self).__init__()
# -----------------------------------------------------------------
@staticmethod
def default_input():
"""
This function ...
:return:
"""
return "progress.dat"
# -----------------------------------------------------------------
def prepare_data(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Preparing the input data into plottable format...")
# Get the number of processes
ranks = np.unique(self.table["Process rank"])
assert len(ranks) == max(ranks) + 1
processes = len(ranks)
# Initialize the data structure to contain the progress information in plottable format
self.data = defaultdict(lambda: [Map({"times": [], "progress": []}) for i in range(processes)])
# Loop over the different phases
for phase in "stellar", "spectra", "dust":
# Loop over the different entries in the progress table
for i in range(len(self.table)):
# Skip entries that do not belong to the current simulation phase
if not self.table["Simulation phase"][i] == phase: continue
# Get the process rank
rank = self.table["Process rank"][i]
# Get the time and progress
time = self.table["Time"][i]
progress = self.table["Progress"][i]
# Add the data point to the data structure
self.data[phase][rank].times.append(time)
self.data[phase][rank].progress.append(progress)
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Making the plots...")
# Loop over the different phases in the data structure
for phase in self.data:
# Determine the path to the plot file for this phase
plot_path = fs.join(self.output_path, "progress_" + phase + ".pdf")
# Determine the title for the plot
title = "Progress of " + full_phase_names[phase]
# Create the plot for this simulation phase
create_progress_plot(self.data[phase], plot_path, title)
# -----------------------------------------------------------------
def create_progress_plot(data, path, title):
"""
This function ...
:return:
"""
# Initialize figure
plt.figure()
plt.clf()
# Loop over all the different process ranks for which we have data
for rank in range(len(data)):
# Name of the current process
process = "P" + str(rank)
# Add the progress of the current process to the figure
plt.plot(data[rank].times, data[rank].progress, label=process)
plt.xlim(0)
plt.grid('on')
# Set the axis labels
plt.xlabel("Time (s)", fontsize='large')
plt.ylabel("Progress (%)", fontsize='large')
# Set the plot title
plt.title(title)
# Set the legend
if len(data) > 16: plt.legend(loc='upper center', ncol=8, bbox_to_anchor=(0.5, -0.1), prop={'size': 8})
elif len(data) > 1: plt.legend(loc='lower right', ncol=4, prop={'size': 8})
else: pass
# Save the figure
plt.savefig(path, bbox_inches="tight", pad_inches=0.25)
plt.close()
# -----------------------------------------------------------------
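# Illustrative sketch (not part of PTS): a minimal, hypothetical way to drive
# create_progress_plot() directly with hand-built data. Each entry only needs
# 'times' and 'progress' attributes, which the Map class imported above
# provides; the helper name, output file name and toy numbers are assumptions
# made for illustration only.
def _demo_create_progress_plot(path="progress_demo.pdf"):
    data = [Map({"times": [0, 10, 20, 30], "progress": [0, 30, 70, 100]}),
            Map({"times": [0, 10, 20, 30], "progress": [0, 25, 60, 100]})]
    create_progress_plot(data, path, "Progress of the " + full_phase_names["stellar"])
# -----------------------------------------------------------------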
| mit |
sthyme/ZFSchizophrenia | BehaviorAnalysis/statsandgraphs_notseparatebyfishregraph.py | 1 | 17322 | #!/usr/bin/python
import os,sys,glob,re
import numpy as np
import scipy
from scipy import stats
import datetime
import time
from datetime import timedelta
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors as c
from matplotlib import cm
from scipy.stats.kde import gaussian_kde
from numpy import linspace
from scipy.stats import kruskal
#from scipy.stats import nanmean
#from scipy.stats import nanmedian
import pandas as pd
import statsmodels.api as sm
from scipy.stats import mstats
from matplotlib.ticker import FormatStrFormatter
# MAKE LIST OF ALL THINGS BEING COMBINED - TRY AVERAGE AND ALSO LOWEST P-VALUE
# LOWEST WOULD WORK IF STATS ARE MORE RELIABLE. I THINK I PROBABLY WILL USE THIS.
# don't forget!!!!! You will need to switch order for some and subtract wt from mut or swap signs on the means or the coefficient!
#min_split_dict = {"10min_day1evening":[("10min_day1evening",None,None)], "min_day1evening":[("min_day1evening",None,None)], "10min_day1taps":[("10min_day1taps",None,None)], "min_day1taps":[("min_day1taps",None,None)], "10min_day2darkflashes":[("10min_day2darkflashes",None,None)], "min_day2darkflashes":[("min_day2darkflashes",None,None)], "10min_day2heatshock":[("10min_day2heatshock",None,None)], "min_day2heatshock":[("min_day2heatshock",None,None)], "10min_day2nightstim":[("10min_day2nightstim",None,None)], "min_day2nightstim":[("min_day2nightstim",None,None)], "min_day1day":[("min_day1day",14,None)], "min_day2morning":[("min_day2night2",0,119), ("min_day2morntrans",119,129), ("min_day2morning",129,None)], "min_day1mornstim":[("min_day1mornstim",0,120)], "min_day1night":[("min_day1night",0,-9), ("min_day1morntrans",-9,None)], "10min_day1day":[("10min_day1day",1,None)], "10min_day2morning":[("10min_day2night2",0,10), ("10min_day2morning",11,None)], "10min_day1mornstim":[("10min_day1mornstim",0,12)], "10min_day1night":[("10min_day1night",0,-1)]} # Can't do the "TRANS" times for 10min because they would be one datapoint
skip_list = ["dpixnumberofbouts_minus_distnumberofbouts", "avelonginterboutinterval", "aveboutdispoverdist", "aveboutcumdistovercumdpix", "distoverdpix", "polygonareadivdist", "daytap1", "nighttap1"]
doubledict = {"nightprepulseinhibition100b":"nightprepulseinhibition102", "dayprepulseinhibition100b":"dayprepulseinhibition102", "shortnightprepulseinhibition100b":"shortnightprepulseinhibition102", "shortdayprepulseinhibition100b":"shortdayprepulseinhibition102", "nightprepulseinhibition100c":"nightprepulseinhibition102", "dayprepulseinhibition100c":"dayprepulseinhibition102", "shortnightprepulseinhibition100c":"shortnightprepulseinhibition102", "shortdayprepulseinhibition100c":"shortdayprepulseinhibition102", "a2darkflash103":"adarkflash103", "b2darkflash103":"bdarkflash103", "c2darkflash103":"cdarkflash103", "d2darkflash103":"ddarkflash103", "d0darkflash103":"a0darkflash103", "adaytaphab102":"adaytappre102", "bdaytaphab102":"adaytappostbdaytappre102", "cdaytaphab102":"bdaytappostcdaytappre102", "nighttaphab102":"nighttappre102"}
labels = {
"latencyresponse_dpix": ("Events", "Response latency (ms)"),
"freqresponse_dpix": ("Events", "Response frequency"),
"polygonarea_dist": ("Events", "Response area (pixels)"),
"timeresponse_dpix": ("Events", "Response time (ms)"),
"totaldistanceresponse_dist": ("Events", "Response cumulative distance (pixels)"),
"fullboutdatamax_dpix": ("Events", "Maximum dpix (pixels)"),
"fullboutdatamax_dist": ("Events", "Maximum distance (pixels)"),
"fullboutdatamaxloc_dpix": ("Events", "Time of maximum dpix (ms)"),
"fullboutdatamaxloc_dist": ("Events", "Time of maximum distance (ms)"),
"fullboutdata_dpix": ("Time (ms)", "Dpix (pixels)"),
"fullboutdata_dist": ("Time (ms)", "Distance (pixels)"),
"velocityresponse_dist": ("Events", "Response velocity (pixels / ms)"),
"speedresponse_dist": ("Events", "Response speed (pixels / ms)"),
"cumdpixresponse_dpix": ("Events", "Response cumulative dpix (pixels)"),
"displacement_dist": ("Events", "Response displacement (pixels)"),
"boutcenterfrac_10min": ("Time (min)", "Fraction of interbout time in well center / 10 min"),
"boutcenterfrac_min": ("Time (min)", "Fraction of interbout time in well center / min"),
"boutaverhofrac_10min": ("Time (min)", "Average interbout rho / maximum rho / 10 min"),
"boutaverhofrac_min": ("Time (min)", "Average interbout rho / maximum rho / min"),
"_centerfrac_10min": ("Time (min)", "Fraction of bout time in well center / 10 min"),
"_centerfrac_min": ("Time (min)", "Fraction of bout time in well center / min"),
"_averhofrac_10min": ("Time (min)", "Average bout rho / maximum rho / 10 min"),
"_averhofrac_min": ("Time (min)", "Average bout rho / maximum rho / min"),
"aveboutdisp_min": ("Time (min)", "Average bout displacement (pixels)"),
"aveboutdisp_10min": ("Time (min)", "Average bout displacement (pixels)"),
"aveboutdist_min": ("Time (min)", "Average bout distance (pixels)"),
"aveboutdist_10min": ("Time (min)", "Average bout distance (pixels)"),
"distsecper_min": ("Time (min)", "Active (dist) second / min"),
"dpixsecper_min": ("Time (min)", "Active (dpix) second / min"),
"distminper_10min": ("Time (min)", "Active (dist) min / 10 min"),
"dpixminper_10min": ("Time (min)", "Active (dpix) min / 10 min"),
"aveboutvel_min": ("Time (min)", "Average bout velocity (pixels / ms) / min"),
"aveboutspeed_min": ("Time (min)", "Average bout speed (pixels / ms) / min"),
"aveboutvel_10min": ("Time (min)", "Average bout velocity (pixels / ms) / 10 min"),
"aveboutspeed_10min": ("Time (min)", "Average bout speed (pixels / ms) / 10 min"),
"aveboutcumdpix_10min": ("Time (min)", "Average bout cumulative dpix (pixels) / 10 min"),
"aveboutcumdpix_min": ("Time (min)", "Average bout cumulative dpix (pixels) / min"),
"aveinterboutinterval_min": ("Time (min)", "Average interbout (dist) interval (sec) / min"),
"aveinterboutinterval_10min": ("Time (min)", "Average interbout (dist) interval (sec) / 10 min"),
"dpixinterboutinterval_min": ("Time (min)", "Average interbout (dpix) interval (sec) / min"),
"dpixinterboutinterval_10min": ("Time (min)", "Average interbout (dpix) interval (sec) / 10 min"),
"_avebouttime_min": ("Time (min)", "Average bout (dist) time (ms) / min"),
"_avebouttime_10min": ("Time (min)", "Average bout (dist) time (ms) / 10 min"),
"dpixavebouttime_min": ("Time (min)", "Average bout (dpix) time (ms) / min"),
"dpixavebouttime_10min": ("Time (min)", "Average bout (dpix) time (ms) / 10 min"),
"_numberofbouts_min": ("Time (min)", "Number of bouts (dist) / min"),
"_numberofbouts_10min": ("Time (min)", "Number of bouts (dist) / 10 min"),
"dpixnumberofbouts_min": ("Time (min)", "Number of bouts (dpix) / min"),
"dpixnumberofbouts_10min": ("Time (min)", "Number of bouts (dpix) / 10 min"),
} # no longer need the min and 10 min because I'm switching them all to min, but not changing now
def find_labels(ribgraphname):
xlabel = ""
ylabel = ""
for l in labels.keys():
if l in ribgraphname:
xlabel = labels[l][0]
ylabel = labels[l][1]
return xlabel, ylabel
def box_plot(array1, array2, type, ylabel):
data = []
nparray1 = np.asarray(array1)
nparray2 = np.asarray(array2)
nparray1.flatten()
nparray2.flatten()
data.append(nparray1)
data.append(nparray2)
boxgraphname = "boxgraph_pd_" + "_" + type + ".png"
dictdata = {}
for l in range(0, len(data)):
if data[l].ndim > 1:
mu1 = np.nanmean(data[l], axis=1)
else:
mu1 = data[l]
dictdata[str(l)] = mu1
df = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in dictdata.iteritems() ]))
df.to_csv(boxgraphname + "_data", sep='\t')
plt.clf()
plt.cla()
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax1.set_ylabel(ylabel)
ax1.set_xlabel("Control, Test")
xticks = ["control", "test"]
ax1.set_xticklabels(xticks)
plot = df.boxplot(ax=ax1)
plt.savefig(boxgraphname, transparent=True)
plt.close()
def ribbon_plot(array1, array2, type, xlabel, ylabel, t = None):
ribgraphname = type + ".png"
fig = plt.figure()
ax1 = fig.add_subplot(121)
array1 = np.atleast_2d(array1)
array2 = np.atleast_2d(array2)
    if t is None:
t = np.arange(np.shape(array1)[1])
if "fullboutdata_" in ribgraphname:
t = t * 3.5
if "_10min_" in ribgraphname:
t = t * 10
# more than 3 hours
if "combo" in ribgraphname:
# trying to change axis ticks to ms
t = t.astype(float)
t = t / 60.0
xlabel = "Time (hour)"
ax1.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
#print "t: ", t
if "histgraph" in ribgraphname:
# xlabel = "Bins"
xlabel = ylabel + " (binned)"
ylabel = "Probability"
mu1 = np.nanmean(array1, axis=0)
sigma1 = stats.sem(array1, axis=0, nan_policy='omit')
mu2 = np.nanmean(array2, axis=0)
sigma2 = stats.sem(array2, axis=0, nan_policy='omit')
# trying to change axis ticks to ms
# if "fullboutdata_" in ribgraphname:
# scale = 3.5
# ticks = ticker.FuncFormatter(lambda t, pos: '{0:g}'.format(x*scale))
# ax1.xaxis.set_major_formatter(ticks)
ax1.plot(t, mu1, lw=1, label = "mean wt", color = 'black')
ax1.plot(t, mu2, lw=1, label = "mean mut", color = 'red')
ax1.fill_between(t, mu1+sigma1, mu1-sigma1, facecolor='black', alpha=0.3)
ax1.fill_between(t, mu2+sigma2, mu2-sigma2, facecolor='red', alpha=0.3)
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax1.grid()
fig.savefig(ribgraphname, transparent=True)
plt.close()
def linear_model_array(ribgraphname, array1, array2):
fw = open(ribgraphname + "_newdata.csv", 'w')
fw.write("time,movement,id,mutornot\n")
# Used to have this check in the justlmm.py, but I don't think I need it now that I'm preprocessing
#if datawt.shape[0] > 5 and datamut.shape[0] > 5:
for n in range(0, array1.shape[0]):
t = 0
for d in array1[n,:]:
fw.write(str(t))
fw.write(",")
fw.write(str(d))
fw.write(",")
fw.write(str(n))
fw.write(",wt")
fw.write("\n")
t = t+1
for n2 in range(0, array2.shape[0]):
t2 = 0
for d2 in array2[n2,:]:
fw.write(str(t2))
fw.write(",")
fw.write(str(d2))
# just adding 100 to the id number, so that it's different from wt ids, since real ids are gone by now
fw.write(",")
fw.write(str(int(n2) + 100))
fw.write(",mut")
fw.write("\n")
t2 = t2+1
fw.close()
data = pd.read_csv(ribgraphname + "_newdata.csv")
data = data[data.movement.notnull()]
model = sm.MixedLM.from_formula("movement ~ mutornot + time + mutornot * time", data, groups=data["id"])
result = model.fit()
print ribgraphname
print result.summary()
def linear_model_re(ribgraphname, array1, array2):
data = pd.read_csv(ribgraphname + "_newdata.csv")
data = data[data.movement.notnull()]
model = sm.MixedLM.from_formula(formula = "movement ~ mutornot", re_formula="time", data=data, groups=data["id"])
result = model.fit()
print ribgraphname
print result.summary()
def calculate_peak(fullarray):
# THIS IS TO GET THE PEAK VALUE FOR THE FULLBOUTDATA PLOTS
maxlist = []
maxlistloc = []
for n in range (0, np.shape(fullarray)[0]):
maxtest = np.nanmax(fullarray[n,:])
maxlist.append(maxtest)
maxloc = np.where(fullarray[n,:]==maxtest)[0]
maxloc = maxloc * 3.5
if np.shape(maxloc)[0] > 0:
maxlistloc.append(maxloc[0])
maxarray = np.asarray(maxlist)
maxarrayloc = np.asarray(maxlistloc)
return maxarray, maxarrayloc
def anova(dataname, nparray1, nparray2):
if nparray1.ndim > 1:
H, pval = mstats.kruskalwallis(np.nanmean(nparray1, axis=1), np.nanmean(nparray2, axis=1))
print "anova: ", dataname, ': Mean of array wt, mut, H-stat, P-value: ', str(np.nanmean(np.nanmean(nparray1,axis=1))), str(np.nanmean(np.nanmean(nparray2,axis=1))), str(H), str(pval)
else:
H, pval = mstats.kruskalwallis(nparray1, nparray2)
print "anova: ", dataname, ': Mean of array wt, mut, H-stat, P-Value: ', str(np.nanmean(np.nanmean(nparray1))), str(np.nanmean(np.nanmean(nparray2))), str(H), str(pval)
def read_process_data(ribgraphname, newarray_dict, anov_dict):
    # Skip any measure that appears in the global skip_list
    for skip_name in skip_list:
        if skip_name in ribgraphname:
            return
arraywt = np.loadtxt(ribgraphname + "_a1_data", delimiter = ',')
arraymut = np.loadtxt(ribgraphname + "_a2_data", delimiter=',')
if "histgraph" not in ribgraphname:
if "_min_" in ribgraphname or "_10min_" in ribgraphname:
if "combo" not in ribgraphname:
newarray_dict[ribgraphname] = (arraywt, arraymut)
anov_dict[ribgraphname] = (arraywt, arraymut)
elif "fullboutdata" in ribgraphname:
fullmaxpeakswt,fullmaxpeakslocwt = calculate_peak(arraywt)
fullmaxpeaksmut,fullmaxpeakslocmut = calculate_peak(arraymut)
#newarray_dict[ribgraphname] = (arraywt, arraymut)
ribbon_plot(arraywt, arraymut, ribgraphname.split('.')[0], find_labels(ribgraphname)[0], find_labels(ribgraphname)[1])
mlname = ribgraphname.replace("fullboutdata", "fullboutdatamaxloc")
mname = ribgraphname.replace("fullboutdata", "fullboutdatamax")
if "_darkflash" not in ribgraphname:
anov_dict[mname] = (fullmaxpeakswt, fullmaxpeaksmut)
anov_dict[mlname] = (fullmaxpeakslocwt, fullmaxpeakslocmut)
else:
#print "ones that are left: ", ribgraphname # this should be everything else
# avoiding all the ones where there is no data in the file (like velocity on slow dark flashes)
if np.shape(arraywt)[0] > 0:
if "_darkflash" in ribgraphname:
newarray_dict[ribgraphname] = (arraywt, arraymut)
else:
anov_dict[ribgraphname] = (arraywt, arraymut)
else:
# for the histgraphs, probably don't need any statistics, but want to make the plots
bincenters = np.loadtxt(ribgraphname + "_bincenters", delimiter = ',')
if arraywt.ndim <2 or arraymut.ndim < 2:
return
ribbon_plot(arraywt, arraymut, ribgraphname.split('.')[0], find_labels(ribgraphname)[0], find_labels(ribgraphname)[1], bincenters)
# no longer adding it, just making the graph right away here, since I don't need stats on it
#anov_dict[ribgraphname] = (arraywt, arraymut)
def ratiographs(anov_dict):
# This would fail if you were renaming the ratios something that was in doubledict . . .
for k in anov_dict.keys():
ksplit = k.split(".")[0].split("_")[-1]
#ribgraph_mean_ribbon_fullboutdatamaxloc_dist_nighttappre102.png
if "fullboutdata" not in k and "histgraph" not in k:
if ksplit in doubledict.keys():
# ksplit is the key, type is the value
# "nightprepulseinhibition100b":"nightprepulseinhibition102"
# want to divide the key by the value
type = doubledict[ksplit]
#newrname is value
newrname = k.replace(ksplit,type)
#print ksplit, k, newrname, anov_dict[k][0], anov_dict[newrname][0]
#divwt = np.divide(np.nanmean(anov_dict[newrname][0], axis=1),np.nanmean(anov_dict[k][0],axis=1))
divwt = np.divide(np.nanmean(anov_dict[k][0], axis=1),np.nanmean(anov_dict[newrname][0],axis=1))
#divmut = np.divide(np.nanmean(anov_dict[newrname][1], axis=1),np.nanmean(anov_dict[k][1],axis=1))
divmut = np.divide(np.nanmean(anov_dict[k][1], axis=1),np.nanmean(anov_dict[newrname][1],axis=1))
#newname = "ratio" + type + "_over_" + k.split(".")[0] + ".png"
newname = "ratio" + "_".join(k.split(".")[0].split("_")[:-1]) + "_" + ksplit + "_over_" + type + ".png"
anov_dict[newname] = (divwt,divmut)
def all():
#for file in glob.glob("ribgraph_mean_ribbonbout_numberofbouts_*_day2night*.png"):
timenewarray_dict = {} # Data I'm using with linear model, needed preprocessing, will use the coefficient (first value in the list) to figure out if + or -
anovnewarray_dict = {} # Data I'm just doing the anova on
for file in glob.glob("rib*_a1_data"):
file = file.split("_a1_data")[0]
try:
read_process_data(file, timenewarray_dict, anovnewarray_dict)
except:
continue
# setup the ratio sets
ratiographs(anovnewarray_dict)
print "Anova section: "
for k2 in anovnewarray_dict.keys():
if "histgraph" not in k2:
# try:
# anova(k2, anovnewarray_dict[k2][0], anovnewarray_dict[k2][1])
# except:
# print "anova failed: ", k2
box_plot(anovnewarray_dict[k2][0], anovnewarray_dict[k2][1], k2.split('.')[0], find_labels(k2)[1])
if "fullboutdatamax" not in k2 or "ratio" not in k2:
if anovnewarray_dict[k2][0].ndim <2 or anovnewarray_dict[k2][1].ndim < 2:
print k2.split('.')[0], " only has one dimension ", anovnewarray_dict[k2][0].ndim, anovnewarray_dict[k2][1].ndim
continue
ribbon_plot(anovnewarray_dict[k2][0], anovnewarray_dict[k2][1], k2.split('.')[0], find_labels(k2)[0], find_labels(k2)[1])
else:
if anovnewarray_dict[k2][0].ndim <2 or anovnewarray_dict[k2][1].ndim < 2:
print k2.split('.')[0], " only has one dimension ", anovnewarray_dict[k2][0].ndim, anovnewarray_dict[k2][1].ndim
continue
ribbon_plot(anovnewarray_dict[k2][0], anovnewarray_dict[k2][1], k2.split('.')[0], find_labels(k2)[0], find_labels(k2)[1])
print "Linear multiply model section: "
for k in timenewarray_dict.keys():
try:
#linear_model_array(k, timenewarray_dict[k][0], timenewarray_dict[k][1])
ribbon_plot(timenewarray_dict[k][0], timenewarray_dict[k][1], k.split('.')[0], find_labels(k)[0], find_labels(k)[1])
except:
print "linear model failed: ", k
# print "Linear retime model section: "
# The time as a random effect model doesn't do well with heatshock data or stimuli (things with single peak)
# for k in timenewarray_dict.keys():
# try:
# linear_model_re(k, timenewarray_dict[k][0], timenewarray_dict[k][1])
# except:
# print "linear model failed: ", k
all()
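
# Illustrative sketch (not part of the analysis pipeline): the same
# statsmodels mixed-model call used in linear_model_array(), run on a tiny
# hand-made long-format table so the formula can be inspected in isolation.
# The helper name and the toy numbers are assumptions made for illustration
# only; real runs go through the CSV files written above.
def _demo_mixedlm():
    toy = pd.DataFrame({
        "time": [0, 1, 2, 3] * 4,
        "movement": [1.0, 2.1, 2.9, 4.2, 0.9, 2.0, 3.1, 3.8,
                     1.5, 3.0, 4.6, 6.1, 1.4, 3.2, 4.4, 6.0],
        "id": [0] * 4 + [1] * 4 + [100] * 4 + [101] * 4,
        "mutornot": ["wt"] * 8 + ["mut"] * 8,
    })
    model = sm.MixedLM.from_formula(
        "movement ~ mutornot + time + mutornot * time", toy, groups=toy["id"])
    return model.fit()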
| mit |
wathen/PhD | MHD/FEniCS/BDMstokes/stokesPETSCnocorrect.py | 1 | 10199 | #!/usr/bin/python
import petsc4py
import slepc4py
import sys
petsc4py.init(sys.argv)
slepc4py.init(sys.argv)
from petsc4py import PETSc
from slepc4py import SLEPc
Print = PETSc.Sys.Print
# from MatrixOperations import *
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import os
import scipy.io
from PyTrilinos import Epetra, EpetraExt, AztecOO, ML, Amesos
from scipy2Trilinos import scipy_csr_matrix2CrsMatrix
import PETScIO as IO
m = 5
errL2u = np.zeros((m-1,1))
errL2p = np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'Iterative'
ShowResultPlots = 'no'
ShowErrorPlots = 'no'
EigenProblem = 'no'
SavePrecond = 'no'
case = 1
parameters['linear_algebra_backend'] = 'uBLAS'
for xx in xrange(1,m):
print xx
nn = 2**(xx)
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn
mesh = RectangleMesh(-1, -1, 1, 1, nn, nn,'right')
parameters['reorder_dofs_serial'] = False
V = FunctionSpace(mesh, "BDM", 1)
Q = FunctionSpace(mesh, "DG", 0)
parameters['reorder_dofs_serial'] = False
W = V*Q
Vdim[xx-1] = V.dim()
Qdim[xx-1] = Q.dim()
Wdim[xx-1] = W.dim()
print "\n\nV: ",Vdim[xx-1],"Q: ",Qdim[xx-1],"W: ",Wdim[xx-1],"\n\n"
def boundary(x, on_boundary):
return on_boundary
if case == 1:
u0 = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
p0 = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)")
elif case == 2:
u0 = Expression(("sin(pi*x[1])","sin(pi*x[0])"))
p0 = Expression("sin(x[1]*x[0])")
elif case == 3:
u0 = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
p0 = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
bc = DirichletBC(W.sub(0),u0, boundary)
bcs = [bc]
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
if case == 1:
f = Expression(("120*x[0]*x[1]*(1-mu)","60*(pow(x[0],2)-pow(x[1],2))*(1-mu)"), mu = 1e0)
elif case == 2:
f = Expression(("pi*pi*sin(pi*x[1])+x[1]*cos(x[1]*x[0])","pi*pi*sin(pi*x[0])+x[0]*cos(x[1]*x[0])"))
elif case == 3:
f = Expression(("8*pi*pi*cos(2*pi*x[1])*sin(2*pi*x[0]) + 2*pi*cos(2*pi*x[0])*sin(2*pi*x[1])","2*pi*cos(2*pi*x[0])*sin(2*pi*x[1]) - 8*pi*pi*cos(2*pi*x[0])*sin(2*pi*x[1])"))
N = FacetNormal(mesh)
h = CellSize(mesh)
h_avg =avg(h)
alpha = 10.0
gamma =10.0
n = FacetNormal(mesh)
h = CellSize(mesh)
h_avg =avg(h)
d = 0
a11 = inner(grad(v), grad(u))*dx \
- inner(avg(grad(v)), outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
- inner(outer(v('+'),N('+'))+outer(v('-'),N('-')), avg(grad(u)))*dS \
+ alpha/h_avg*inner(outer(v('+'),N('+'))+outer(v('-'),N('-')),outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
- inner(outer(v,N), grad(u))*ds \
- inner(grad(v), outer(u,N))*ds \
+ gamma/h*inner(v,u)*ds
a12 = div(v)*p*dx
a21 = div(u)*q*dx
L1 = inner(v,f)*dx + gamma/h*inner(u0,v)*ds - inner(grad(v),outer(u0,N))*ds
a = a11-a12-a21
i = p*q*dx
tic()
AA, bb = assemble_system(a, L1, bcs)
As = AA.sparray()
As.eliminate_zeros()
A = PETSc.Mat().createAIJ(size=As.shape,csr=(As.indptr, As.indices, As.data))
print toc()
b = bb.array()
zeros = 0*b
del bb
bb = IO.arrayToVec(b)
x = IO.arrayToVec(zeros)
PP, Pb = assemble_system(a11+i,L1,bcs)
Ps = PP.sparray()
Ps.eliminate_zeros()
P = PETSc.Mat().createAIJ(size=Ps.shape,csr=(Ps.indptr, Ps.indices, Ps.data))
if (SavePrecond == 'yes'):
Wstring = str(int(Wdim[xx-1][0]-1))
nameA ="".join(['eigenvalues/A',Wstring,".mat"])
scipy.io.savemat(nameA, mdict={'A': As},oned_as='row')
nameP ="".join(['eigenvalues/P',Wstring,".mat"])
scipy.io.savemat(nameP, mdict={'P': Ps},oned_as='row')
del AA, As, PP, Ps
if (EigenProblem == 'yes'):
eigenvalues = np.zeros((Wdim[xx-1][0]-1,1))
xr, tmp = A.getVecs()
xi, tmp = A.getVecs()
E = SLEPc.EPS().create()
E.setOperators(A,P)
E.setProblemType(SLEPc.EPS.ProblemType.GNHEP)
# E.setBalance()
E.setDimensions(Wdim[xx-1][0])
E.setTolerances(tol=1.e-15, max_it=500000)
E.solve()
Print("")
its = E.getIterationNumber()
Print("Number of iterations of the method: %i" % its)
sol_type = E.getType()
Print("Solution method: %s" % sol_type)
nev, ncv, mpd = E.getDimensions()
Print("Number of requested eigenvalues: %i" % nev)
tol, maxit = E.getTolerances()
Print("Stopping condition: tol=%.4g, maxit=%d" % (tol, maxit))
nconv = E.getConverged()
Print("Number of converged eigenpairs: %d" % nconv)
if nconv > 0:
Print("")
Print(" k ||Ax-kx||/||kx|| ")
Print("----------------- ------------------")
for i in range(nconv):
k = E.getEigenpair(i, xr, xi)
eigenvalues[i-1] = k.real
error = E.computeRelativeError(i)
if k.imag != 0.0:
Print(" %12f" % (k.real))
else:
Print(" %12f " % (k.real))
Print("")
Wstring = str(int(Wdim[xx-1][0]-1))
name ="".join(['eigenvalues/e',Wstring,".mat"])
scipy.io.savemat(name, mdict={'e': eigenvalues},oned_as='row')
if (Solving == 'Direct'):
ksp = PETSc.KSP().create()
ksp.setOperators(A)
ksp.setFromOptions()
print 'Solving with:', ksp.getType()
tic()
ksp.solve(bb, x)
SolTime[xx-1] = toc()
print "time to solve: ",SolTime[xx-1]
A.destroy()
if (Solving == 'Iterative'):
ksp = PETSc.KSP().create()
pc = PETSc.PC().create()
ksp.setFromOptions()
# ksp.create(PETSc.COMM_WORLD)
        # use MINRES with a relative tolerance of 1e-10
ksp.setTolerances(1e-10)
ksp.setType('minres')
pc = ksp.getPC()
pc.setOperators(P)
pc.getType()
# and next solve
ksp.setOperators(A,P)
tic()
ksp.solve(bb, x)
SolTime[xx-1] = toc()
print "time to solve: ",SolTime[xx-1]
iterations[xx-1] = ksp.its
print "iterations = ", iterations[xx-1]
if (Solving == 'Iterative' or Solving == 'Direct'):
if case == 1:
ue = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
pe = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)+5")
elif case == 2:
ue = Expression(("sin(pi*x[1])","sin(pi*x[0])"))
pe = Expression("sin(x[1]*x[0])")
elif case == 3:
ue = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
pe = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
u = interpolate(ue,V)
p = interpolate(pe,Q)
Nv = u.vector().array().shape
X = IO.vecToArray(x)
x = X[0:Vdim[xx-1][0]]
# x = x_epetra[0:Nv[0]]
ua = Function(V)
ua.vector()[:] = x
udiv[xx-1] = assemble(div(ua)*dx)
pp = X[Nv[0]:]
pa = Function(Q)
pa.vector()[:] = pp
pend = assemble(pa*dx)
ones = Function(Q)
ones.vector()[:]=(0*pp+1)
pp = Function(Q)
pp.vector()[:] = pa.vector().array()- assemble(pa*dx)/assemble(ones*dx)
pInterp = interpolate(pe,Q)
pe = Function(Q)
pe.vector()[:] = pInterp.vector().array()
const = - assemble(pe*dx)/assemble(ones*dx)
pe.vector()[:] = pe.vector()[:]+const
errL2u[xx-1] = errornorm(ue,ua,norm_type="L2", degree_rise=6,mesh=mesh)
errL2p[xx-1] = errornorm(pe,pp,norm_type="L2", degree_rise=6,mesh=mesh)
if xx == 1:
l2uorder[xx-1] = 0
else:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
print errL2u[xx-1]
print errL2p[xx-1]
if (ShowErrorPlots == 'yes'):
plt.loglog(NN,errL2u)
    plt.title('Error plot for BDM1 elements - Velocity L2 convergence = %f' % np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
plt.figure()
plt.loglog(NN,errL2p)
    plt.title('Error plot for DG0 elements - Pressure L2 convergence = %f' % np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
plt.show()
if (Solving == 'Iterative' or Solving == 'Direct'):
print "\n\n"
print " ==============================="
print " Results Table"
print " ===============================\n\n"
import pandas as pd
if (Solving == 'Iterative'):
tableTitles = ["Total DoF","V DoF","Q DoF","# iters","Soln Time","V-L2","V-order","||div u_h||","P-L2","P-order"]
tableValues = np.concatenate((Wdim,Vdim,Qdim,iterations,SolTime,errL2u,l2uorder,udiv,errL2p,l2porder),axis=1)
elif (Solving == 'Direct'):
tableTitles = ["Total DoF","V DoF","Q DoF","Soln Time","V-L2","V-order","||div u_h||","P-L2","P-order"]
tableValues = np.concatenate((Wdim,Vdim,Qdim,SolTime,errL2u,l2uorder,udiv,errL2p,l2porder),axis=1)
df = pd.DataFrame(tableValues, columns = tableTitles)
pd.set_printoptions(precision=3)
print df
print "\n\n"
print "Velocity Elements rate of convergence ", np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1])))
print "Pressure Elements rate of convergence ", np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1])))
print df.to_latex()
if (SavePrecond == 'yes'):
scipy.io.savemat('eigenvalues/Wdim.mat', {'Wdim':Wdim-1},)
if (ShowResultPlots == 'yes'):
plot(ua)
plot(interpolate(ue,V))
plot(pp)
plot(interpolate(pe,Q))
interactive()
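
# Illustrative sketch (not part of the convergence study above): it isolates
# the PETSc pattern used in the 'Iterative' branch -- MINRES with a separate
# preconditioning matrix passed through ksp.setOperators(A, P) -- on a tiny
# tridiagonal SPD system, so the solver setup can be exercised without FEniCS.
# The helper name and the toy matrices are assumptions made for illustration.
def _demo_minres_with_preconditioner(n=10):
    A = PETSc.Mat().createAIJ([n, n], nnz=3)
    P = PETSc.Mat().createAIJ([n, n], nnz=1)
    for i in range(n):
        A[i, i] = 2.0
        P[i, i] = 2.0  # simple diagonal (Jacobi-like) preconditioner
        if i > 0:
            A[i, i - 1] = -1.0
        if i < n - 1:
            A[i, i + 1] = -1.0
    A.assemble()
    P.assemble()
    x, b = A.getVecs()
    b.set(1.0)
    ksp = PETSc.KSP().create()
    ksp.setType('minres')
    ksp.setTolerances(1e-10)
    ksp.setOperators(A, P)
    ksp.solve(b, x)
    return x.getArray()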
| mit |
DucQuang1/BuildingMachineLearningSystemsWithPython | ch08/chapter.py | 21 | 6372 | import numpy as np # NOT IN BOOK
from matplotlib import pyplot as plt # NOT IN BOOK
def load():
import numpy as np
from scipy import sparse
data = np.loadtxt('data/ml-100k/u.data')
ij = data[:, :2]
ij -= 1 # original data is in 1-based system
values = data[:, 2]
reviews = sparse.csc_matrix((values, ij.T)).astype(float)
return reviews.toarray()
reviews = load()
U,M = np.where(reviews)
import random
test_idxs = np.array(random.sample(range(len(U)), len(U)//10))
train = reviews.copy()
train[U[test_idxs], M[test_idxs]] = 0
test = np.zeros_like(reviews)
test[U[test_idxs], M[test_idxs]] = reviews[U[test_idxs], M[test_idxs]]
class NormalizePositive(object):
def __init__(self, axis=0):
self.axis = axis
def fit(self, features, y=None):
if self.axis == 1:
features = features.T
# count features that are greater than zero in axis 0:
binary = (features > 0)
count0 = binary.sum(axis=0)
# to avoid division by zero, set zero counts to one:
count0[count0 == 0] = 1.
# computing the mean is easy:
self.mean = features.sum(axis=0)/count0
# only consider differences where binary is True:
diff = (features - self.mean) * binary
diff **= 2
# regularize the estimate of std by adding 0.1
self.std = np.sqrt(0.1 + diff.sum(axis=0)/count0)
return self
def transform(self, features):
if self.axis == 1:
features = features.T
binary = (features > 0)
features = features - self.mean
features /= self.std
features *= binary
if self.axis == 1:
features = features.T
return features
def inverse_transform(self, features, copy=True):
if copy:
features = features.copy()
if self.axis == 1:
features = features.T
features *= self.std
features += self.mean
if self.axis == 1:
features = features.T
return features
def fit_transform(self, features):
return self.fit(features).transform(features)
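
# Illustrative sketch (not from the book): a tiny self-contained check of
# NormalizePositive. Zeros mark missing ratings, so the mean and standard
# deviation are estimated per column only over the observed (positive)
# entries, and inverse_transform recovers those observed values. The helper
# name and the toy matrix are assumptions made for illustration only.
def _demo_normalize_positive():
    ratings = np.array([[5.0, 0.0, 3.0],
                        [4.0, 2.0, 0.0],
                        [0.0, 1.0, 4.0]])
    norm_demo = NormalizePositive(axis=0)
    transformed = norm_demo.fit_transform(ratings.copy())
    restored = norm_demo.inverse_transform(transformed)
    # Observed (non-zero) entries round-trip exactly (up to float error):
    assert np.allclose(restored[ratings > 0], ratings[ratings > 0])
    return transformed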
norm = NormalizePositive(axis=1)
binary = (train > 0)
train = norm.fit_transform(train)
# plot just 200x200 area for space reasons
plt.imshow(binary[:200, :200], interpolation='nearest')
from scipy.spatial import distance
# compute all pair-wise distances:
dists = distance.pdist(binary, 'correlation')
# Convert to square form, so that dists[i,j]
# is distance between binary[i] and binary[j]:
dists = distance.squareform(dists)
neighbors = dists.argsort(axis=1)
# We are going to fill this matrix with results
filled = train.copy()
for u in range(filled.shape[0]):
# n_u is neighbors of user
n_u = neighbors[u, 1:]
for m in range(filled.shape[1]):
# get relevant reviews in order!
revs = [train[neigh, m]
for neigh in n_u
if binary [neigh, m]]
if len(revs):
# n is the number of reviews for this movie
n = len(revs)
# take half of the reviews plus one into consideration:
n //= 2
n += 1
revs = revs[:n]
filled[u,m] = np.mean(revs)
predicted = norm.inverse_transform(filled)
from sklearn import metrics
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (binary neighbors): {:.1%}'.format(r2))
reviews = reviews.T
# use same code as before
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (binary movie neighbors): {:.1%}'.format(r2))
from sklearn.linear_model import ElasticNetCV # NOT IN BOOK
reg = ElasticNetCV(alphas=[
0.0125, 0.025, 0.05, .125, .25, .5, 1., 2., 4.])
filled = train.copy()
# iterate over all users:
for u in range(train.shape[0]):
curtrain = np.delete(train, u, axis=0)
bu = binary[u]
reg.fit(curtrain[:,bu].T, train[u, bu])
filled[u, ~bu] = reg.predict(curtrain[:,~bu].T)
predicted = norm.inverse_transform(filled)
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (user regression): {:.1%}'.format(r2))
# SHOPPING BASKET ANALYSIS
# This is the slow version of the code, which will take a long time to
# complete.
from collections import defaultdict
from itertools import chain
# File is downloaded as a compressed file
import gzip
# file format is a line per transaction
# of the form '12 34 342 5...'
dataset = [[int(tok) for tok in line.strip().split()]
for line in gzip.open('data/retail.dat.gz')]
dataset = [set(d) for d in dataset]
# count how often each product was purchased:
counts = defaultdict(int)
for elem in chain(*dataset):
counts[elem] += 1
minsupport = 80
valid = set(k for k,v in counts.items() if (v >= minsupport))
itemsets = [frozenset([v]) for v in valid]
freqsets = []
for i in range(16):
nextsets = []
tested = set()
for it in itemsets:
for v in valid:
if v not in it:
# Create a new candidate set by adding v to it
c = (it | frozenset([v]))
# check If we have tested it already
if c in tested:
continue
tested.add(c)
# Count support by looping over dataset
# This step is slow.
# Check `apriori.py` for a better implementation.
support_c = sum(1 for d in dataset if d.issuperset(c))
if support_c > minsupport:
nextsets.append(c)
freqsets.extend(nextsets)
itemsets = nextsets
if not len(itemsets):
break
print("Finished!")
minlift = 5.0
nr_transactions = float(len(dataset))
for itemset in freqsets:
for item in itemset:
consequent = frozenset([item])
antecedent = itemset-consequent
base = 0.0
# acount: antecedent count
acount = 0.0
# ccount : consequent count
ccount = 0.0
for d in dataset:
if item in d: base += 1
if d.issuperset(itemset): ccount += 1
if d.issuperset(antecedent): acount += 1
base /= nr_transactions
p_y_given_x = ccount/acount
lift = p_y_given_x / base
if lift > minlift:
print('Rule {0} -> {1} has lift {2}'
.format(antecedent, consequent,lift))
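
# Illustrative sketch (not from the book): a worked, by-hand instance of the
# support/lift arithmetic used in the loop above. With eight transactions,
# item 2 occurs in four of them (base = 0.5); of the four transactions that
# contain item 1, three also contain item 2 (p_y_given_x = 0.75), so the lift
# of the rule {1} -> {2} is 0.75 / 0.5 = 1.5. The helper name and the toy
# transactions are assumptions made for illustration only.
def _demo_lift():
    toy = [set(t) for t in ([1, 2], [1, 2], [1, 2, 3], [1, 4],
                            [2, 5], [3, 4], [5, 6], [6, 7])]
    n = float(len(toy))
    base = sum(1 for t in toy if 2 in t) / n
    acount = sum(1 for t in toy if t.issuperset({1}))
    ccount = sum(1 for t in toy if t.issuperset({1, 2}))
    return (ccount / float(acount)) / base  # == 1.5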
| mit |
sympsi/sympsi | sympsi/state.py | 1 | 28587 | """Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympsi.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\u27E8")
_rbracket_ucode = u("\u27E9")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\u2758")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\u2571'), u('\u2572'), u('\u2502')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
        # The extra {} brackets are needed to get matplotlib's latex
        # renderer to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympsi.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympsi.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympsi.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympsi.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympsi.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
    Create a simple Ket and look at its properties::
>>> from sympsi import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
    Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympsi.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympsi import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
    Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympsi.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For a time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For a time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympsi import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympsi.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For a time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympsi import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympsi.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
    >>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2/L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympsi.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympsi.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
        Return the limits of the coordinates which the w.f. depends on. If no
limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympsi.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympsi.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympsi.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympsi.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympsi.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
        Return the squared magnitude of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympsi.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/finance_work2.py | 3 | 6269 | import datetime
import numpy as np
import matplotlib.colors as colors
import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
startdate = datetime.date(2006,1,1)
today = enddate = datetime.date.today()
ticker = 'SPY'
fh = finance.fetch_historical_yahoo(ticker, startdate, enddate)
# a numpy record array with fields: date, open, high, low, close, volume, adj_close
r = mlab.csv2rec(fh); fh.close()
r.sort()
def moving_average(x, n, type='simple'):
"""
compute an n period moving average.
type is 'simple' | 'exponential'
"""
x = np.asarray(x)
if type=='simple':
weights = np.ones(n)
else:
weights = np.exp(np.linspace(-1., 0., n))
weights /= weights.sum()
a = np.convolve(x, weights, mode='full')[:len(x)]
a[:n] = a[n]
return a
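# Illustrative usage (sketch, not in the original example): both weighting
# schemes return an array the same length as the input, with the first n
# entries padded by the n-th value, so the earliest points are only rough
# estimates. 'prices' stands for any 1-D series, e.g. the r.adj_close column
# used further below.
#
#     sma20 = moving_average(prices, 20, type='simple')
#     ema20 = moving_average(prices, 20, type='exponential')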
def relative_strength(prices, n=14):
"""
compute the n period relative strength indicator
http://stockcharts.com/school/doku.php?id=chart_school:glossary_r#relativestrengthindex
http://www.investopedia.com/terms/r/rsi.asp
"""
deltas = np.diff(prices)
seed = deltas[:n+1]
up = seed[seed>=0].sum()/n
down = -seed[seed<0].sum()/n
rs = up/down
rsi = np.zeros_like(prices)
rsi[:n] = 100. - 100./(1.+rs)
for i in range(n, len(prices)):
        delta = deltas[i-1]  # because the diff is one element shorter
if delta>0:
upval = delta
downval = 0.
else:
upval = 0.
downval = -delta
up = (up*(n-1) + upval)/n
down = (down*(n-1) + downval)/n
rs = up/down
rsi[i] = 100. - 100./(1.+rs)
return rsi
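# Illustrative usage (sketch, not in the original example): RSI runs from 0 to
# 100; readings above 70 are conventionally treated as overbought and readings
# below 30 as oversold, the same thresholds drawn on ax1 further below.
#
#     rsi14 = relative_strength(r.adj_close)   # default n=14
#     overbought_days = r.date[rsi14 >= 70]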
def moving_average_convergence(x, nslow=26, nfast=12):
"""
    compute the MACD (Moving Average Convergence/Divergence) using a fast and slow exponential moving average
return value is emaslow, emafast, macd which are len(x) arrays
"""
emaslow = moving_average(x, nslow, type='exponential')
emafast = moving_average(x, nfast, type='exponential')
return emaslow, emafast, emafast - emaslow
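# Illustrative usage (sketch, not in the original example): the MACD "signal"
# line is a further EMA of the macd series (nema=9 below), and a cross of macd
# above its signal line is the usual bullish trigger.
#
#     emaslow, emafast, macd = moving_average_convergence(prices)
#     signal = moving_average(macd, 9, type='exponential')
#     crossed_up = (macd[1:] > signal[1:]) & (macd[:-1] <= signal[:-1])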
plt.rc('axes', grid=True)
plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)
textsize = 9
left, width = 0.1, 0.8
rect1 = [left, 0.7, width, 0.2]
rect2 = [left, 0.3, width, 0.4]
rect3 = [left, 0.1, width, 0.2]
fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6' # the axes background color
ax1 = fig.add_axes(rect1, axisbg=axescolor) #left, bottom, width, height
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax2t = ax2.twinx()
ax3 = fig.add_axes(rect3, axisbg=axescolor, sharex=ax1)
### plot the relative strength indicator
prices = r.adj_close
rsi = relative_strength(prices)
fillcolor = 'darkgoldenrod'
ax1.plot(r.date, rsi, color=fillcolor)
ax1.axhline(70, color=fillcolor)
ax1.axhline(30, color=fillcolor)
ax1.fill_between(r.date, rsi, 70, where=(rsi>=70), facecolor=fillcolor, edgecolor=fillcolor)
ax1.fill_between(r.date, rsi, 30, where=(rsi<=30), facecolor=fillcolor, edgecolor=fillcolor)
ax1.text(0.6, 0.9, '>70 = overbought', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.text(0.6, 0.1, '<30 = oversold', transform=ax1.transAxes, fontsize=textsize)
ax1.set_ylim(0, 100)
ax1.set_yticks([30,70])
ax1.text(0.025, 0.95, 'RSI (14)', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.set_title('%s daily'%ticker)
### plot the price and volume data
dx = r.adj_close - r.close
low = r.low + dx
high = r.high + dx
deltas = np.zeros_like(prices)
deltas[1:] = np.diff(prices)
up = deltas>0
ax2.vlines(r.date[up], low[up], high[up], color='black', label='_nolegend_')
ax2.vlines(r.date[~up], low[~up], high[~up], color='black', label='_nolegend_')
ma20 = moving_average(prices, 20, type='simple')
ma200 = moving_average(prices, 200, type='simple')
linema20, = ax2.plot(r.date, ma20, color='blue', lw=2, label='MA (20)')
linema200, = ax2.plot(r.date, ma200, color='red', lw=2, label='MA (200)')
last = r[-1]
s = '%s O:%1.2f H:%1.2f L:%1.2f C:%1.2f, V:%1.1fM Chg:%+1.2f' % (
today.strftime('%d-%b-%Y'),
last.open, last.high,
last.low, last.close,
last.volume*1e-6,
last.close-last.open )
t4 = ax2.text(0.3, 0.9, s, transform=ax2.transAxes, fontsize=textsize)
props = font_manager.FontProperties(size=10)
leg = ax2.legend(loc='center left', shadow=True, fancybox=True, prop=props)
leg.get_frame().set_alpha(0.5)
volume = (r.close*r.volume)/1e6 # dollar volume in millions
vmax = volume.max()
poly = ax2t.fill_between(r.date, volume, 0, label='Volume', facecolor=fillcolor, edgecolor=fillcolor)
ax2t.set_ylim(0, 5*vmax)
ax2t.set_yticks([])
### compute the MACD indicator
fillcolor = 'darkslategrey'
nslow = 26
nfast = 12
nema = 9
emaslow, emafast, macd = moving_average_convergence(prices, nslow=nslow, nfast=nfast)
ema9 = moving_average(macd, nema, type='exponential')
ax3.plot(r.date, macd, color='black', lw=2)
ax3.plot(r.date, ema9, color='blue', lw=1)
ax3.fill_between(r.date, macd-ema9, 0, alpha=0.5, facecolor=fillcolor, edgecolor=fillcolor)
ax3.text(0.025, 0.95, 'MACD (%d, %d, %d)'%(nfast, nslow, nema), va='top',
transform=ax3.transAxes, fontsize=textsize)
#ax3.set_yticks([])
# turn off upper axis tick labels, rotate the lower ones, etc
for ax in ax1, ax2, ax2t, ax3:
if ax!=ax3:
for label in ax.get_xticklabels():
label.set_visible(False)
else:
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_horizontalalignment('right')
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
class MyLocator(mticker.MaxNLocator):
def __init__(self, *args, **kwargs):
mticker.MaxNLocator.__init__(self, *args, **kwargs)
def __call__(self, *args, **kwargs):
return mticker.MaxNLocator.__call__(self, *args, **kwargs)
# at most 5 ticks, pruning the upper and lower so they don't overlap
# with other ticks
#ax2.yaxis.set_major_locator(mticker.MaxNLocator(5, prune='both'))
#ax3.yaxis.set_major_locator(mticker.MaxNLocator(5, prune='both'))
ax2.yaxis.set_major_locator(MyLocator(5, prune='both'))
ax3.yaxis.set_major_locator(MyLocator(5, prune='both'))
plt.show()
| gpl-2.0 |
jakobworldpeace/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 66 | 8261 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
    # Assert the first 10 explained variance ratios agree across 10 vs. 20 components
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
def test_singular_values():
# Check that the TruncatedSVD output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=2, algorithm='arpack',
random_state=rng).fit(X)
    rpca = TruncatedSVD(n_components=2, algorithm='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 12)
# Compare to the Frobenius norm
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 12)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 12)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=3, algorithm='arpack',
random_state=rng)
rpca = TruncatedSVD(n_components=3, algorithm='randomized',
random_state=rng)
X_apca = apca.fit_transform(X)
X_rpca = rpca.fit_transform(X)
X_apca /= np.sqrt(np.sum(X_apca**2.0, axis=0))
X_rpca /= np.sqrt(np.sum(X_rpca**2.0, axis=0))
X_apca[:, 0] *= 3.142
X_apca[:, 1] *= 2.718
X_rpca[:, 0] *= 3.142
X_rpca[:, 1] *= 2.718
X_hat_apca = np.dot(X_apca, apca.components_)
X_hat_rpca = np.dot(X_rpca, rpca.components_)
apca.fit(X_hat_apca)
rpca.fit(X_hat_rpca)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
| bsd-3-clause |
chiffa/numpy | numpy/lib/twodim_base.py | 1 | 26904 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
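    # fill the k-th diagonal through the flattened array: starting at offset
    # i, a stride of M+1 moves one row down and one column right each step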
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
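        # cumulative products across the columns turn the repeated x into
        # x, x**2, ..., x**(N-1)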
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print(H[::-1]) # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
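    # a lone sequence of bin edges (length other than 1 or 2) is shared by
    # both dimensions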
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
    An offset can also be passed to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
anirudhjayaraman/scikit-learn | sklearn/svm/tests/test_sparse.py | 70 | 12992 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
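    # Fit the same SVM on dense and sparse copies of X_train and check that
    # support vectors, dual coefficients, predictions and decision values agree.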
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
kevincwright/quagmire | display.py | 1 | 5359 | # -*- coding: utf-8 -*-
# pybec v0.1.0 Copyright © 2012
# Kevin C. Wright
# Licensed under the terms of the GNU Public License v 3.0 (see LICENSE.txt)
# pylint: disable-msg=C0103
from __future__ import print_function, division
import matplotlib.pyplot as plt
from numpy import array, nan, hstack, vstack
from pybec.exceptions import PyBECError
class DisplayError(PyBECError):
"""Exception for display functions"""
pass
def display(datastruct, minval = 0, maxval = 6, titleID = True, fignum = None \
, imwrap = 5, colormap = None):
""" Function for displaying the OD image(s) in pybec data structures"""
fig = plt.figure(fignum)
ax = fig.add_subplot(111, xticks = [], yticks = [])
if datastruct.__class__.__name__ == 'Series':
imdat = _build_series_mosaic(datastruct, imwrap)
title = datastruct.series_id
        if title is None:
titleID = False
elif datastruct.__class__.__name__ == 'Cycle':
imdat = _build_cycle_imstrip(datastruct)
title = datastruct.cycle_id
elif datastruct.__class__.__name__ == 'Frame':
imdat = datastruct.OD
title = "Frame"
elif datastruct.__class__.__name__ == 'Cloud':
imdat = datastruct.get_OD()
title = "Cloud"
elif datastruct.__class__.__name__ in ['array', 'ndarray']:
imdat = datastruct
titleID = False
else:
raise DisplayError('Invalid Data Structure %s' % str(datastruct))
ax.imshow(imdat, interpolation = 'nearest', vmin = minval, vmax = maxval \
, cmap = colormap)
if titleID:
ax.set_title(title)
plt.show()
return ax
def _build_cycle_imstrip(cycle):
""" Takes the processed image frames in a cycle, crops them to the region
of interest, and combines them into a vertical (default) or
horizontal stack. Used by display() and save()."""
useimages = []
# defaults to using the ROI of the last frame in the cycle
roi = cycle.frames[-1].roi
height, width = roi.size
# stack the images vertically unless they're taller than they are wide
if height > 1.1 * width:
stack_direction = 'horizontal'
spacer = nanarray(height, 3)
else:
stack_direction = 'vertical'
spacer = nanarray(3, width)
for frame in cycle.frames:
# skip any frames that should not be included in the strip
if frame.proc_info['use_frame'] != 1:
continue
# get the ROI information for the frame
height, width = frame.roi.size
# put spacers (nan arrays) between frames in the plots
if len(useimages) > 0:
useimages.append( spacer )
# append the cropped image data for the frame to useimages
useimages.append(frame.OD[frame.roi.slices])
if stack_direction == 'vertical':
imstrip = vstack(useimages)
elif stack_direction == 'horizontal':
imstrip = hstack(useimages)
return imstrip
def _build_series_mosaic(series, imwrap = 5):
""" Takes the processed images from frames in a set of cycles, crops them
to the region of interest, and combines them into a mosaic of verticallly
(default) or horizontally stacked images. Used by display()."""
cycle_images = []
for cycle in series.cycles:
cycle_images.append(_build_cycle_imstrip(cycle))
num_cycles = len(cycle_images)
if num_cycles == 1:
return cycle_images[0]
elif num_cycles > 1:
rows = int((num_cycles-1) // imwrap + 1)
rem = num_cycles % imwrap
if rows == 1:
return hstack(cycle_images)
elif rows > 1:
rowarrs = []
for row in range(rows):
rowarrs.append(cycle_images[row*imwrap:(row+1)*imwrap])
if rem != 0:
h,w = cycle_images[0].shape
rowarrs[-1].extend([nanarray(h,w)]*(imwrap-rem))
rowdat = [hstack(row) for row in rowarrs]
return vstack(rowdat)
else:
            raise DisplayError('failed to build image mosaic')
else:
        raise DisplayError('failed to build image mosaic')
def _build_mosaic(image_list, imwrap = 5):
""" Takes the arrays in a list and combines them into a mosaic of
vertically (default) or horizontally stacked images. """
num_cycles = len(image_list)
if num_cycles == 1:
return image_list[0]
elif num_cycles > 1:
rows = int((num_cycles-1) // imwrap + 1)
rem = num_cycles % imwrap
if rows == 1:
return hstack(image_list)
elif rows > 1:
rowarrs = []
for row in range(rows):
rowarrs.append(image_list[row*imwrap:(row+1)*imwrap])
if rem != 0:
h,w = image_list[0].shape
rowarrs[-1].extend([nanarray(h,w)]*(imwrap-rem))
rowdat = [hstack(row) for row in rowarrs]
return vstack(rowdat)
else:
            raise DisplayError('failed to build image mosaic')
else:
        raise DisplayError('failed to build image mosaic')
def nanarray(height, width):
return array([[nan]*width]*height)
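if __name__ == '__main__':
    # Minimal smoke-test sketch (not part of the original module): tile three
    # dummy OD "images" into a mosaic and render it with display(). Real use
    # would pass pybec Series/Cycle/Frame/Cloud objects rather than bare arrays.
    dummy_images = [array([[float(k)] * 40] * 30) for k in range(3)]
    display(_build_mosaic(dummy_images, imwrap=2), maxval=3)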
| gpl-3.0 |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/IPython/kernel/zmq/pylab/backend_inline.py | 2 | 8288 | """Produce SVG versions of active plots for display by the rich Qt frontend.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Third-party imports
import matplotlib
from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
# Local imports.
from IPython.config.configurable import SingletonConfigurable
from IPython.core.display import display
from IPython.core.displaypub import publish_display_data
from IPython.core.pylabtools import print_figure, select_figure_format
from IPython.utils.traitlets import Dict, Instance, CaselessStrEnum, Bool
from IPython.utils.warn import warn
#-----------------------------------------------------------------------------
# Configurable for inline backend options
#-----------------------------------------------------------------------------
# inherit from InlineBackendConfig for deprecation purposes
class InlineBackendConfig(SingletonConfigurable):
pass
class InlineBackend(InlineBackendConfig):
"""An object to store configuration of the inline backend."""
def _config_changed(self, name, old, new):
# warn on change of renamed config section
if new.InlineBackendConfig != old.InlineBackendConfig:
warn("InlineBackendConfig has been renamed to InlineBackend")
super(InlineBackend, self)._config_changed(name, old, new)
# The typical default figure size is too large for inline use,
# so we shrink the figure size to 6x4, and tweak fonts to
# make that fit.
rc = Dict({'figure.figsize': (6.0,4.0),
# play nicely with white background in the Qt and notebook frontend
'figure.facecolor': 'white',
'figure.edgecolor': 'white',
# 12pt labels get cutoff on 6x4 logplots, so use 10pt.
'font.size': 10,
# 72 dpi matches SVG/qtconsole
# this only affects PNG export, as SVG has no dpi setting
'savefig.dpi': 72,
# 10pt still needs a little more room on the xlabel:
'figure.subplot.bottom' : .125
}, config=True,
help="""Subset of matplotlib rcParams that should be different for the
inline backend."""
)
figure_format = CaselessStrEnum(['svg', 'png', 'retina'], default_value='png', config=True,
help="The image format for figures with the inline backend.")
def _figure_format_changed(self, name, old, new):
if self.shell is None:
return
else:
select_figure_format(self.shell, new)
close_figures = Bool(True, config=True,
help="""Close all figures at the end of each cell.
When True, ensures that each cell starts with no active figures, but it
also means that one must keep track of references in order to edit or
redraw figures in subsequent cells. This mode is ideal for the notebook,
where residual plots from other cells might be surprising.
When False, one must call figure() to create new figures. This means
that gcf() and getfigs() can reference figures created in other cells,
and the active figure can continue to be edited with pylab/pyplot
methods that reference the current active figure. This mode facilitates
iterative editing of figures, and behaves most consistently with
other matplotlib backends, but figure barriers between cells must
be explicit.
""")
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def show(close=None):
"""Show all figures as SVG/PNG payloads sent to the IPython clients.
Parameters
----------
close : bool, optional
If true, a ``plt.close('all')`` call is automatically issued after
sending all the figures. If this is set, the figures will entirely
removed from the internal list of figures.
"""
if close is None:
close = InlineBackend.instance().close_figures
try:
for figure_manager in Gcf.get_all_fig_managers():
display(figure_manager.canvas.figure)
finally:
show._to_draw = []
if close:
matplotlib.pyplot.close('all')
# This flag will be reset by draw_if_interactive when called
show._draw_called = False
# list of figures to draw when flush_figures is called
show._to_draw = []
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
# signal that the current active figure should be sent at the end of
# execution. Also sets the _draw_called flag, signaling that there will be
# something to send. At the end of the code execution, a separate call to
# flush_figures() will act upon these values
manager = Gcf.get_active()
if manager is None:
return
fig = manager.canvas.figure
# Hack: matplotlib FigureManager objects in interacive backends (at least
# in some of them) monkeypatch the figure object and add a .show() method
# to it. This applies the same monkeypatch in order to support user code
# that might expect `.show()` to be part of the official API of figure
# objects.
# For further reference:
# https://github.com/ipython/ipython/issues/1612
# https://github.com/matplotlib/matplotlib/issues/835
if not hasattr(fig, 'show'):
# Queue up `fig` for display
fig.show = lambda *a: display(fig)
# If matplotlib was manually set to non-interactive mode, this function
# should be a no-op (otherwise we'll generate duplicate plots, since a user
# who set ioff() manually expects to make separate draw/show calls).
if not matplotlib.is_interactive():
return
# ensure current figure will be drawn, and each subsequent call
# of draw_if_interactive() moves the active figure to ensure it is
# drawn last
try:
show._to_draw.remove(fig)
except ValueError:
# ensure it only appears in the draw list once
pass
# Queue up the figure for drawing in next show() call
show._to_draw.append(fig)
show._draw_called = True
def flush_figures():
"""Send all figures that changed
This is meant to be called automatically and will call show() if, during
prior code execution, there had been any calls to draw_if_interactive.
This function is meant to be used as a post_execute callback in IPython,
so user-caused errors are handled with showtraceback() instead of being
allowed to raise. If this function is not called from within IPython,
then these exceptions will raise.
"""
if not show._draw_called:
return
if InlineBackend.instance().close_figures:
# ignore the tracking, just draw and close all figures
try:
return show(True)
except Exception as e:
# safely show traceback if in IPython, else raise
try:
get_ipython
except NameError:
raise e
else:
get_ipython().showtraceback()
return
try:
# exclude any figures that were closed:
active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()])
for fig in [ fig for fig in show._to_draw if fig in active ]:
try:
display(fig)
except Exception as e:
# safely show traceback if in IPython, else raise
try:
get_ipython
except NameError:
raise e
else:
get_ipython().showtraceback()
break
finally:
# clear flags for next round
show._to_draw = []
show._draw_called = False
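# Rough sketch of how these pieces fit together (normally driven by IPython
# itself rather than by user code): each pylab drawing command calls
# draw_if_interactive(), which queues the active figure on show._to_draw, and a
# post-execute hook then calls flush_figures(), which displays the queued
# figures and clears the queue. The exact registration of flush_figures as a
# post-execute callback is version-dependent and not shown here.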
# Changes to matplotlib in version 1.2 requires a mpl backend to supply a default
# figurecanvas. This is set here to a Agg canvas
# See https://github.com/matplotlib/matplotlib/pull/1125
FigureCanvas = FigureCanvasAgg
| bsd-3-clause |
jbedorf/tensorflow | tensorflow/contrib/distributions/python/ops/mixture_same_family.py | 18 | 15014 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The same-family Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class MixtureSameFamily(distribution.Distribution):
"""Mixture (same-family) distribution.
The `MixtureSameFamily` distribution implements a (batch of) mixture
distribution where all components are from different parameterizations of the
same distribution type. It is parameterized by a `Categorical` "selecting
distribution" (over `k` components) and a components distribution, i.e., a
`Distribution` with a rightmost batch shape (equal to `[k]`) which indexes
each (batch of) component.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
### Create a mixture of two scalar Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.Normal(
loc=[-1., 1], # One for each component.
scale=[0.1, 0.5])) # And same here.
gm.mean()
# ==> 0.4
gm.variance()
# ==> 1.018
# Plot PDF.
x = np.linspace(-2., 3., int(1e4), dtype=np.float32)
import matplotlib.pyplot as plt
plt.plot(x, gm.prob(x).eval());
### Create a mixture of two Bivariate Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.MultivariateNormalDiag(
loc=[[-1., 1], # component 1
[1, -1]], # component 2
scale_identity_multiplier=[.3, .6]))
gm.mean()
# ==> array([ 0.4, -0.4], dtype=float32)
gm.covariance()
# ==> array([[ 1.119, -0.84],
# [-0.84, 1.119]], dtype=float32)
# Plot PDF contours.
  def meshgrid(x, y=None):
    if y is None: y = x
    [gx, gy] = np.meshgrid(x, y, indexing='ij')
gx, gy = np.float32(gx), np.float32(gy)
grid = np.concatenate([gx.ravel()[None, :], gy.ravel()[None, :]], axis=0)
return grid.T.reshape(x.size, y.size, 2)
grid = meshgrid(np.linspace(-2, 2, 100, dtype=np.float32))
plt.contour(grid[..., 0], grid[..., 1], gm.prob(grid).eval());
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
mixture_distribution,
components_distribution,
validate_args=False,
allow_nan_stats=True,
name="MixtureSameFamily"):
"""Construct a `MixtureSameFamily` distribution.
Args:
mixture_distribution: `tfp.distributions.Categorical`-like instance.
Manages the probability of selecting components. The number of
categories must match the rightmost batch dimension of the
`components_distribution`. Must have either scalar `batch_shape` or
`batch_shape` matching `components_distribution.batch_shape[:-1]`.
components_distribution: `tfp.distributions.Distribution`-like instance.
Right-most batch dimension indexes components.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: `if not mixture_distribution.dtype.is_integer`.
ValueError: if mixture_distribution does not have scalar `event_shape`.
ValueError: if `mixture_distribution.batch_shape` and
`components_distribution.batch_shape[:-1]` are both fully defined and
the former is neither scalar nor equal to the latter.
ValueError: if `mixture_distribution` categories does not equal
`components_distribution` rightmost batch shape.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
self._mixture_distribution = mixture_distribution
self._components_distribution = components_distribution
self._runtime_assertions = []
s = components_distribution.event_shape_tensor()
s_dim0 = tensor_shape.dimension_value(s.shape[0])
self._event_ndims = (s_dim0
if s_dim0 is not None
else array_ops.shape(s)[0])
if not mixture_distribution.dtype.is_integer:
raise ValueError(
"`mixture_distribution.dtype` ({}) is not over integers".format(
mixture_distribution.dtype.name))
if (mixture_distribution.event_shape.ndims is not None
and mixture_distribution.event_shape.ndims != 0):
raise ValueError("`mixture_distribution` must have scalar `event_dim`s")
elif validate_args:
self._runtime_assertions += [
control_flow_ops.assert_has_rank(
mixture_distribution.event_shape_tensor(), 0,
message="`mixture_distribution` must have scalar `event_dim`s"),
]
mdbs = mixture_distribution.batch_shape
cdbs = components_distribution.batch_shape.with_rank_at_least(1)[:-1]
if mdbs.is_fully_defined() and cdbs.is_fully_defined():
if mdbs.ndims != 0 and mdbs != cdbs:
raise ValueError(
"`mixture_distribution.batch_shape` (`{}`) is not "
"compatible with `components_distribution.batch_shape` "
"(`{}`)".format(mdbs.as_list(), cdbs.as_list()))
elif validate_args:
mdbs = mixture_distribution.batch_shape_tensor()
cdbs = components_distribution.batch_shape_tensor()[:-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
distribution_util.pick_vector(
mixture_distribution.is_scalar_batch(), cdbs, mdbs),
cdbs,
message=(
"`mixture_distribution.batch_shape` is not "
"compatible with `components_distribution.batch_shape`"))]
km = tensor_shape.dimension_value(
mixture_distribution.logits.shape.with_rank_at_least(1)[-1])
kc = tensor_shape.dimension_value(
components_distribution.batch_shape.with_rank_at_least(1)[-1])
if km is not None and kc is not None and km != kc:
raise ValueError("`mixture_distribution components` ({}) does not "
"equal `components_distribution.batch_shape[-1]` "
"({})".format(km, kc))
elif validate_args:
km = array_ops.shape(mixture_distribution.logits)[-1]
kc = components_distribution.batch_shape_tensor()[-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
km, kc,
message=("`mixture_distribution components` does not equal "
"`components_distribution.batch_shape[-1:]`")),
]
elif km is None:
km = array_ops.shape(mixture_distribution.logits)[-1]
self._num_components = km
super(MixtureSameFamily, self).__init__(
dtype=self._components_distribution.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
self._mixture_distribution._graph_parents # pylint: disable=protected-access
+ self._components_distribution._graph_parents), # pylint: disable=protected-access
name=name)
@property
def mixture_distribution(self):
return self._mixture_distribution
@property
def components_distribution(self):
return self._components_distribution
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.batch_shape_tensor()[:-1]
def _batch_shape(self):
return self.components_distribution.batch_shape.with_rank_at_least(1)[:-1]
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.event_shape_tensor()
def _event_shape(self):
return self.components_distribution.event_shape
def _sample_n(self, n, seed):
with ops.control_dependencies(self._runtime_assertions):
x = self.components_distribution.sample(n) # [n, B, k, E]
# TODO(jvdillon): Consider using tf.gather (by way of index unrolling).
npdt = x.dtype.as_numpy_dtype
mask = array_ops.one_hot(
indices=self.mixture_distribution.sample(n), # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = distribution_utils.pad_mixture_dimensions(
mask, self, self.mixture_distribution,
self._event_shape().ndims) # [n, B, k, [1]*e]
return math_ops.reduce_sum(
x * mask, axis=-1 - self._event_ndims) # [n, B, E]
def _log_prob(self, x):
with ops.control_dependencies(self._runtime_assertions):
x = self._pad_sample_dims(x)
log_prob_x = self.components_distribution.log_prob(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, axis=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_prob_x + log_mix_prob, axis=-1) # [S, B]
def _mean(self):
with ops.control_dependencies(self._runtime_assertions):
probs = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, [1]*e]
return math_ops.reduce_sum(
probs * self.components_distribution.mean(),
axis=-1 - self._event_ndims) # [B, E]
def _log_cdf(self, x):
x = self._pad_sample_dims(x)
log_cdf_x = self.components_distribution.log_cdf(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, axis=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_cdf_x + log_mix_prob, axis=-1) # [S, B]
def _variance(self):
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, [1]*e]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.variance(),
axis=-1 - self._event_ndims) # [B, E]
var_cond_mean = math_ops.reduce_sum(
probs * math_ops.squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-1 - self._event_ndims) # [B, E]
return mean_cond_var + var_cond_mean # [B, E]
def _covariance(self):
static_event_ndims = self.event_shape.ndims
if static_event_ndims != 1:
# Covariance is defined only for vector distributions.
raise NotImplementedError("covariance is not implemented")
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = distribution_utils.pad_mixture_dimensions(
distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims),
self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, 1, 1]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.covariance(),
axis=-3) # [B, e, e]
var_cond_mean = math_ops.reduce_sum(
probs * _outer_squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-3) # [B, e, e]
return mean_cond_var + var_cond_mean # [B, e, e]
def _pad_sample_dims(self, x):
with ops.name_scope("pad_sample_dims", values=[x]):
ndims = x.shape.ndims if x.shape.ndims is not None else array_ops.rank(x)
shape = array_ops.shape(x)
d = ndims - self._event_ndims
x = array_ops.reshape(x, shape=array_ops.concat([
shape[:d], [1], shape[d:]], axis=0))
return x
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _outer_squared_difference(x, y):
"""Convenience function analogous to tf.squared_difference."""
z = x - y
return z[..., array_ops.newaxis, :] * z[..., array_ops.newaxis]
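if __name__ == "__main__":
  # NumPy-only sketch (not part of the TensorFlow library) of the density that
  # _log_prob implements: log p(x) = logsumexp_k[log pi_k + log p_k(x)].
  # It reuses the two-component scalar Gaussian mixture from the first
  # docstring example above.
  mix_probs = np.array([0.3, 0.7])
  locs = np.array([-1., 1.])
  scales = np.array([0.1, 0.5])
  x = 0.4
  log_pk = (-0.5 * ((x - locs) / scales) ** 2
            - np.log(scales) - 0.5 * np.log(2. * np.pi))
  log_p = np.log(np.sum(np.exp(log_pk + np.log(mix_probs))))  # naive logsumexp
  print("mixture log-density at x = 0.4:", log_p)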
| apache-2.0 |
jkarnows/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 44 | 34602 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
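# Illustrative example (not part of the scikit-learn API surface):
# >>> D, ij = l1_cross_distances(np.array([[0.], [1.], [3.]]))
# >>> D.ravel()  # |x_i - x_j| for every pair i < j
# array([ 1.,  3.,  2.])
# >>> ij  # the (i, j) index pair behind each distance
# array([[0, 1],
#        [0, 2],
#        [1, 2]])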
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ self.random_state.rand(self.theta0.size).reshape(
self.theta0.shape) * np.log10(self.thetaU
/ self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
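if __name__ == "__main__":
    # Self-contained sketch mirroring the class docstring example (not part of
    # the library): fit a GP to a 1-D function and query the BLUP together with
    # its mean squared error at a few new points.
    X_demo = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y_demo = (X_demo * np.sin(X_demo)).ravel()
    gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
    gp.fit(X_demo, y_demo)
    x_new = np.atleast_2d(np.linspace(0.5, 9.5, 5)).T
    y_pred, mse = gp.predict(x_new, eval_MSE=True)
    print(y_pred, np.sqrt(mse))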
| bsd-3-clause |
fionapigott/Data-Science-45min-Intros | adaboost-101/sample_code.py | 20 | 5548 | '''
Created on Nov 28, 2010
Adaboost is short for Adaptive Boosting
@author: Peter
'''
from numpy import *
def loadSimpData():
datMat = matrix([[ 1. , 2.1],
[ 2. , 1.1],
[ 1.3, 1. ],
[ 1. , 1. ],
[ 2. , 1. ]])
classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
return datMat,classLabels
def loadDataSet(fileName): #general function to parse tab -delimited floats
numFeat = len(open(fileName).readline().split('\t')) #get number of fields
dataMat = []; labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr =[]
curLine = line.strip().split('\t')
for i in range(numFeat-1):
lineArr.append(float(curLine[i]))
dataMat.append(lineArr)
labelMat.append(float(curLine[-1]))
return dataMat,labelMat
def stumpClassify(dataMatrix,dimen,threshVal,threshIneq):#just classify the data
retArray = ones((shape(dataMatrix)[0],1))
if threshIneq == 'lt':
retArray[dataMatrix[:,dimen] <= threshVal] = -1.0
else:
retArray[dataMatrix[:,dimen] > threshVal] = -1.0
return retArray
def buildStump(dataArr,classLabels,D):
dataMatrix = mat(dataArr); labelMat = mat(classLabels).T
m,n = shape(dataMatrix)
numSteps = 10.0; bestStump = {}; bestClasEst = mat(zeros((m,1)))
minError = inf #init error sum, to +infinity
for i in range(n):#loop over all dimensions
rangeMin = dataMatrix[:,i].min(); rangeMax = dataMatrix[:,i].max();
stepSize = (rangeMax-rangeMin)/numSteps
for j in range(-1,int(numSteps)+1):#loop over all range in current dimension
for inequal in ['lt', 'gt']: #go over less than and greater than
threshVal = (rangeMin + float(j) * stepSize)
predictedVals = stumpClassify(dataMatrix,i,threshVal,inequal)#call stump classify with i, j, lessThan
errArr = mat(ones((m,1)))
errArr[predictedVals == labelMat] = 0
weightedError = D.T*errArr #calc total error multiplied by D
#print "split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError)
if weightedError < minError:
minError = weightedError
bestClasEst = predictedVals.copy()
bestStump['dim'] = i
bestStump['thresh'] = threshVal
bestStump['ineq'] = inequal
return bestStump,minError,bestClasEst
def adaBoostTrainDS(dataArr,classLabels,numIt=40):
weakClassArr = []
m = shape(dataArr)[0]
D = mat(ones((m,1))/m) #init D to all equal
aggClassEst = mat(zeros((m,1)))
for i in range(numIt):
bestStump,error,classEst = buildStump(dataArr,classLabels,D)#build Stump
#print "D:",D.T
alpha = float(0.5*log((1.0-error)/max(error,1e-16)))#calc alpha, throw in max(error,eps) to account for error=0
bestStump['alpha'] = alpha
weakClassArr.append(bestStump) #store Stump Params in Array
#print "classEst: ",classEst.T
expon = multiply(-1*alpha*mat(classLabels).T,classEst) #exponent for D calc, getting messy
D = multiply(D,exp(expon)) #Calc New D for next iteration
D = D/D.sum()
#calc training error of all classifiers, if this is 0 quit for loop early (use break)
aggClassEst += alpha*classEst
#print "aggClassEst: ",aggClassEst.T
aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T,ones((m,1)))
errorRate = aggErrors.sum()/m
print "total error: ",errorRate
if errorRate == 0.0: break
return weakClassArr,aggClassEst
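# The loop above is the standard AdaBoost update: given the weighted error
# eps_t of the t-th stump, its vote is alpha_t = 0.5*ln((1 - eps_t)/eps_t), the
# sample weights become D_i <- D_i * exp(-alpha_t * y_i * h_t(x_i)) and are then
# renormalized, so misclassified samples gain weight for the next stump.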
def adaClassify(datToClass,classifierArr):
    dataMatrix = mat(datToClass) # aggregate the weighted stump votes, as in adaBoostTrainDS
m = shape(dataMatrix)[0]
aggClassEst = mat(zeros((m,1)))
for i in range(len(classifierArr)):
classEst = stumpClassify(dataMatrix,classifierArr[i]['dim'],\
classifierArr[i]['thresh'],\
classifierArr[i]['ineq'])#call stump classify
aggClassEst += classifierArr[i]['alpha']*classEst
print aggClassEst
return sign(aggClassEst)
def plotROC(predStrengths, classLabels):
import matplotlib.pyplot as plt
cur = (1.0,1.0) #cursor
ySum = 0.0 #variable to calculate AUC
numPosClas = sum(array(classLabels)==1.0)
yStep = 1/float(numPosClas); xStep = 1/float(len(classLabels)-numPosClas)
    sortedIndicies = predStrengths.argsort() # indices of the scores in ascending order
fig = plt.figure()
fig.clf()
ax = plt.subplot(111)
#loop through all the values, drawing a line segment at each point
for index in sortedIndicies.tolist()[0]:
if classLabels[index] == 1.0:
delX = 0; delY = yStep;
else:
delX = xStep; delY = 0;
ySum += cur[1]
#draw line from cur to (cur[0]-delX,cur[1]-delY)
ax.plot([cur[0],cur[0]-delX],[cur[1],cur[1]-delY], c='b')
cur = (cur[0]-delX,cur[1]-delY)
ax.plot([0,1],[0,1],'b--')
plt.xlabel('False positive rate'); plt.ylabel('True positive rate')
plt.title('ROC curve for AdaBoost horse colic detection system')
ax.axis([0,1,0,1])
plt.show()
print "the Area Under the Curve is: ",ySum*xStep
| unlicense |
PatrickChrist/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
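# A small follow-up sketch of the idea mentioned in the module docstring: a
# purer subset of class B can be built by keeping only samples whose decision
# score exceeds some value. The 0.5 cutoff is an arbitrary illustrative choice.
purer_B = X[twoclass_output > 0.5]
print("High-confidence class B samples (score > 0.5): %d" % len(purer_B))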
| bsd-3-clause |
pviechnicki/taskExplorer | predict_diff_in_hours.py | 1 | 7436 | import logging
import glob
import seaborn as sns # for easy on the eyes defaults
import pandas as pd
import ipdb
import numpy as np
from treeinterpreter import treeinterpreter as ti
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.ensemble import ExtraTreesRegressor
from sklearn import linear_model # seems to hang and not work?
from sklearn.linear_model.stochastic_gradient import SGDRegressor
import warnings
from sklearn.metrics import make_scorer
from sklearn.metrics import r2_score
# note: user should have the most up-to-date numpy, scipy and sklearn
# to avoid BLAS/LAPACK errors when using linear_model.LinearRegression()
# quick train/test script for testing on various task data sets against various algorithms
# broken up into Functions, Variables and a driver loop (for loop over classifiers, data sets)
# Run initialization
# ignore warning, see: https://github.com/scipy/scipy/issues/5998
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# Functions
def make_cv(test_size, random_seed, n_splits=2):
cv = ShuffleSplit(n_splits=n_splits,
test_size=test_size,
random_state=random_seed)
return cv
def cross_validate_me(clf,
test_size,
design_matrix_name,
design_matrix,
random_seed,
predicator_variables,
response_variable):
print("\n\t-----------------------------")
print("\t File: {}".format(design_matrix_name))
print("\t Learner Type: ", clf)
print("\t-----------------------------")
scores = cross_val_score(clf,
design_matrix[predicator_variables],
np.ravel(design_matrix[response_variable].values),
cv=make_cv(test_size, random_seed),
n_jobs=2)
print("%d-fold Cross Validation Accuracy: %0.2f (+/- %0.2f)" % (len(scores), scores.mean(), scores.std() * 2))
# same as accuracy
#scores = cross_val_score(clf,
# design_matrix[predicator_variables],
# np.ravel(design_matrix[response_variable].values),
# cv=make_cv(test_size, random_seed),
# n_jobs=2,
# scoring=make_scorer(r2_score))
#print("(%d-fold average) R^2: %0.2f (+/- %0.2f)" % (len(scores), scores.mean(), scores.std() * 2))
#print("Predictor Variables Used: (response variable {})".format(response_variable[0]),
# "\n\t",
# "\n\t".join(predicator_variables))
print("\t-----------------------------")
# Variables
random_seed = 0
test_size = 0.10
year_span_range = 10 # predict on year_span = 1 .. year_span_range-1
orig = {'filename':"./design_matrix/design_matrix_task_model_bing.csv",
'predicator_variables':['Data Value1', 'social_index', 'pm_index',
'relevance', 'importance', 'job_zone',
'log_bing'],
'response_variable' : ["difference in hours"],
'sep':'\t'}
feb5 = {'filename':"task_model_data.feb5.bsv",
'predicator_variables':["importance", "relevance", "task_person_hours1",
"year_span", "pm_job", "social_job",
"social_job", "normalized_job_zone", "pmjob_x_jobzone",
"social_x_jobzone"],
'response_variable' : ["difference_in_hours"],
'sep':"|"}
feb18 = {'filename':"task_model_training_data.feb18.bsv",
'predicator_variables':["importance", "relevance", "normalized_job_zone",
"year_span", "social_job", "creative_job",
"pm_job", "automation_index"],
'response_variable' : ["difference_in_hours"],
'sep':'|'}
predictme = {'filename':"task_forecast_full_data.bsv",
'predicator_variables':["importance", "relevance", "normalized_job_zone",
"year_span", "social_job", "creative_job",
"social_job", "creative_job",
"pm_job", "automation_index"],
'response_variable' : ["difference_in_hours"],
'sep':'|'}
#dataset_info = [feb18, feb5, feb18, orig]
dataset_info = [orig]
dataset_predict = [] # [predictme]
# Driver loop (runs each regressor over each data set, measuring accuracy, etc.)
for info in dataset_info: # cross validate set of regressors over each dataset
filename = info['filename']
predicator_variables = info['predicator_variables']
response_variable = info['response_variable']
sep = info['sep']
design_matrix = pd.read_csv(filename, sep=sep)[predicator_variables + response_variable]
design_matrix.drop_duplicates(inplace=True)
print("\t(dropped duplicates)")
regressors = [\
RandomForestRegressor(n_estimators=10,
random_state=random_seed,
criterion="mse",
max_depth=None,
oob_score=True,
n_jobs=4)]
for regressor in regressors:
cross_validate_me(regressor,
test_size = test_size,
design_matrix = design_matrix,
design_matrix_name = filename,
random_seed = random_seed,
predicator_variables = predicator_variables,
response_variable = response_variable)
        # Output predictions; accuracy should be something like what we see in
        # cross validation
print("\n training on entire ", filename)
trained = regressor.fit(design_matrix[predicator_variables],
np.ravel(design_matrix[response_variable].values))
for predict in dataset_predict:
filename = predict['filename']
predicator_variables = predict['predicator_variables']
response_variable = predict['response_variable']
sep = predict['sep']
print("\n predicting on ", filename)
predict_matrix = pd.read_csv(filename, sep=sep)[predicator_variables]
predict_matrix[response_variable[0]] = trained.predict(predict_matrix[predicator_variables])
predict_matrix.to_csv(filename+".predict", sep=sep)
for year_span in range(1, year_span_range):
print(".. with year_span ", year_span)
predict_matrix['year_span'] = year_span
predict_matrix[response_variable[0]] = trained.predict(
predict_matrix[predicator_variables + ['year_span']])
predict_matrix.to_csv(filename+"."+str(year_span) +".predict", sep=sep)
| gpl-3.0 |
jakirkham/runipy | runipy/notebook_runner.py | 4 | 8406 | from __future__ import print_function
try:
# python 2
from Queue import Empty
except ImportError:
# python 3
from queue import Empty
import platform
from time import sleep
import json
import logging
import os
import warnings
with warnings.catch_warnings():
try:
from IPython.utils.shimmodule import ShimWarning
warnings.filterwarnings('error', '', ShimWarning)
except ImportError:
class ShimWarning(Warning):
"""Warning issued by IPython 4.x regarding deprecated API."""
pass
try:
# IPython 3
from IPython.kernel import KernelManager
from IPython.nbformat import NotebookNode
except ShimWarning:
# IPython 4
from nbformat import NotebookNode
from jupyter_client import KernelManager
except ImportError:
# IPython 2
from IPython.kernel import KernelManager
from IPython.nbformat.current import NotebookNode
finally:
warnings.resetwarnings()
import IPython
class NotebookError(Exception):
pass
class NotebookRunner(object):
# The kernel communicates with mime-types while the notebook
# uses short labels for different cell types. We'll use this to
# map from kernel types to notebook format types.
MIME_MAP = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'text/plain': 'text',
'text/html': 'html',
'text/latex': 'latex',
'application/javascript': 'html',
'application/json': 'json',
'image/svg+xml': 'svg',
}
def __init__(
self,
nb,
pylab=False,
mpl_inline=False,
profile_dir=None,
working_dir=None):
self.km = KernelManager()
args = []
if pylab:
args.append('--pylab=inline')
logging.warn(
'--pylab is deprecated and will be removed in a future version'
)
elif mpl_inline:
args.append('--matplotlib=inline')
logging.warn(
'--matplotlib is deprecated and' +
' will be removed in a future version'
)
if profile_dir:
args.append('--profile-dir=%s' % os.path.abspath(profile_dir))
cwd = os.getcwd()
if working_dir:
os.chdir(working_dir)
self.km.start_kernel(extra_arguments=args)
os.chdir(cwd)
if platform.system() == 'Darwin':
# There is sometimes a race condition where the first
# execute command hits the kernel before it's ready.
# It appears to happen only on Darwin (Mac OS) and an
# easy (but clumsy) way to mitigate it is to sleep
# for a second.
sleep(1)
self.kc = self.km.client()
self.kc.start_channels()
try:
self.kc.wait_for_ready()
except AttributeError:
# IPython < 3
self._wait_for_ready_backport()
self.nb = nb
def shutdown_kernel(self):
logging.info('Shutdown kernel')
self.kc.stop_channels()
self.km.shutdown_kernel(now=True)
def _wait_for_ready_backport(self):
# Backport BlockingKernelClient.wait_for_ready from IPython 3.
# Wait for kernel info reply on shell channel.
self.kc.kernel_info()
while True:
msg = self.kc.get_shell_msg(block=True, timeout=30)
if msg['msg_type'] == 'kernel_info_reply':
break
# Flush IOPub channel
while True:
try:
msg = self.kc.get_iopub_msg(block=True, timeout=0.2)
except Empty:
break
def run_cell(self, cell):
"""Run a notebook cell and update the output of that cell in-place."""
logging.info('Running cell:\n%s\n', cell.input)
self.kc.execute(cell.input)
reply = self.kc.get_shell_msg()
status = reply['content']['status']
traceback_text = ''
if status == 'error':
traceback_text = 'Cell raised uncaught exception: \n' + \
'\n'.join(reply['content']['traceback'])
logging.info(traceback_text)
else:
logging.info('Cell returned')
outs = list()
while True:
try:
msg = self.kc.get_iopub_msg(timeout=1)
if msg['msg_type'] == 'status':
if msg['content']['execution_state'] == 'idle':
break
except Empty:
# execution state should return to idle
# before the queue becomes empty,
# if it doesn't, something bad has happened
raise
content = msg['content']
msg_type = msg['msg_type']
# IPython 3.0.0-dev writes pyerr/pyout in the notebook format
# but uses error/execute_result in the message spec. This does the
# translation needed for tests to pass with IPython 3.0.0-dev
notebook3_format_conversions = {
'error': 'pyerr',
'execute_result': 'pyout'
}
msg_type = notebook3_format_conversions.get(msg_type, msg_type)
out = NotebookNode(output_type=msg_type)
if 'execution_count' in content:
cell['prompt_number'] = content['execution_count']
out.prompt_number = content['execution_count']
if msg_type in ('status', 'pyin', 'execute_input'):
continue
elif msg_type == 'stream':
out.stream = content['name']
# in msgspec 5, this is name, text
# in msgspec 4, this is name, data
if 'text' in content:
out.text = content['text']
else:
out.text = content['data']
elif msg_type in ('display_data', 'pyout'):
for mime, data in content['data'].items():
try:
attr = self.MIME_MAP[mime]
except KeyError:
raise NotImplementedError(
'unhandled mime type: %s' % mime
)
# In notebook version <= 3 JSON data is stored as a string
# Evaluation of IPython2's JSON gives strings directly
# Therefore do not encode for IPython versions prior to 3
json_encode = (
IPython.version_info[0] >= 3 and
mime == "application/json")
data_out = data if not json_encode else json.dumps(data)
setattr(out, attr, data_out)
elif msg_type == 'pyerr':
out.ename = content['ename']
out.evalue = content['evalue']
out.traceback = content['traceback']
elif msg_type == 'clear_output':
outs = list()
continue
else:
raise NotImplementedError(
'unhandled iopub message: %s' % msg_type
)
outs.append(out)
cell['outputs'] = outs
if status == 'error':
raise NotebookError(traceback_text)
def iter_code_cells(self):
"""Iterate over the notebook cells containing code."""
for ws in self.nb.worksheets:
for cell in ws.cells:
if cell.cell_type == 'code':
yield cell
def run_notebook(self, skip_exceptions=False, progress_callback=None):
"""
Run all the notebook cells in order and update the outputs in-place.
If ``skip_exceptions`` is set, then if exceptions occur in a cell, the
subsequent cells are run (by default, the notebook execution stops).
"""
for i, cell in enumerate(self.iter_code_cells()):
try:
self.run_cell(cell)
except NotebookError:
if not skip_exceptions:
raise
if progress_callback:
progress_callback(i)
def count_code_cells(self):
"""Return the number of code cells in the notebook."""
return sum(1 for _ in self.iter_code_cells())
| bsd-2-clause |
franalli/CS221 | SVM.py | 1 | 1193 | import glob
import os
import numpy as np
import collections
import matplotlib.pyplot as plt
from matplotlib.pyplot import specgram
from IPython import embed
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
train = np.loadtxt('/home/franalli/Documents/UrbanSound8K/train')
y_train = np.loadtxt('/home/franalli/Documents/UrbanSound8K/y_train')
val = np.loadtxt('/home/franalli/Documents/UrbanSound8K/val')
y_val = np.loadtxt('/home/franalli/Documents/UrbanSound8K/y_val')
predictions = []
clf = svm.SVC(C=1000.0, cache_size=8000, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=2, gamma=1e-6, kernel='rbf',
max_iter=-1, probability=False, random_state=0, shrinking=True,
tol=0.001, verbose=False)
clf.fit(train,y_train)
val_predictions = clf.predict(val)
train_predictions = clf.predict(train)
val_acc = np.mean(val_predictions == y_val)
train_acc = np.mean(train_predictions == y_train)
print(confusion_matrix(val_predictions,y_val))
print('train acc:{}'.format(train_acc))
print('val acc:{}'.format(val_acc))
embed() | bsd-3-clause |
jwiggins/scikit-image | doc/examples/segmentation/plot_threshold_adaptive.py | 22 | 1307 | """
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filters import threshold_otsu, threshold_adaptive
image = data.page()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
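# A rough cross-check of the idea described in the docstring: with a 'mean'
# method the adaptive threshold is a local average minus the offset, which a
# uniform filter reproduces directly. threshold_adaptive itself defaults to a
# Gaussian-weighted mean, so the two binary images will differ slightly.
from scipy.ndimage import uniform_filter
manual_thresh = uniform_filter(image.astype(float), size=block_size) - 10
binary_manual = image > manual_thresh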
| bsd-3-clause |
mtrbean/scipy | scipy/signal/fir_filter_design.py | 25 | 20184 | """Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
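    Examples
    --------
    A brief illustration; the 65 dB attenuation target is an illustrative
    value, not one taken from the text above:
    >>> from scipy.signal import kaiser_beta
    >>> beta = kaiser_beta(65)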
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
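    Examples
    --------
    A brief illustration; 81 taps and a transition width of 0.2 (in units of
    the Nyquist rate) are illustrative values:
    >>> from scipy.signal import kaiser_atten
    >>> a = kaiser_atten(81, 0.2)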
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=0)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
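    Examples
    --------
    A sketch of typical use; the 65 dB ripple target, 1 kHz sampling rate and
    100 Hz transition width are illustrative values:
    >>> from scipy import signal
    >>> numtaps, beta = signal.kaiserord(ripple=65, width=100.0 / (0.5 * 1000.0))
    >>> taps = signal.firwin(numtaps, cutoff=150.0 / (0.5 * 1000.0),
    ...                      window=('kaiser', beta))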
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool, optional
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise
nyq : float, optional
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See also
--------
scipy.signal.firwin2
Examples
--------
Low-pass from 0 to f::
>>> from scipy import signal
>>> signal.firwin(numtaps, f)
Use a specific window function::
>>> signal.firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> signal.firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> signal.firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0,
antisymmetric=False):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
    the value of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
Magnitude response of all but type I filters are subjects to following
constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist rate.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist rates.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/manifold/plot_manifold_sphere.py | 89 | 5055 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space, unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with
`map projection <https://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
ax.view_init(40, -10)
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| gpl-3.0 |
YerevaNN/mimic3-benchmarks | mimic3benchmark/subject.py | 1 | 2993 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import os
import pandas as pd
from mimic3benchmark.util import dataframe_from_csv
def read_stays(subject_path):
stays = dataframe_from_csv(os.path.join(subject_path, 'stays.csv'), index_col=None)
stays.INTIME = pd.to_datetime(stays.INTIME)
stays.OUTTIME = pd.to_datetime(stays.OUTTIME)
stays.DOB = pd.to_datetime(stays.DOB)
stays.DOD = pd.to_datetime(stays.DOD)
stays.DEATHTIME = pd.to_datetime(stays.DEATHTIME)
stays.sort_values(by=['INTIME', 'OUTTIME'], inplace=True)
return stays
def read_diagnoses(subject_path):
return dataframe_from_csv(os.path.join(subject_path, 'diagnoses.csv'), index_col=None)
def read_events(subject_path, remove_null=True):
events = dataframe_from_csv(os.path.join(subject_path, 'events.csv'), index_col=None)
if remove_null:
events = events[events.VALUE.notnull()]
events.CHARTTIME = pd.to_datetime(events.CHARTTIME)
events.HADM_ID = events.HADM_ID.fillna(value=-1).astype(int)
events.ICUSTAY_ID = events.ICUSTAY_ID.fillna(value=-1).astype(int)
events.VALUEUOM = events.VALUEUOM.fillna('').astype(str)
# events.sort_values(by=['CHARTTIME', 'ITEMID', 'ICUSTAY_ID'], inplace=True)
return events
def get_events_for_stay(events, icustayid, intime=None, outtime=None):
idx = (events.ICUSTAY_ID == icustayid)
if intime is not None and outtime is not None:
idx = idx | ((events.CHARTTIME >= intime) & (events.CHARTTIME <= outtime))
events = events[idx]
del events['ICUSTAY_ID']
return events
def add_hours_elpased_to_events(events, dt, remove_charttime=True):
events = events.copy()
events['HOURS'] = (events.CHARTTIME - dt).apply(lambda s: s / np.timedelta64(1, 's')) / 60./60
if remove_charttime:
del events['CHARTTIME']
return events
def convert_events_to_timeseries(events, variable_column='VARIABLE', variables=[]):
metadata = events[['CHARTTIME', 'ICUSTAY_ID']].sort_values(by=['CHARTTIME', 'ICUSTAY_ID'])\
.drop_duplicates(keep='first').set_index('CHARTTIME')
timeseries = events[['CHARTTIME', variable_column, 'VALUE']]\
.sort_values(by=['CHARTTIME', variable_column, 'VALUE'], axis=0)\
.drop_duplicates(subset=['CHARTTIME', variable_column], keep='last')
timeseries = timeseries.pivot(index='CHARTTIME', columns=variable_column, values='VALUE')\
.merge(metadata, left_index=True, right_index=True)\
.sort_index(axis=0).reset_index()
for v in variables:
if v not in timeseries:
timeseries[v] = np.nan
return timeseries
def get_first_valid_from_timeseries(timeseries, variable):
if variable in timeseries:
idx = timeseries[variable].notnull()
if idx.any():
loc = np.where(idx)[0][0]
return timeseries[variable].iloc[loc]
return np.nan
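# A minimal usage sketch showing how these helpers chain for a single ICU
# stay. The subject directory below is a placeholder, and events.csv is
# assumed to carry an ITEMID column to pivot on.
if __name__ == '__main__':
    subject_path = 'data/root/12345'
    stays = read_stays(subject_path)
    events = read_events(subject_path)
    timeseries = convert_events_to_timeseries(events, variable_column='ITEMID')
    stay = stays.iloc[0]
    episode = get_events_for_stay(timeseries, stay.ICUSTAY_ID, stay.INTIME, stay.OUTTIME)
    episode = add_hours_elpased_to_events(episode, stay.INTIME)
    print(episode.head())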
| mit |
lordkman/burnman | contrib/CHRU2014/paper_opt_pv.py | 4 | 10666 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
paper_opt_pv
------------
This script reproduces :cite:`Cottaar2014`, Figure 6.
Vary the amount perovskite vs. ferropericlase and compute the error in the
seismic data against PREM.
requires:
- creating minerals
- compute seismic velocities
- geotherms
- seismic models
- seismic comparison
teaches:
- compare errors between models
- loops over models
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../../burnman'):
sys.path.insert(1, os.path.abspath('../..'))
import burnman
from burnman import minerals
from misc.helper_solid_solution import *
import misc.colors as colors
if __name__ == "__main__":
# figsize=(6,5)
plt.figure(dpi=100, figsize=(12, 10))
prop = {'size': 12}
plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = r'\usepackage{relsize}'
plt.rc('font', family='sans-serif')
figsize = (6, 5)
dashstyle2 = (7, 3)
dashstyle3 = (3, 2)
# input variables ###
#
# INPUT for method
""" choose 'slb2' (finite-strain 2nd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'slb3 (finite-strain 3rd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'mgd3' (mie-gruneisen-debeye 3rd order shear modulus,
matas et al. 2007)
or 'mgd2' (mie-gruneisen-debeye 2nd order shear modulus,
matas et al. 2007)
or 'bm2' (birch-murnaghan 2nd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))
or 'bm3' (birch-murnaghan 3rd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))"""
method = 'slb3'
seismic_model = burnman.seismic.PREM()
# pick from .prem() .slow() .fast()
# (see burnman/seismic.py)
    number_of_points = 20  # number of depth slices at which the computations are done
depths = np.linspace(850e3, 2700e3, number_of_points)
seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate(
['pressure', 'density', 'v_p', 'v_s', 'v_phi'], depths)
print(seis_p[0], seis_p[-1])
def eval_material(amount_perovskite):
rock = burnman.Composite([SLB_2011_ZSB_2013_mg_fe_perovskite(0.07),
other_ferropericlase(0.2)],
[amount_perovskite, 1.0 - amount_perovskite])
rock.set_method(method)
temperature = burnman.geotherm.adiabatic(seis_p, 1900, rock)
print("Calculations are done for:")
rock.debug_print()
mat_rho, mat_vs, mat_vphi = rock.evaluate(
['rho', 'v_s', 'v_phi'], seis_p, temperature)
#[rho_err,vphi_err,vs_err]=burnman.compare_chifactor(mat_vs,mat_vphi,mat_rho,seis_vs,seis_vphi,seis_rho)
return seis_p, mat_vs, mat_vphi, mat_rho
def material_error(x):
_, mat_vs, mat_vphi, mat_rho = eval_material(x)
[vs_err, vphi_err, rho_err] = burnman.compare_l2(depths,
[mat_vs, mat_vphi,
mat_rho],
[seis_vs, seis_vphi, seis_rho])
scale = 2700e3 - 850e3
return vs_err / scale, vphi_err / scale
xx = np.linspace(0.0, 1.0, 200) # 200 for final image
# speed up computation for the automatic tests:
if "RUNNING_TESTS" in globals():
xx = np.linspace(0.0, 1.0, 10)
errs = np.array([material_error(x) for x in xx])
yy_vs = errs[:, 0]
yy_vphi = errs[:, 1]
vs_average_prem = sum(seis_vs) / len(seis_vs)
vphi_average_prem = sum(seis_vphi) / len(seis_vphi)
print(vs_average_prem, vphi_average_prem)
yy_vs /= vs_average_prem
yy_vphi /= vphi_average_prem
yy_sum = (yy_vs + yy_vphi) # we scale by a factor so it fits in the plot
# plt.figure(dpi=100,figsize=figsize)
plt.subplot(2, 2, 1)
plt.plot(xx * 100, yy_vs, "-", color=colors.color(1),
label=("$V_s$ error"), linewidth=1.5, dashes=dashstyle2)
plt.plot(xx * 100, yy_vphi, "-", color=colors.color(3),
label=("$V_\phi$ error"), linewidth=1.5)
# plt.plot (xx*100,yy_vs+yy_vphi,"g--",label=("sum"),linewidth=1.5)
plt.plot(xx * 100, yy_sum, "-", color=colors.color(4),
label=("weighted sum"), linewidth=1.5, dashes=dashstyle3)
ymin = 1e-2
ymax = 1e2
plt.ylim([ymin, ymax])
print(xx[np.argmin(yy_vs)], xx[np.argmin(yy_vphi)], xx[np.argmin(yy_sum)])
B = np.around(xx[np.argmin(yy_vs)], decimals=3)
A = np.around(xx[np.argmin(yy_vphi)], decimals=3)
C = np.around(xx[np.argmin(yy_sum)], decimals=3)
plt.plot([A * 100., A * 100.], [ymin, ymax], color=colors.color(3),
label='A (%g\%% pv)' % (A * 100), linewidth=1.5, linestyle='-')
plt.plot([B * 100., B * 100.], [ymin, ymax], color=colors.color(1),
label='B (%g\%% pv)' % (B * 100), linewidth=1.5, dashes=dashstyle2)
plt.plot([C * 100., C * 100.], [ymin, ymax], color=colors.color(4),
label='C (%g\%% pv)' % (C * 100), linewidth=1.5, dashes=dashstyle3)
plt.yscale('log')
plt.xlabel('\% Perovskite')
plt.ylabel('Error')
plt.legend(loc='lower left', prop=prop)
# plt.tight_layout(pad=2)
# plt.savefig("opt_pv_1.pdf",bbox_inches='tight')
# plt.show()
A_p, A_vs, A_vphi, _ = eval_material(A)
B_p, B_vs, B_vphi, _ = eval_material(B)
C_p, C_vs, C_vphi, _ = eval_material(C)
# plt.figure(dpi=100,figsize=figsize)
plt.subplot(2, 2, 3)
plt.plot(seis_p / 1.e9, seis_vs / 1.e3, color='k', linestyle='-',
linewidth=2.0, markersize=6, markerfacecolor='None', label='PREM')
plt.plot(A_p / 1.e9, A_vs / 1.e3, color=colors.color(3), linestyle='-',
label='A (%g\%% pv)' % (A * 100), linewidth=1.5, markevery=5, marker='v', markerfacecolor='None', mew=1.5, markeredgecolor=colors.color(3))
plt.plot(B_p / 1.e9, B_vs / 1.e3, color=colors.color(1), dashes=dashstyle2,
label='B (%g\%% pv)' % (B * 100), linewidth=1.5, markevery=5, marker='v', markerfacecolor='None', mew=1.5, markeredgecolor=colors.color(1))
plt.plot(C_p / 1.e9, C_vs / 1.e3, color=colors.color(4), dashes=dashstyle3,
label='C (%g\%% pv)' % (C * 100), linewidth=1.5, markevery=5, marker='v', markerfacecolor='None', mew=1.5, markeredgecolor=colors.color(4))
plt.xlabel('Pressure (GPa)')
plt.ylabel(
'Shear velocity $V_{\mathlarger{\mathlarger{\mathlarger{s}}}}$ (km/s)')
plt.xlim([30, 130])
plt.legend(loc='lower right', prop=prop)
# plt.tight_layout()
# plt.savefig("opt_pv_2.pdf",bbox_inches='tight')
# plt.show()
plt.subplot(2, 2, 4)
# plt.figure(dpi=100,figsize=figsize)
plt.plot(seis_p / 1.e9, seis_vphi / 1.e3, color='k', linestyle='-',
linewidth=2.0, markersize=6, markerfacecolor='None', label='PREM', mew=1.5)
plt.plot(A_p / 1.e9, A_vphi / 1.e3, color=colors.color(3), linestyle='-',
markevery=5, marker='s', markersize=5, markeredgecolor=colors.color(3), markerfacecolor='None', mew=1.5, label='A (%g\%% pv)' % (A * 100), linewidth=1.5)
plt.plot(
B_p / 1.e9, B_vphi / 1.e3, color=colors.color(1), dashes=dashstyle2,
markevery=5, marker='s', markersize=5, markeredgecolor=colors.color(1), markerfacecolor='None', mew=1.5, label='B (%g\%% pv)' % (B * 100), linewidth=1.5)
plt.plot(
C_p / 1.e9, C_vphi / 1.e3, color=colors.color(4), dashes=dashstyle3,
markevery=5, marker='s', markersize=5, markeredgecolor=colors.color(4), markerfacecolor='None', mew=1.5, label='C (%g\%% pv)' % (C * 100), linewidth=1.5)
plt.xlabel('Pressure (GPa)')
plt.ylabel(
"Bulk sound velocity $V_{\mathlarger{\mathlarger{\mathlarger{\phi}}}}$ (km/s)")
plt.xlim([30, 130])
plt.legend(loc='lower right', prop=prop)
# plt.tight_layout()
# plt.savefig("opt_pv_3.pdf",bbox_inches='tight')
# plt.show()
# plot percent differences
# plt.figure(dpi=100,figsize=figsize)
plt.subplot(2, 2, 2)
plt.plot(seis_p / 1.e9, seis_vs * 0.0,
color='k', linestyle='-', linewidth=2.0)
plt.plot(seis_p / 1.e9, (A_vs - seis_vs) / seis_vs * 100.0, color=colors.color(3), label='$V_s$: A (%g\%% pv)' %
(A * 100), linewidth=1.5, linestyle='-', markevery=5, marker='v', markerfacecolor='None', mew=1.5, markeredgecolor=colors.color(3))
plt.plot(seis_p / 1.e9, (B_vs - seis_vs) / seis_vs * 100.0, color=colors.color(1), label='$V_s$: B (%g\%% pv)' %
(B * 100), linewidth=1.5, dashes=dashstyle2, markevery=5, marker='v', markerfacecolor='None', mew=1.5, markeredgecolor=colors.color(1))
plt.plot(seis_p / 1.e9, (C_vs - seis_vs) / seis_vs * 100.0, color=colors.color(4), label='$V_s$: C (%g\%% pv)' %
(C * 100), linewidth=1.5, dashes=dashstyle3, markevery=5, marker='v', markerfacecolor='None', mew=1.5, markeredgecolor=colors.color(4))
plt.plot(
seis_p / 1.e9, (A_vphi - seis_vphi) / seis_vphi * 100.0, color=colors.color(3), markevery=5, marker='s', markersize=5,
markeredgecolor=colors.color(3), markerfacecolor='None', mew=1.5, label='$V_\phi$: A', linewidth=1.5, linestyle='-')
plt.plot(
seis_p / 1.e9, (B_vphi - seis_vphi) / seis_vphi * 100.0, color=colors.color(1), markevery=5, marker='s', markersize=5,
markeredgecolor=colors.color(1), markerfacecolor='None', mew=1.5, label='$V_\phi$: B', linewidth=1.5, dashes=dashstyle2)
plt.plot(
seis_p / 1.e9, (C_vphi - seis_vphi) / seis_vphi * 100.0, color=colors.color(4), markevery=5, marker='s', markersize=5,
markeredgecolor=colors.color(4), markerfacecolor='None', mew=1.5, label='$V_\phi$: C', linewidth=1.5, dashes=dashstyle3)
plt.xlabel('Pressure (GPa)')
plt.ylabel('Difference from PREM (\%)')
plt.ylim([-5, 4])
plt.xlim([30, 130])
plt.legend(loc='lower center', ncol=2, prop=prop)
# plt.tight_layout()
# plt.savefig("opt_pv_4.pdf",bbox_inches='tight')
# plt.show()
plt.tight_layout()
if "RUNNING_TESTS" not in globals():
plt.savefig("paper_opt_pv.pdf", bbox_inches='tight')
plt.show()
| gpl-2.0 |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/backends/backend_qt4agg.py | 11 | 3003 | """
Render to qt from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os # not used
import sys
import ctypes
import warnings
import matplotlib
from matplotlib.figure import Figure
from .backend_qt5agg import NavigationToolbar2QTAgg
from .backend_qt5agg import FigureCanvasQTAggBase
from .backend_agg import FigureCanvasAgg
from .backend_qt4 import QtCore
from .backend_qt4 import FigureManagerQT
from .backend_qt4 import FigureCanvasQT
from .backend_qt4 import NavigationToolbar2QT
##### not used
from .backend_qt4 import show
from .backend_qt4 import draw_if_interactive
from .backend_qt4 import backend_version
######
from matplotlib.cbook import mplDeprecation
DEBUG = False
_decref = ctypes.pythonapi.Py_DecRef
_decref.argtypes = [ctypes.py_object]
_decref.restype = None
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG:
print('backend_qt4agg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return FigureManagerQT(canvas, num)
class FigureCanvasQTAgg(FigureCanvasQTAggBase,
FigureCanvasQT, FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQtAgg: ', figure)
FigureCanvasQT.__init__(self, figure)
FigureCanvasAgg.__init__(self, figure)
self._drawRect = None
self.blitbox = None
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
# it has been reported that Qt is semi-broken in a windows
# environment. If `self.draw()` uses `update` to trigger a
# system-level window repaint (as is explicitly advised in the
# Qt documentation) the figure responds very slowly to mouse
        # input. The workaround is to directly use `repaint`
        # (against the advice of the Qt documentation). The
        # difference between `update` and `repaint` is that `update`
        # schedules a `repaint` for the next time the system is idle,
        # whereas `repaint` repaints the window immediately. The
        # risk is that if `self.draw` gets called within another `repaint`
# method there will be an infinite recursion. Thus, we only
# expose windows users to this risk.
if sys.platform.startswith('win'):
self._priv_update = self.repaint
else:
self._priv_update = self.update
FigureCanvas = FigureCanvasQTAgg
FigureManager = FigureManagerQT
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| mit |
dhruv13J/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
springcoil/pymc3 | pymc3/tests/test_model.py | 1 | 9246 | import pytest
from theano import theano, tensor as tt
import numpy as np
import pandas as pd
import numpy.testing as npt
import unittest
import pymc3 as pm
from pymc3.distributions import HalfCauchy, Normal, transforms
from pymc3 import Potential, Deterministic
from pymc3.model import ValueGradFunction
class NewModel(pm.Model):
def __init__(self, name='', model=None):
super(NewModel, self).__init__(name, model)
assert pm.modelcontext(None) is self
# 1) init variables with Var method
self.Var('v1', pm.Normal.dist())
self.v2 = pm.Normal('v2', mu=0, sd=1)
        # 2) Potentials and Deterministic variables can be created with
        # these methods too; be sure that their names will not overlap
        # with those of other models of the same kind
pm.Deterministic('d', tt.constant(1))
pm.Potential('p', tt.constant(1))
class DocstringModel(pm.Model):
def __init__(self, mean=0, sd=1, name='', model=None):
super(DocstringModel, self).__init__(name, model)
self.Var('v1', Normal.dist(mu=mean, sd=sd))
Normal('v2', mu=mean, sd=sd)
Normal('v3', mu=mean, sd=HalfCauchy('sd', beta=10, testval=1.))
Deterministic('v3_sq', self.v3 ** 2)
Potential('p1', tt.constant(1))
class TestBaseModel(object):
def test_setattr_properly_works(self):
with pm.Model() as model:
pm.Normal('v1')
assert len(model.vars) == 1
with pm.Model('sub') as submodel:
submodel.Var('v1', pm.Normal.dist())
assert hasattr(submodel, 'v1')
assert len(submodel.vars) == 1
assert len(model.vars) == 2
with submodel:
submodel.Var('v2', pm.Normal.dist())
assert hasattr(submodel, 'v2')
assert len(submodel.vars) == 2
assert len(model.vars) == 3
def test_context_passes_vars_to_parent_model(self):
with pm.Model() as model:
# a set of variables is created
NewModel()
# another set of variables are created but with prefix 'another'
usermodel2 = NewModel(name='another')
# you can enter in a context with submodel
with usermodel2:
usermodel2.Var('v3', pm.Normal.dist())
pm.Normal('v4')
# this variable is created in parent model too
assert 'another_v2' in model.named_vars
assert 'another_v3' in model.named_vars
assert 'another_v3' in usermodel2.named_vars
assert 'another_v4' in model.named_vars
assert 'another_v4' in usermodel2.named_vars
assert hasattr(usermodel2, 'v3')
assert hasattr(usermodel2, 'v2')
assert hasattr(usermodel2, 'v4')
# When you create a class based model you should follow some rules
with model:
m = NewModel('one_more')
assert m.d is model['one_more_d']
assert m['d'] is model['one_more_d']
assert m['one_more_d'] is model['one_more_d']
class TestNested(object):
def test_nest_context_works(self):
with pm.Model() as m:
new = NewModel()
with new:
assert pm.modelcontext(None) is new
assert pm.modelcontext(None) is m
assert 'v1' in m.named_vars
assert 'v2' in m.named_vars
def test_named_context(self):
with pm.Model() as m:
NewModel(name='new')
assert 'new_v1' in m.named_vars
assert 'new_v2' in m.named_vars
def test_docstring_example1(self):
usage1 = DocstringModel()
assert 'v1' in usage1.named_vars
assert 'v2' in usage1.named_vars
assert 'v3' in usage1.named_vars
assert 'v3_sq' in usage1.named_vars
assert len(usage1.potentials), 1
def test_docstring_example2(self):
with pm.Model() as model:
DocstringModel(name='prefix')
assert 'prefix_v1' in model.named_vars
assert 'prefix_v2' in model.named_vars
assert 'prefix_v3' in model.named_vars
assert 'prefix_v3_sq' in model.named_vars
assert len(model.potentials), 1
def test_duplicates_detection(self):
with pm.Model():
DocstringModel(name='prefix')
with pytest.raises(ValueError):
DocstringModel(name='prefix')
def test_model_root(self):
with pm.Model() as model:
assert model is model.root
with pm.Model() as sub:
assert model is sub.root
class TestObserved(object):
def test_observed_rv_fail(self):
with pytest.raises(TypeError):
with pm.Model():
x = Normal('x')
Normal('n', observed=x)
class TestTheanoConfig(object):
def test_set_testval_raise(self):
with theano.configparser.change_flags(compute_test_value='off'):
with pm.Model():
assert theano.config.compute_test_value == 'raise'
assert theano.config.compute_test_value == 'off'
def test_nested(self):
with theano.configparser.change_flags(compute_test_value='off'):
with pm.Model(theano_config={'compute_test_value': 'ignore'}):
assert theano.config.compute_test_value == 'ignore'
with pm.Model(theano_config={'compute_test_value': 'warn'}):
assert theano.config.compute_test_value == 'warn'
assert theano.config.compute_test_value == 'ignore'
assert theano.config.compute_test_value == 'off'
def test_duplicate_vars():
with pytest.raises(ValueError) as err:
with pm.Model():
pm.Normal('a')
pm.Normal('a')
err.match('already exists')
with pytest.raises(ValueError) as err:
with pm.Model():
pm.Normal('a')
pm.Normal('a', transform=transforms.log)
err.match('already exists')
with pytest.raises(ValueError) as err:
with pm.Model():
a = pm.Normal('a')
pm.Potential('a', a**2)
err.match('already exists')
with pytest.raises(ValueError) as err:
with pm.Model():
pm.Binomial('a', 10, .5)
pm.Normal('a', transform=transforms.log)
err.match('already exists')
def test_empty_observed():
data = pd.DataFrame(np.ones((2, 3)) / 3)
data.values[:] = np.nan
with pm.Model():
a = pm.Normal('a', observed=data)
npt.assert_allclose(a.tag.test_value, np.zeros((2, 3)))
b = pm.Beta('b', alpha=1, beta=1, observed=data)
npt.assert_allclose(b.tag.test_value, np.ones((2, 3)) / 2)
class TestValueGradFunction(unittest.TestCase):
def test_no_extra(self):
a = tt.vector('a')
a.tag.test_value = np.zeros(3, dtype=a.dtype)
a.dshape = (3,)
a.dsize = 3
f_grad = ValueGradFunction(a.sum(), [a], [], mode='FAST_COMPILE')
assert f_grad.size == 3
def test_invalid_type(self):
a = tt.ivector('a')
a.tag.test_value = np.zeros(3, dtype=a.dtype)
a.dshape = (3,)
a.dsize = 3
with pytest.raises(TypeError) as err:
ValueGradFunction(a.sum(), [a], [], mode='FAST_COMPILE')
err.match('Invalid dtype')
def setUp(self):
extra1 = tt.iscalar('extra1')
extra1_ = np.array(0, dtype=extra1.dtype)
extra1.tag.test_value = extra1_
extra1.dshape = tuple()
extra1.dsize = 1
val1 = tt.vector('val1')
val1_ = np.zeros(3, dtype=val1.dtype)
val1.tag.test_value = val1_
val1.dshape = (3,)
val1.dsize = 3
val2 = tt.matrix('val2')
val2_ = np.zeros((2, 3), dtype=val2.dtype)
val2.tag.test_value = val2_
val2.dshape = (2, 3)
val2.dsize = 6
self.val1, self.val1_ = val1, val1_
self.val2, self.val2_ = val2, val2_
self.extra1, self.extra1_ = extra1, extra1_
self.cost = extra1 * val1.sum() + val2.sum()
self.f_grad = ValueGradFunction(
self.cost, [val1, val2], [extra1], mode='FAST_COMPILE')
def test_extra_not_set(self):
with pytest.raises(ValueError) as err:
self.f_grad.get_extra_values()
err.match('Extra values are not set')
with pytest.raises(ValueError) as err:
self.f_grad(np.zeros(self.f_grad.size, dtype=self.f_grad.dtype))
err.match('Extra values are not set')
def test_grad(self):
self.f_grad.set_extra_values({'extra1': 5})
array = np.ones(self.f_grad.size, dtype=self.f_grad.dtype)
val, grad = self.f_grad(array)
assert val == 21
npt.assert_allclose(grad, [5, 5, 5, 1, 1, 1, 1, 1, 1])
def test_bij(self):
self.f_grad.set_extra_values({'extra1': 5})
array = np.ones(self.f_grad.size, dtype=self.f_grad.dtype)
point = self.f_grad.array_to_dict(array)
assert len(point) == 2
npt.assert_allclose(point['val1'], 1)
npt.assert_allclose(point['val2'], 1)
array2 = self.f_grad.dict_to_array(point)
npt.assert_allclose(array2, array)
point_ = self.f_grad.array_to_full_dict(array)
assert len(point_) == 3
assert point_['extra1'] == 5
| apache-2.0 |
TechplexEngineer/DB-Benchmarking-Tools | plotting/genstats.py | 1 | 2006 | #!/usr/bin/env python
###
# Code to calculate statistics from dump file
# B.Bourque 7/8/2015
###
import matplotlib.pyplot as plt
import matplotlib.dates as md
import numpy as np
from datetime import datetime, timedelta
# This function expects the data in [filename] to be tab separated.
# Col1 is the unix timestamp and col2 is the database transactions per second
def genstats(filename, start=0, end=-1,numpts=-1, plot=False, print_dates=False, human_readable=True):
data = np.loadtxt(filename, delimiter='\t')
st = data[:,0][0] # get the first timestamp, col 0 row 0
st = datetime.fromtimestamp(st)
# x values
dates=[(datetime.fromtimestamp(ts) - timedelta(hours=st.hour, minutes=st.minute, seconds=st.second)) for ts in data[:,0]]
	# if end is negative, set it to the index of the last data point
if end < 0:
end = len(data)-1
	# if start is negative, clamp it to 0
if start < 0:
start = 0
if numpts != -1:
end = start+numpts
if human_readable:
print "start\t\t:", start
print "start_date\t:", dates[start]
print "end\t\t:", end
print "end_date\t:", dates[end]
print "elasped\t\t:", (dates[end]-dates[start])
print "avg\t\t:", np.average(data[start:end,1])
print "{0}\t{1}\t{2}\t{3}\t{4}\t{5}".format(start,end,dates[start],dates[end],(dates[end]-dates[start]),np.average(data[start:end,1]))
if plot:
if print_dates:
plt.plot(dates[start:end], data[start:end,1], 'b-o')
else:
plt.plot(data[start:end,1], 'b-o')
plt.ylabel('TPS')
plt.xlabel('Time')
plt.title("TPS "+filename)
plt.minorticks_on()
plt.grid()
if print_dates:
ax=plt.gca()
xfmt = md.DateFormatter('%H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
plt.show()
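# A hedged helper, not part of the original script: it writes a tiny
# synthetic dump file in the tab-separated format described above
# (unix timestamp in column 1, transactions per second in column 2),
# so genstats() can be tried without a real benchmark run. The file
# name and TPS values are made up purely for illustration.
def make_sample_dump(filename="sample_dump.txt", n=60):
	import time
	start = int(time.time())
	with open(filename, "w") as f:
		for i in range(n):
			tps = 100 + (i % 10)  # fabricated TPS trace
			f.write("%d\t%d\n" % (start + i, tps))
	return filename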
if __name__ == "__main__":
print "Test Routine"
genstats("testdata.txt", 0, -1, plot=True, print_dates=True, human_readable=True)
genstats("testdata.txt", 0, -1, plot=True, print_dates=False, human_readable=True)
genstats("testdata.txt", 0, -1, plot=False, print_dates=True, human_readable=False) | mit |
marchdf/dg1d | dg1d/plot.py | 1 | 3976 | #!/usr/bin/env python3
#
#
"""@package sample plotting
A sample plotting tool for the one-dimensional DG data. The exact
solution plotted is for the sinewave initial condition.
"""
# ========================================================================
#
# Imports
#
# ========================================================================
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import dg1d.solution as solution
# ========================================================================
#
# Some defaults variables
#
# ========================================================================
plt.rc('text', usetex=True)
plt.rc('font', family='serif', serif='Times')
cmap_med = ['#F15A60', '#7AC36A', '#5A9BD4', '#FAA75B',
'#9E67AB', '#CE7058', '#D77FB4', '#737373']
cmap = ['#EE2E2F', '#008C48', '#185AA9', '#F47D23',
'#662C91', '#A21D21', '#B43894', '#010202']
dashseq = [(None, None), [10, 5], [10, 4, 3, 4], [
3, 3], [10, 4, 3, 4, 3, 4], [3, 3], [3, 3]]
markertype = ['s', 'd', 'o', 'p', 'h']
# ========================================================================
#
# Main
#
# ========================================================================
if __name__ == '__main__':
# ========================================================================
# Parse arguments
parser = argparse.ArgumentParser(
description='A simple plot tool for the one-dimensional DG data')
parser.add_argument(
'-s', '--show', help='Show the plots', action='store_true')
parser.add_argument('-f', '--file', dest='step',
help='File to load', type=int, required=True)
parser.add_argument('-t', '--type', dest='system',
help='Type of system to solve', type=str, default='advection')
args = parser.parse_args()
# Load solution
solution = solution.Solution('empty', args.system, 0)
solution.loader(args.step)
# Collocate the solution to the Gaussian nodes
ug = solution.collocate()
# Collocate to the cell edge values
uf = solution.evaluate_faces()
# Get the primitive variables for Euler
if args.system == 'euler':
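        # Conserved state per field: U = (rho, rho*u, E). The primitive
        # variables are recovered as u = (rho*u)/rho and, for an ideal gas,
        # p = (gamma - 1) * (E - 0.5 * rho * u**2); the lines below apply
        # this to both the Gauss-point values (ug) and the face values (uf).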
gamma = 1.4
rho = ug[:, 0::solution.N_F]
u = ug[:, 1::solution.N_F] / rho
p = (gamma - 1) * (ug[:, 2::solution.N_F] - 0.5 * rho * u * u)
ug[:, 1::solution.N_F] = u
ug[:, 2::solution.N_F] = p
rho = uf[:, 0::solution.N_F]
u = uf[:, 1::solution.N_F] / rho
p = (gamma - 1) * (uf[:, 2::solution.N_F] - 0.5 * rho * u * u)
uf[:, 1::solution.N_F] = u
uf[:, 2::solution.N_F] = p
# Plot each field
for field in range(solution.N_F):
plt.figure(field)
# Plot each element solution in a different color
# Skip plotting the ghost cells
for e in range(1, solution.N_E + 1):
a = solution.x[e - 1]
b = solution.x[e]
xg = 0.5 * (b - a) * solution.basis.x + 0.5 * (b + a)
# plot the solution at the Gaussian nodes (circles)
plt.plot(xg, ug[:, e * solution.N_F + field], 'o',
mfc=cmap[e % len(cmap)], mec=cmap[e % len(cmap)])
# Plot the solution at the cell edges (squares)
plt.plot([a, b], uf[:, e * solution.N_F + field],
's', mfc=cmap[e % len(cmap)], mec='black')
# Plot the exact solution
if args.system == 'advection':
xe = np.linspace(-1, 1, 200)
fe = np.sin(2 * np.pi * xe)
plt.plot(xe, fe, 'k')
# Plot the sensors if they exist
if os.path.isfile('sensor0000000000.dat'):
plt.figure(solution.N_F)
dat = np.loadtxt('sensor{0:010d}.dat'.format(args.step), delimiter=',')
# plot sensors at cell centers
plt.plot(dat[:, 0], dat[:, 1], 'o', mfc=cmap[0], mec=cmap[0])
plt.ylim([-0.1, 2.1])
if args.show:
plt.show()
| apache-2.0 |
yannickmartin/wellFARE | wellfare/ILM/fast_estimators.py | 1 | 5629 | """
This module implements fast estimators for the time-profiles of
growth rate, promoter activity, and protein concentrations.
These estimators rely on a simple model in which gene expression
is modeled as a one-step process. This enables to compute the
observation matrix directly using an ad-hoc formula.
As a consequence these algorithms are faster and require less
parameters than their counterparts in module ``estimators``
Simple approximations are made to compute the observation matrix,
these are valid as long as the vector of estimation times (ttu) of
the different estimated input (growth rate, promoter actitivity,
protein concentration) has a fine time resolution.
See also:
----------
estimators : collection of functions for the inference
"""
import numpy as np
from scipy.integrate import odeint
from ..curves import Curve
from .methods import DEFAULT_ALPHAS, infer_control
def ilp_growth_rate(curve_volume, ttu, alphas=None, eps_L=.0001):
"""
Returns
--------
    mu, v_smoothed, ic, alpha, ascores
        As described below.
    mu
        Curve of the inferred growth rate, evaluated at the times ``ttu``.
    v_smoothed
        The predicted value of the observed volume at the same time
        points as the data. v_smoothed will appear smoothed compared to
        the measured volume.
    ic, alpha, ascores
        Inferred initial condition, the regularization parameter alpha
        selected by cross-validation, and the corresponding scores, as
        returned by ``infer_control``.
"""
if isinstance(curve_volume, list):
results = [ilp_growth_rate(v, ttu,
alphas=alphas, eps_L=eps_L)
for v in curve_volume]
return zip(*results)
if alphas is None: alphas = DEFAULT_ALPHAS
ttv = curve_volume.x
dttu = 1.0*(ttu[1]-ttu[0])
H_ic = np.ones((len(ttv),1))
# dT is a Ny x Nu matrix with
# dT[i,j] = ttv[i] - ttu[j]
dT = np.array([ttv]).T - ttu
H_u = ( np.maximum(0, np.minimum(dttu, dT))
* curve_volume(ttu+ dttu/2))
H = np.hstack([H_ic, H_u])
growth_rate, v_smooth, ic, alpha, ascores = \
infer_control(H, y= curve_volume.y, Nic= 1,
alphas= alphas, eps_L = eps_L)
return ( Curve(ttu, growth_rate),
Curve(ttv, v_smooth),
ic, alpha, ascores )
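def _example_ilp_growth_rate():
    # Hedged usage sketch, not part of the library API: the synthetic
    # exponential-growth data below is an assumption made purely for
    # illustration, and it relies only on the Curve(x, y) constructor
    # already used in this module.
    tt_obs = np.linspace(0, 10, 101)                     # observation times
    curve_volume = Curve(tt_obs, np.exp(0.3 * tt_obs))   # synthetic volume
    ttu = np.linspace(0, 10, 50)                         # estimation times
    mu, v_smooth, ic, alpha, ascores = ilp_growth_rate(curve_volume, ttu)
    return mu, v_smooth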
def ilp_synthesis_rate(curve_fluo, curve_volume, ttu, degr,
alphas=None, eps_L=.0001):
"""
dF/dt = s(t)V(t) - degr*F
Parameters
-----------
curve_fluo
A curve instance representing the (noisy) measured
fluorescence
curve_volume
A curve instance representing the (noisy) measured
volume
ttu
        Times at which the control (the synthesis rate) is estimated.
Returns
--------
synth_rate, fluo_smoothed, ic, alpha, ascores
As described below.
synth_rate
Vector. Inferred control.
fluo_smoothed
The predicted value of the observed data at the same time
points as the data. y_smoothed will appear smoothed compared
to y.
    ic, alpha, ascores
        Inferred initial condition, the regularization parameter alpha
        selected by cross-validation, and the corresponding scores, as
        returned by ``infer_control``.
"""
if isinstance(curve_fluo, list):
results = [ilp_synthesis_rate(f, v, ttu, degr,
alphas=alphas, eps_L=eps_L)
for f, v in zip(curve_fluo, curve_volume)]
return zip(*results)
if alphas is None: alphas = DEFAULT_ALPHAS
tt_fluo= curve_fluo.x
H_ic = np.exp(-degr*tt_fluo).reshape((len(tt_fluo),1))
model = lambda Y,t: 1 - degr*Y
dtau = ttu[1]-ttu[0]
m = odeint(model,0,[0,dtau]).flatten()[1]
TT = (ttu-np.array([tt_fluo]).T)
H_u = (m*np.exp(degr*TT)*(TT<0)) * curve_volume(ttu + dtau/2)
H = np.hstack([H_ic, H_u])
activity, fluo_smooth, ic, alpha, ascores = \
infer_control(H, y= curve_fluo.y, Nic= 1, alphas= alphas,
eps_L = eps_L)
return ( Curve(ttu, activity),
Curve(tt_fluo, fluo_smooth),
ic, alpha, ascores )
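def _simulate_one_step_model(curve_volume, curve_synth, degr, tt):
    # Hedged illustration, not used by the estimators themselves: it
    # forward-simulates the one-step expression model
    #     dF/dt = s(t) * V(t) - degr * F
    # that ilp_synthesis_rate inverts, so synthetic fluorescence data can
    # be generated for testing. The argument names are assumptions made
    # for this sketch; curve_volume and curve_synth are Curve instances.
    def dF_dt(F, t):
        return curve_synth(t) * curve_volume(t) - degr * F
    fluo = odeint(dF_dt, 0.0, tt).flatten()
    return Curve(tt, fluo)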
def ilp_concentration(curve_fluo, curve_volume, ttu, dR, dP,
alphas=None, eps_L=0.0001):
""" Retrieves the concentration of a protein P, given
the fluorescence of reporter R.
Parameters
-----------
curve_fluo
A curve instance representing the measured fluorescence
(proportional to the quantities of reporter)
curve_volume
Volume of the population.
dR
Degradation rate of the reporter
dP
Degradation rate of the proteins.
alphas
Smoothing parameters to be tested.
eps_L
Negligible factor for the derivation matrix.
"""
if isinstance(curve_fluo, list):
results = [ilp_concentration(f, v, ttu, dR, dP,
alphas=alphas, eps_L=eps_L)
for f, v in zip(curve_fluo, curve_volume)]
return zip(*results)
tt = curve_fluo.x
deltatau = ttu[1]-ttu[0]
dT = np.array([tt]).T-ttu
dTlz = dT >= 0 # ti-tj > 0
dTlzsdtau = dTlz*(dT < deltatau) # 0 < ti-tj < delta_tau
A = np.exp(dR*np.minimum(deltatau, dT)) - 1
B = dTlz*np.exp(dT*(-dR))*(dP-dR)/dR
Hu = (dTlzsdtau + A*B)*curve_volume(ttu+deltatau/2)
Hic = np.array([np.exp(-dR*tt)]).reshape((len(tt),1))
H = np.hstack([Hic, Hu])
p_est, f_est, ic, a, ascores = infer_control(
H, curve_fluo.y, 1, alphas=alphas, eps_L=eps_L)
return (Curve(ttu, p_est),
Curve(tt, f_est),
ic, a, ascores ) | lgpl-3.0 |
MatthieuMichon/f24 | src/load/chart.py | 1 | 2309 | #!/usr/bin/python3
"""
load.chart
~~~~~~~~~~
This module implements classes for transforming extracted datasets related to
airport data.
"""
from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.pyplot as plt
import math
def dist(lat1, lon1, lat2, lon2):
    """Distance in nautical miles between two points.
    Equirectangular approximation; accurate only for small distances."""
    x = math.radians(lon2 - lon1) * math.cos(0.5 * math.radians(lat2 + lat1))
    y = math.radians(lat2 - lat1)
    return 3440 * math.sqrt(x * x + y * y)
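def _example_dist_sanity_check():
    """Hedged sanity check added for illustration (not used by Chart):
    one arc minute of latitude is by definition one nautical mile, so two
    points one degree of latitude apart should be roughly 60 nm apart."""
    d = dist(48.0, 2.0, 49.0, 2.0)
    assert abs(d - 60.0) < 1.0
    return d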
class Chart:
PT_LAT = 0
PT_LON = 1
PT_ALT = 2
def __init__(self, trail_list, verbose=False):
self.verbose = verbose
self.trail_list = trail_list
self.cropped_trails = self.trail_list
def crop_out_of_range(self, latitude, longitude, distance, elevation):
"""Remove points from trail which are located further than the
distance to the reference or higher than the given elevation"""
# self.cropped_trails = [
# [pt for pt in trail] for trail in self.trail_list]
self.cropped_trails = [
[pt for pt in trail if (dist(
lat1=latitude, lon1=longitude,
lat2=pt[self.PT_LAT], lon2=pt[self.PT_LON]) < distance)
and (pt[self.PT_ALT] < elevation)]
for trail in self.trail_list]
def plot(self, longitude, latitude, distance):
self.data_list = [list(zip(*trail[::-1]))
for trail in self.cropped_trails]
fig, ax = plt.subplots(subplot_kw={'projection': '3d'})
for trail in self.data_list:
i = self.data_list.index(trail)
ax.plot(trail[self.PT_LON], trail[self.PT_LAT], trail[self.PT_ALT],
color=(i/10, 1-i/10, 0.5))
# TODO: think of drawing three plots: dep, enroute and arr b/c
# of the difference in scale, also why not use a log scale for Z
# ax.plot(trail[1], trail[0], trail[2], color=color[(i % 5)])
ax.set_xlabel('lon')
ax.set_xlim(longitude-distance/2, longitude+distance/2)
ax.set_ylabel('lat')
ax.set_ylim(latitude-distance/2, latitude+distance/2)
ax.set_zlabel('alt')
ax.set_zlim(0, 3000)
plt.show()
| gpl-2.0 |
matthew-brett/pymc | pymc/__init__.py | 1 | 1462 | """
Markov Chain methods in Python.
A toolkit of stochastic methods for biometric analysis. Features
a Metropolis-Hastings MCMC sampler and both linear and unscented
(non-linear) Kalman filters.
Pre-requisite modules: numpy, matplotlib
Required external components: TclTk
"""
__version__ = '2.1alpha'
try:
import numpy
except ImportError:
raise ImportError, 'NumPy does not seem to be installed. Please see the user guide.'
# Core modules
from threadpool import *
try:
import Container_values
del Container_values
except ImportError:
raise ImportError, 'You seem to be importing PyMC from inside its source tree. Please change to another directory and try again.'
from Node import *
from Container import *
from PyMCObjects import *
from InstantiationDecorators import *
from CommonDeterministics import *
from distributions import *
from Model import *
from StepMethods import *
from MCMC import *
from NormalApproximation import *
from tests import test
# Utilities modules
import utils
import CommonDeterministics
from CircularStochastic import *
import distributions
import gp
# Optional modules
try:
from diagnostics import *
except ImportError:
pass
try:
import ScipyDistributions
except ImportError:
pass
try:
import parallel
except ImportError:
pass
try:
import sandbox
except ImportError:
pass
try:
import graph
except ImportError:
pass
try:
import Matplot
except:
pass
| mit |
LiaoPan/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully connected graph between all given points and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can be very
expensive, however, at approximately O(N^3) where N is the number of (labeled
and unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
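def _example_clamping_demo():
    # Hedged sketch, not part of the scikit-learn API: contrasts hard-style
    # clamping (LabelPropagation, alpha=1) with soft clamping
    # (LabelSpreading, alpha=0.2) on a toy problem where roughly 30% of
    # the labels are hidden. The data generation below is an assumption
    # made only for this illustration.
    rng = np.random.RandomState(0)
    X = rng.rand(60, 2)
    y = (X[:, 0] > 0.5).astype(int)
    y[rng.rand(60) < 0.3] = -1  # mark ~30% of the points as unlabeled
    hard = LabelPropagation(kernel='rbf', gamma=20).fit(X, y)
    soft = LabelSpreading(kernel='rbf', gamma=20, alpha=0.2).fit(X, y)
    return hard.transduction_, soft.transduction_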
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
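            # Soft clamping step: the propagated distributions are scaled
            # by clamp_weights (alpha for unlabeled points, 1 for labeled
            # ones) and y_static adds back a (1 - alpha) share of the
            # original one-hot labels for the labeled points.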
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
pythonvietnam/scikit-learn | sklearn/tree/tests/test_tree.py | 11 | 47506 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth here.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
        # Default layout (no explicit memory order requested)
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees constrained to max_leaf_nodes = k + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that the error message for too-large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Reduce testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
peterfpeterson/mantid | qt/python/mantidqt/widgets/sliceviewer/test/test_sliceviewer_presenter.py | 3 | 24997 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
import sys
import unittest
from unittest import mock
from unittest.mock import patch
from mantid.api import MultipleExperimentInfos
import matplotlib
matplotlib.use('Agg')
# Mock out simpleapi to avoid the expensive import of something we don't use anyway
sys.modules['mantid.simpleapi'] = mock.MagicMock()
from mantidqt.widgets.sliceviewer.model import SliceViewerModel, WS_TYPE # noqa: E402
from mantidqt.widgets.sliceviewer.presenter import ( # noqa: E402
PeaksViewerCollectionPresenter, SliceViewer)
from mantidqt.widgets.sliceviewer.transform import NonOrthogonalTransform # noqa: E402
from mantidqt.widgets.sliceviewer.toolbar import ToolItemText # noqa: E402
from mantidqt.widgets.sliceviewer.view import SliceViewerView, SliceViewerDataView # noqa: E402
def _create_presenter(model, view, mock_sliceinfo_cls, enable_nonortho_axes, supports_nonortho):
model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDH)
model.is_ragged_matrix_plotted.return_value = False
model.get_dim_limits.return_value = ((-1, 1), (-2, 2))
data_view_mock = view.data_view
data_view_mock.plot_MDH = mock.Mock()
presenter = SliceViewer(None, model=model, view=view)
if enable_nonortho_axes:
data_view_mock.nonorthogonal_mode = True
data_view_mock.nonortho_transform = mock.MagicMock(NonOrthogonalTransform)
data_view_mock.nonortho_transform.tr.return_value = (0, 1)
presenter.nonorthogonal_axes(True)
else:
data_view_mock.nonorthogonal_mode = False
data_view_mock.nonortho_transform = None
data_view_mock.disable_tool_button.reset_mock()
data_view_mock.create_axes_orthogonal.reset_mock()
data_view_mock.create_axes_nonorthogonal.reset_mock()
mock_sliceinfo_instance = mock_sliceinfo_cls.return_value
mock_sliceinfo_instance.can_support_nonorthogonal_axes.return_value = supports_nonortho
return presenter, data_view_mock
def create_workspace_mock():
# Mock out workspace methods needed for SliceViewerModel.__init__
workspace = mock.Mock(spec=MultipleExperimentInfos)
workspace.isMDHistoWorkspace = lambda: False
workspace.getNumDims = lambda: 2
workspace.name = lambda: "workspace"
return workspace
class SliceViewerTest(unittest.TestCase):
def setUp(self):
self.view = mock.Mock(spec=SliceViewerView)
data_view = mock.Mock(spec=SliceViewerDataView)
data_view.plot_MDH = mock.Mock()
data_view.dimensions = mock.Mock()
data_view.norm_opts = mock.Mock()
data_view.image_info_widget = mock.Mock()
data_view.canvas = mock.Mock()
data_view.nonorthogonal_mode = False
data_view.nonortho_transform = None
data_view.get_axes_limits.return_value = None
dimensions = mock.Mock()
dimensions.get_slicepoint.return_value = [None, None, 0.5]
dimensions.transpose = False
dimensions.get_slicerange.return_value = [None, None, (-15, 15)]
dimensions.qflags = [True, True, True]
data_view.dimensions = dimensions
self.view.data_view = data_view
self.model = mock.Mock(spec=SliceViewerModel)
self.model.get_ws = mock.Mock()
self.model.get_data = mock.Mock()
self.model.rebin = mock.Mock()
self.model.workspace_equals = mock.Mock()
self.model.get_properties.return_value = {
"workspace_type": "WS_TYPE.MATRIX",
"supports_normalise": True,
"supports_nonorthogonal_axes": False,
"supports_dynamic_rebinning": False,
"supports_peaks_overlays": True
}
@patch("sip.isdeleted", return_value=False)
def test_sliceviewer_MDH(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDH)
presenter = SliceViewer(None, model=self.model, view=self.view)
# setup calls
self.assertEqual(self.model.get_dimensions_info.call_count, 0)
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.model.get_properties.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# new_plot
self.model.reset_mock()
self.view.reset_mock()
presenter.new_plot()
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# update_plot_data
self.model.reset_mock()
self.view.reset_mock()
presenter.update_plot_data()
self.assertEqual(self.model.get_data.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.update_plot_data.call_count, 1)
@patch("sip.isdeleted", return_value=False)
def test_sliceviewer_MDE(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDE)
presenter = SliceViewer(None, model=self.model, view=self.view)
# setup calls
self.assertEqual(self.model.get_dimensions_info.call_count, 0)
self.assertEqual(self.model.get_ws_MDE.call_count, 1)
self.assertEqual(self.model.get_properties.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_bin_params.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# new_plot
self.model.reset_mock()
self.view.reset_mock()
presenter.new_plot()
self.assertEqual(self.model.get_ws_MDE.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_bin_params.call_count, 1)
self.assertEqual(self.view.data_view.plot_MDH.call_count, 1)
# update_plot_data
self.model.reset_mock()
self.view.reset_mock()
presenter.update_plot_data()
self.assertEqual(self.model.get_data.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_bin_params.call_count, 1)
self.assertEqual(self.view.data_view.update_plot_data.call_count, 1)
@patch("sip.isdeleted", return_value=False)
def test_sliceviewer_matrix(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
presenter = SliceViewer(None, model=self.model, view=self.view)
# setup calls
self.assertEqual(self.model.get_dimensions_info.call_count, 0)
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.model.get_properties.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 0)
self.assertEqual(self.view.data_view.plot_matrix.call_count, 1)
# new_plot
self.model.reset_mock()
self.view.reset_mock()
presenter.new_plot()
self.assertEqual(self.model.get_ws.call_count, 1)
self.assertEqual(self.view.data_view.dimensions.get_slicepoint.call_count, 0)
self.assertEqual(self.view.data_view.plot_matrix.call_count, 1)
@patch("sip.isdeleted", return_value=False)
def test_normalization_change_set_correct_normalization(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
self.view.data_view.plot_matrix = mock.Mock()
presenter = SliceViewer(None, model=self.model, view=self.view)
presenter.normalization_changed("By bin width")
self.view.data_view.plot_matrix.assert_called_with(self.model.get_ws(), distribution=False)
def peaks_button_disabled_if_model_cannot_support_it(self):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
self.model.can_support_peaks_overlay.return_value = False
SliceViewer(None, model=self.model, view=self.view)
self.view.data_view.disable_tool_button.assert_called_once_with(ToolItemText.OVERLAY_PEAKS)
def peaks_button_not_disabled_if_model_can_support_it(self):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MATRIX)
self.model.can_support_peaks_overlay.return_value = True
SliceViewer(None, model=self.model, view=self.view)
self.view.data_view.disable_tool_button.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_non_orthogonal_axes_toggled_on(self, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDE)
self.model.get_dim_limits.return_value = ((-1, 1), (-2, 2))
self.model.is_ragged_matrix_plotted.return_value = False
data_view_mock = self.view.data_view
data_view_mock.plot_MDH = mock.Mock()
presenter = SliceViewer(None, model=self.model, view=self.view)
data_view_mock.plot_MDH.reset_mock() # clear initial plot call
data_view_mock.create_axes_orthogonal.reset_mock()
presenter.nonorthogonal_axes(True)
data_view_mock.deactivate_and_disable_tool.assert_called_once_with(
ToolItemText.REGIONSELECTION)
data_view_mock.create_axes_nonorthogonal.assert_called_once()
data_view_mock.create_axes_orthogonal.assert_not_called()
self.assertEqual(data_view_mock.plot_MDH.call_count, 2)
data_view_mock.disable_tool_button.assert_has_calls([mock.call(ToolItemText.LINEPLOTS)])
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_non_orthogonal_axes_toggled_off(self, mock_sliceinfo_cls, _):
self.model.get_ws_type = mock.Mock(return_value=WS_TYPE.MDE)
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=True,
supports_nonortho=True)
data_view_mock.plot_MDH.reset_mock() # clear initial plot call
data_view_mock.create_axes_orthogonal.reset_mock()
data_view_mock.create_axes_nonorthogonal.reset_mock()
data_view_mock.enable_tool_button.reset_mock()
data_view_mock.disable_tool_button.reset_mock()
data_view_mock.remove_line_plots.reset_mock()
presenter.nonorthogonal_axes(False)
data_view_mock.create_axes_orthogonal.assert_called_once()
data_view_mock.create_axes_nonorthogonal.assert_not_called()
data_view_mock.plot_MDH.assert_called_once()
data_view_mock.enable_tool_button.assert_has_calls(
(mock.call(ToolItemText.LINEPLOTS), mock.call(ToolItemText.REGIONSELECTION)))
@patch("sip.isdeleted", return_value=False)
def test_request_to_show_all_data_sets_correct_limits_on_view_MD(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.is_ragged_matrix_plotted.return_value = False
self.model.get_dim_limits.return_value = ((-1, 1), (-2, 2))
presenter.show_all_data_requested()
data_view = self.view.data_view
self.model.get_dim_limits.assert_called_once_with([None, None, 0.5],
data_view.dimensions.transpose)
data_view.get_full_extent.assert_not_called()
data_view.set_axes_limits.assert_called_once_with((-1, 1), (-2, 2))
@patch("sip.isdeleted", return_value=False)
def test_request_to_show_all_data_sets_correct_limits_on_view_ragged_matrix(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.is_ragged_matrix_plotted.return_value = True
self.view.data_view.get_full_extent.return_value = [-1, 1, -2, 2]
presenter.show_all_data_requested()
data_view = self.view.data_view
self.model.get_dim_limits.assert_not_called()
data_view.set_axes_limits.assert_called_once_with((-1, 1), (-2, 2))
@patch("sip.isdeleted", return_value=False)
def test_data_limits_changed_creates_new_plot_if_dynamic_rebinning_supported(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.can_support_dynamic_rebinning.return_value = True
new_plot_mock = mock.MagicMock()
presenter.new_plot = new_plot_mock
presenter.data_limits_changed()
new_plot_mock.assert_called_once()
@patch("sip.isdeleted", return_value=False)
def test_data_limits_changed_does_not_create_new_plot_if_dynamic_rebinning_not_supported(self, _):
presenter = SliceViewer(None, model=self.model, view=self.view)
self.model.can_support_dynamic_rebinning.return_value = False
new_plot_mock = mock.MagicMock()
presenter.new_plot = new_plot_mock
presenter.data_limits_changed()
new_plot_mock.assert_not_called()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_nonortho_mode_switches_to_ortho_when_dim_not_Q(
self, mock_sliceinfo_cls, is_view_delete):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=True,
supports_nonortho=False)
presenter.dimensions_changed()
data_view_mock.disable_tool_button.assert_called_once_with(ToolItemText.NONORTHOGONAL_AXES)
data_view_mock.create_axes_orthogonal.assert_called_once()
data_view_mock.create_axes_nonorthogonal.assert_not_called()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_nonortho_mode_keeps_nonortho_when_dim_is_Q(
self, mock_sliceinfo_cls, _):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=True,
supports_nonortho=True)
presenter.dimensions_changed()
data_view_mock.create_axes_nonorthogonal.assert_called_once()
data_view_mock.disable_tool_button.assert_not_called()
data_view_mock.create_axes_orthogonal.assert_not_called()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_ortho_mode_disables_nonortho_btn_if_not_supported(
self, mock_sliceinfo_cls, _):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.dimensions_changed()
data_view_mock.disable_tool_button.assert_called_once_with(ToolItemText.NONORTHOGONAL_AXES)
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
def test_changing_dimensions_in_ortho_mode_enables_nonortho_btn_if_supported(
self, mock_sliceinfo_cls, _):
presenter, data_view_mock = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=False,
supports_nonortho=True)
presenter.dimensions_changed()
data_view_mock.enable_tool_button.assert_called_once_with(ToolItemText.NONORTHOGONAL_AXES)
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.peaksviewer.presenter.TableWorkspaceDataPresenterStandard")
@mock.patch("mantidqt.widgets.sliceviewer.presenter.PeaksViewerCollectionPresenter",
spec=PeaksViewerCollectionPresenter)
def test_overlay_peaks_workspaces_attaches_view_and_draws_peaks(self, mock_peaks_presenter, *_):
for nonortho_axes in (False, True):
presenter, _ = _create_presenter(self.model, self.view, mock.MagicMock(), nonortho_axes,
nonortho_axes)
presenter.view.query_peaks_to_overlay.side_effect = ["peaks_workspace"]
presenter.overlay_peaks_workspaces()
presenter.view.query_peaks_to_overlay.assert_called_once()
mock_peaks_presenter.assert_called_once()
            mock_peaks_presenter.return_value.overlay_peaksworkspaces.assert_called_once()
mock_peaks_presenter.reset_mock()
presenter.view.query_peaks_to_overlay.reset_mock()
@patch("sip.isdeleted", return_value=False)
def test_gui_starts_with_zoom_selected(self, _):
SliceViewer(None, model=self.model, view=self.view)
self.view.data_view.activate_tool.assert_called_once_with(ToolItemText.ZOOM)
@patch("sip.isdeleted", return_value=False)
def test_replace_workspace_returns_when_the_workspace_is_not_the_model_workspace(self, _):
self.model.workspace_equals.return_value = False
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.update_view = mock.Mock()
presenter._decide_plot_update_methods = mock.Mock()
other_workspace = mock.Mock()
presenter.replace_workspace('other_workspace', other_workspace)
presenter._decide_plot_update_methods.assert_not_called()
presenter.update_view.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_replace_workspace_closes_view_when_model_properties_change(self, _):
self.model.workspace_equals.return_value = True
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.refresh_view = mock.Mock()
presenter._decide_plot_update_methods = mock.Mock()
workspace = create_workspace_mock()
# Not equivalent to self.model.get_properties()
new_model_properties = {
"workspace_type": "WS_TYPE.MDE",
"supports_normalise": False,
"supports_nonorthogonal_axes": False,
"supports_dynamic_rebinning": False,
"supports_peaks_overlays": True
}
with patch.object(SliceViewerModel, "get_properties", return_value=new_model_properties):
presenter.replace_workspace('workspace', workspace)
self.view.emit_close.assert_called_once()
presenter._decide_plot_update_methods.assert_not_called()
presenter.refresh_view.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_replace_workspace_updates_view(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
self.view.delayed_refresh = mock.Mock()
presenter._decide_plot_update_methods = mock.Mock(
return_value=(presenter.new_plot_matrix(), presenter.update_plot_data_matrix()))
workspace = create_workspace_mock()
new_model_properties = self.model.get_properties()
# Patch get_properties so that the properties of the new model match those of self.model
with patch.object(SliceViewerModel, "get_properties", return_value=new_model_properties):
presenter.replace_workspace('workspace', workspace)
self.view.emit_close.assert_not_called()
presenter._decide_plot_update_methods.assert_called_once()
self.view.delayed_refresh.assert_called_once()
@patch("sip.isdeleted", return_value=False)
def test_refresh_view(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.new_plot = mock.Mock()
presenter.refresh_view()
self.view.data_view.image_info_widget.setWorkspace.assert_called()
self.view.setWindowTitle.assert_called_with(self.model.get_title())
presenter.new_plot.assert_called_once()
@patch("sip.isdeleted", return_value=True)
def test_refresh_view_does_nothing_when_view_deleted(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter.new_plot = mock.Mock()
presenter.refresh_view()
self.view.data_view.image_info_widget.setWorkspace.assert_not_called()
presenter.new_plot.assert_not_called()
@patch("sip.isdeleted", return_value=False)
def test_clear_observer_peaks_presenter_not_none(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter._peaks_presenter = mock.MagicMock()
presenter.clear_observer()
presenter._peaks_presenter.clear_observer.assert_called_once()
@patch("sip.isdeleted", return_value=False)
def test_clear_observer_peaks_presenter_is_none(self, _):
presenter, _ = _create_presenter(self.model,
self.view,
mock.MagicMock(),
enable_nonortho_axes=False,
supports_nonortho=False)
presenter._peaks_presenter = None
# Will raise exception if misbehaving.
presenter.clear_observer()
@patch("sip.isdeleted", return_value=False)
@mock.patch("mantidqt.widgets.sliceviewer.presenter.SliceInfo")
@mock.patch("mantidqt.widgets.sliceviewer.presenter.PeaksViewerCollectionPresenter",
spec=PeaksViewerCollectionPresenter)
def test_peak_add_delete_event(self, mock_peaks_presenter, mock_sliceinfo_cls, _):
mock_sliceinfo_cls().inverse_transform = mock.Mock(side_effect=lambda pos: pos[::-1])
mock_sliceinfo_cls().z_value = 3
presenter, _ = _create_presenter(self.model,
self.view,
mock_sliceinfo_cls,
enable_nonortho_axes=False,
supports_nonortho=True)
presenter._peaks_presenter = mock_peaks_presenter
event = mock.Mock()
event.inaxes = True
event.xdata = 1.0
event.ydata = 2.0
presenter.add_delete_peak(event)
mock_sliceinfo_cls.get_sliceinfo.assert_not_called()
mock_peaks_presenter.add_delete_peak.assert_called_once_with([3, 2, 1])
self.view.data_view.canvas.draw_idle.assert_called_once()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
WiproOpenSourcePractice/bdreappstore | enu/real_time_event_detection/hadoopstream/reducer_train.py | 1 | 1545 | #!/usr/bin/env python
import sys
import os
os.environ['MPLCONFIGDIR'] = "/tmp/"
import pandas as pd
import numpy as np
import commands
import pickle as p
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold
from sklearn import preprocessing
from sklearn.externals import joblib
current_key = None
key = None
vecList = []
classList = []
def qt_rmvd( string ):
string = string.strip()
if string.startswith("'") and string.endswith("'"):
string = string[1:-1]
return string
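# Each line read from stdin is expected to be mapper output of the form
# (illustrative example):
#   some_key\t['0.12', '3.4', ..., '1']
# i.e. a tab-separated key followed by a bracketed, comma-separated list whose
# quoted elements are feature values and whose last element is the integer
# class label; qt_rmvd() strips the surrounding quotes from each element.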
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# parse the input we got from mapper.py
key, values = line.split('\t', 1)
values = values[1:-1]
values = values.split(",")
vec = [float(qt_rmvd(x)) for x in values[:-1]]
c = int(qt_rmvd(values[-1]))
vecList.append(vec)
classList.append(c)
#print c,'\n',vecList
vecList = np.asarray(vecList)
classList = np.asarray(classList)
min_max_scaler = preprocessing.MinMaxScaler()
vecList = min_max_scaler.fit_transform(vecList)
clf = svm.SVC(kernel='rbf', C = 1)
clf.fit(vecList,classList)
l = joblib.dump(clf, 'Schlumberger-SVM.pkl')
'''s = p.dumps(clf)
#print s
f = open("svm.model","w")
f.write(s)
f.close()'''
#print commands.getoutput("ls")
print commands.getoutput("hadoop fs -rm /user/dropuser/schlumberger-model/*")
for s in l:
print commands.getoutput("hadoop fs -put "+ s +" /user/dropuser/schlumberger-model")
print commands.getoutput("hadoop fs -ls /user/dropuser/schlumberger-model/")
| apache-2.0 |
harshaneelhg/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
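# A minimal, illustrative sketch (not part of the original example): when the
# data truly does not fit in memory, IncrementalPCA can also be fed explicit
# chunks via partial_fit instead of relying on batch_size, e.g.
#
#     ipca = IncrementalPCA(n_components=2)
#     for chunk in np.array_split(X, 10):
#         ipca.partial_fit(chunk)
#     X_ipca = ipca.transform(X)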
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
rhyolight/nupic.research | projects/union_path_integration/plot_comparison_to_ideal.py | 3 | 4358 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plot convergence chart that compares different algorithms."""
import argparse
from collections import defaultdict
import json
import os
import matplotlib.pyplot as plt
import numpy as np
CWD = os.path.dirname(os.path.realpath(__file__))
CHART_DIR = os.path.join(CWD, "charts")
def getCumulativeAccuracy(convergenceFrequencies):
tot = float(sum(convergenceFrequencies.values()))
results = []
cum = 0.0
for step in xrange(1, 41):
cum += convergenceFrequencies.get(str(step), 0)
results.append(cum / tot)
return results
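# Worked example (illustrative): convergenceFrequencies = {"1": 2, "2": 1, "3": 1}
# gives tot = 4 and a cumulative accuracy curve of [0.5, 0.75, 1.0, 1.0, ...]
# over the 40 sensation steps.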
def createChart(inFilename, outFilename, locationModuleWidths, legendPosition):
numSteps = 12
resultsByParams = defaultdict(list)
with open(inFilename, "r") as f:
experiments = json.load(f)
for exp in experiments:
locationModuleWidth = exp[0]["locationModuleWidth"]
resultsByParams[locationModuleWidth].append(
getCumulativeAccuracy(exp[1]["convergence"]))
with open("results/ideal.json", "r") as f:
idealResults = [getCumulativeAccuracy(trial)
for trial in json.load(f)]
with open("results/bof.json", "r") as f:
bofResults = [getCumulativeAccuracy(trial)
for trial in json.load(f)]
plt.figure(figsize=(3.25, 2.5), tight_layout = {"pad": 0})
data = (
[(idealResults, "Ideal Observer", "x--", 10, 1)]
+
[(resultsByParams[locationModuleWidth],
"{}x{} Cells Per Module".format(locationModuleWidth,
locationModuleWidth),
fmt,
None,
0)
for locationModuleWidth, fmt in zip(locationModuleWidths,
["s-", "o-", "^-"])]
+
[(bofResults, "Bag of Features", "d--", None, -1)])
percentiles = [5, 50, 95]
for resultsByTrial, label, fmt, markersize, zorder in data:
x = []
y = []
errBelow = []
errAbove = []
resultsByStep = zip(*resultsByTrial)
for step, results in zip(xrange(numSteps), resultsByStep):
x.append(step + 1)
p1, p2, p3 = np.percentile(results, percentiles)
y.append(p2)
errBelow.append(p2 - p1)
errAbove.append(p3 - p2)
plt.errorbar(x, y, yerr=[errBelow, errAbove], fmt=fmt, label=label,
capsize=2, markersize=markersize, zorder=zorder)
# Formatting
plt.xlabel("Number of Sensations")
plt.ylabel("Cumulative Accuracy")
plt.xticks([(i+1) for i in xrange(numSteps)])
# Remove the errorbars from the legend.
handles, labels = plt.gca().get_legend_handles_labels()
handles = [h[0] for h in handles]
plt.legend(handles, labels, loc="center right", bbox_to_anchor=legendPosition)
outFilePath = os.path.join(CHART_DIR, outFilename)
print "Saving", outFilePath
plt.savefig(outFilePath)
plt.clf()
if __name__ == "__main__":
plt.rc("font",**{"family": "sans-serif",
"sans-serif": ["Arial"],
"size": 8})
parser = argparse.ArgumentParser()
parser.add_argument("--inFile", type=str, required=True)
parser.add_argument("--outFile", type=str, required=True)
parser.add_argument("--locationModuleWidth", type=int, nargs='+',
default=[17, 20, 40])
parser.add_argument("--legendPosition", type=float, nargs=2, default=None)
args = parser.parse_args()
createChart(args.inFile, args.outFile, args.locationModuleWidth, args.legendPosition)
| gpl-3.0 |
wdecoster/NanoPlot | nanoplot/report.py | 1 | 8612 | import pandas as pd
import numpy as np
class BarcodeTitle(object):
"""Bit of a dummy class to add barcode titles to the report"""
def __init__(self, title):
self.title = title.upper()
def encode(self):
return ""
def chunks(values, chunks):
if values:
chunksize = int(len(values) / chunks)
return ([' '.join(values[i:i + chunksize]) for i in range(0, len(values), chunksize)])
else:
return [" "] * chunks
def html_stats(settings):
statsfile = settings["statsfile"]
filtered = settings["filtered"]
as_tsv = settings['tsv_stats']
stats_html = []
stats_html.append('<main class="grid-main"><h2>NanoPlot reports</h2>')
if filtered:
stats_html.append('<h3 id="stats0">Summary statistics prior to filtering</h3>')
if as_tsv:
stats_html.append(statsfile[0].to_html())
stats_html.append('<h3 id="stats1">Summary statistics after filtering</h3>')
stats_html.append(statsfile[1].to_html())
else:
stats_html.append(stats2html(statsfile[0]))
stats_html.append('<h3 id="stats1">Summary statistics after filtering</h3>')
stats_html.append(stats2html(statsfile[1]))
else:
stats_html.append('<h3 id="stats0">Summary statistics</h3>')
if as_tsv:
stats_html.append(statsfile[0].to_html())
else:
stats_html.append(stats2html(statsfile[0]))
return '\n'.join(stats_html)
def stats2html(statsf):
df = pd.read_csv(statsf, sep=':', header=None, names=['feature', 'value'])
values = df["value"].str.strip().str.replace('\t', ' ').str.split().replace(np.nan, '')
num = len(values[0]) or 1
v = [chunks(i, num) for i in values]
df = pd.DataFrame(v, index=df["feature"])
df.columns.name = None
df.index.name = None
return df.to_html(header=False)
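# stats2html() assumes a NanoStats-style text file of colon-separated
# "feature: value" lines, for example (illustrative):
#   Mean read length:    8,541.2
#   Number of reads:     1,000,000.0
# When a value holds several whitespace-separated entries (e.g. one per
# barcode), chunks() above regroups them into that many table columns.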
def html_toc(plots, filtered=False):
toc = []
toc.append('<h1 class="hiddentitle">NanoPlot statistics report</h1>')
toc.append('<header class="grid-header"><nav><h2 class="hiddentitle">Menu</h2><ul>')
if filtered:
toc.append(
'<li><a href="#stats0">Summary Statistics prior to filtering</a></li>')
toc.append(
'<li><a href="#stats1">Summary Statistics after filtering</a></li>')
else:
toc.append('<li><a href="#stats0">Summary Statistics</a></li>')
toc.append('<li class="submenu"><a href="#plots" class="submenubtn">Plots</a>')
toc.append('<ul class="submenu-items">')
toc.extend(['<li><a href="#'
+ p.title.replace(' ', '_') + '">' + p.title + '</a></li>' for p in plots])
toc.append('</ul>')
toc.append('</li>')
toc.append(
'<li class="issue-btn"><a href="https://github.com/wdecoster/NanoPlot/issues" target="_blank" class="reporting">Report issue on Github</a></li>')
toc.append('</ul></nav></header>')
return '\n'.join(toc)
def html_plots(plots):
html_plots = []
html_plots.append('<h3 id="plots">Plots</h3>')
for plot in plots:
html_plots.append('<button class="collapsible">' + plot.title + '</button>')
html_plots.append('<section class="collapsible-content"><h4 class="hiddentitle" id="' +
plot.title.replace(' ', '_') + '">' + plot.title + '</h4>')
html_plots.append(plot.encode())
html_plots.append('</section>')
html_plots.append(
'<script>var coll = document.getElementsByClassName("collapsible");var i;for (i = 0; i < coll.length; i++) {coll[i].addEventListener("click", function() {this.classList.toggle("active");var content = this.nextElementSibling;if (content.style.display === "none") {content.style.display = "block";} else {content.style.display = "none";}});}</script>')
return '\n'.join(html_plots)
def run_info(settings):
html_info = []
html_info.append('<h5>Run Info</h5>\n')
html_info.append('<h6>Data source:</h6>\n')
for k in ["fastq", "fasta", "fastq_rich", "fastq_minimal", "summary",
"bam", "ubam", "cram", "pickle", "feather"]:
html_info.append(f"{k}:\t{settings[k]}<br>")
html_info.append('<h6>Filtering parameters:</h6>\n')
for k in ['maxlength', 'minlength', 'drop_outliers', 'downsample', 'loglength',
'percentqual', 'alength', 'minqual', 'runtime_until', 'no_supplementary']:
html_info.append(f"{k}:\t{settings[k]}<br>")
# html_info.append('</p>')
return '\n'.join(html_info)
html_head = """
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<style>
body {margin:0}
.grid { /* grid definition for index page */
display: grid;
grid-template-areas: 'gheader'
'gmain';
margin: 0;
}
.grid > .grid-header { /* definition of the header on index page and its position in the grid */
grid-area: gheader;
}
.grid > .grid-main { /* definition of the main content on index page and its position in the grid */
grid-area: gmain;
}
nav {
text-align: center;
}
ul {
border-bottom: 1px solid white;
font-family: "Trebuchet MS", sans-serif;
list-style-type: none; /* remove dot symbols from list */
margin: 0;
padding: 0;
overflow: hidden; /* contains the overflow of the element if it goes 'out of bounds' */
background-color: #001f3f;
font-size: 1.6em;
}
ul > li > ul {
font-size: 1em;
}
li {
float: left; /* floats the list items to the left side of the page */
}
li a, .submenubutton {
display: inline-block; /* display the list items inline block so the items are vertically displayed */
color: white;
text-align: center;
padding: 14px 16px;
text-decoration: none; /* removes the underline that comes with the a tag */
}
li a:hover, .submenu:hover .submenubutton { /* when you hover over a submenu item the bkgrnd color is gray */
background-color: #39CCCC;
}
.submenu {
display: inline-block; /* idem to above, list items are displayed underneath each other */
}
.submenu-items { /* hides the ul */
display: none;
position: absolute;
background-color: #f9f9f9;
min-width: 160px;
z-index: 1;
}
.submenu-items li {
display: block;
float: none;
overflow: hidden;
}
.submenu-items li a { /* styling of the links in the submenu */
color: black;
padding: 12px 16px;
text-decoration: none;
display: block;
text-align: left;
}
.submenu-items a:hover {
background-color: #f1f1f1;
}
.submenu:hover .submenu-items {
display: block;
float: bottom;
overflow: hidden;
}
li {
border-right: 1px solid #bbb;
}
.issue-btn {
border-right: none;
float: right;
}
.hiddentitle { /* hides titles that are not necessary for content, but are for outline */
position: absolute;
width: 1px;
height: 1px;
overflow: hidden;
left: -10000px;
}
h2 { color: #111; font-family: 'Helvetica Neue', sans-serif; font-size: 60px; font-weight: bold; letter-spacing: -1px; line-height: 1; text-align: center; }
h3 { color: #111; font-family: 'Open Sans', sans-serif; font-size: 25px; font-weight: 300; line-height: 32px; text-align: center; padding-bottom: 0;}
h4 { color: #111; font-family: 'Helvetica Neue', sans-serif; font-size: 16px; font-weight: 150; margin: 0 0 0 0; text-align: left; padding:20px 0px 20px 0px;}
table {
font-family: Arial, Helvetica, sans-serif;
border-collapse: collapse;
table-layout: auto;
border-collapse: collapse;
width: 100%;
}
table td, table th {
border: 1px solid #ddd;
padding: 8px;
}
table tr:nth-child(even){background-color: #f2f2f2;}
table tr:hover {background-color: #ddd;}
/* Style the button that is used to open and close the collapsible content */
.collapsible {
background-color: #39CCCC;
color: white;
cursor: pointer;
padding: 18px;
width: 100%;
border: none;
text-align: left;
outline: none;
font-size: 15px;
}
/* Add a background color to the button if it is clicked on (add the .active class with JS), and when you move the mouse over it (hover) */
.active, .collapsible:hover {
color:white;
background-color: #001f3f;
}
/* Style the collapsible content. Note: hidden by default */
.collapsible-content {
padding: 0 18px;
display: block;
overflow: hidden;
background-color: #FFFFFF;
text-align: center;
}
.collapsible:after {
content: '-';
font-size: 20px;
font-weight: bold;
float: right;
color:white;
margin-left: 5px;
}
.active:after {
content: '+'; /* plus sign shown when the section is collapsed */
color: white;
}
</style>
<title>NanoPlot Report</title>
</head>
"""
| gpl-3.0 |
pydata/xarray | xarray/coding/cftime_offsets.py | 1 | 36290 | """Time offset classes for use with cftime.datetime objects"""
# The offset classes and mechanisms for generating time ranges defined in
# this module were copied/adapted from those defined in pandas. See in
# particular the objects and methods defined in pandas.tseries.offsets
# and pandas.core.indexes.datetimes.
# For reference, here is a copy of the pandas copyright notice:
# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
# All rights reserved.
# Copyright (c) 2008-2011 AQR Capital Management, LLC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from datetime import timedelta
from distutils.version import LooseVersion
from functools import partial
from typing import ClassVar, Optional
import numpy as np
from ..core.pdcompat import count_not_none
from .cftimeindex import CFTimeIndex, _parse_iso8601_with_reso
from .times import format_cftime_datetime
def get_date_type(calendar):
"""Return the cftime date type for a given calendar name."""
try:
import cftime
except ImportError:
raise ImportError("cftime is required for dates with non-standard calendars")
else:
calendars = {
"noleap": cftime.DatetimeNoLeap,
"360_day": cftime.Datetime360Day,
"365_day": cftime.DatetimeNoLeap,
"366_day": cftime.DatetimeAllLeap,
"gregorian": cftime.DatetimeGregorian,
"proleptic_gregorian": cftime.DatetimeProlepticGregorian,
"julian": cftime.DatetimeJulian,
"all_leap": cftime.DatetimeAllLeap,
"standard": cftime.DatetimeGregorian,
}
return calendars[calendar]
class BaseCFTimeOffset:
_freq: ClassVar[Optional[str]] = None
_day_option: ClassVar[Optional[str]] = None
def __init__(self, n=1):
if not isinstance(n, int):
raise TypeError(
"The provided multiple 'n' must be an integer. "
"Instead a value of type {!r} was provided.".format(type(n))
)
self.n = n
def rule_code(self):
return self._freq
def __eq__(self, other):
return self.n == other.n and self.rule_code() == other.rule_code()
def __ne__(self, other):
return not self == other
def __add__(self, other):
return self.__apply__(other)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract a cftime.datetime from a time offset.")
elif type(other) == type(self):
return type(self)(self.n - other.n)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n)
def __neg__(self):
return self * -1
def __rmul__(self, other):
return self.__mul__(other)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
if isinstance(other, BaseCFTimeOffset) and type(self) != type(other):
raise TypeError("Cannot subtract cftime offsets of differing types")
return -self + other
def __apply__(self):
return NotImplemented
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
test_date = (self + date) - self
return date == test_date
def rollforward(self, date):
if self.onOffset(date):
return date
else:
return date + type(self)()
def rollback(self, date):
if self.onOffset(date):
return date
else:
return date - type(self)()
def __str__(self):
return "<{}: n={}>".format(type(self).__name__, self.n)
def __repr__(self):
return str(self)
def _get_offset_day(self, other):
# subclass must implement `_day_option`; calling from the base class
# will raise NotImplementedError.
return _get_day_of_month(other, self._day_option)
def _get_day_of_month(other, day_option):
"""Find the day in `other`'s month that satisfies a BaseCFTimeOffset's
onOffset policy, as described by the `day_option` argument.
Parameters
----------
other : cftime.datetime
day_option : 'start', 'end'
'start': returns 1
'end': returns last day of the month
Returns
-------
day_of_month : int
"""
if day_option == "start":
return 1
elif day_option == "end":
return _days_in_month(other)
elif day_option is None:
# Note: unlike `_shift_month`, _get_day_of_month does not
# allow day_option = None
raise NotImplementedError()
else:
raise ValueError(day_option)
def _days_in_month(date):
"""The number of days in the month of the given date"""
if date.month == 12:
reference = type(date)(date.year + 1, 1, 1)
else:
reference = type(date)(date.year, date.month + 1, 1)
return (reference - timedelta(days=1)).day
def _adjust_n_months(other_day, n, reference_day):
"""Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
"""
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n
def _adjust_n_years(other, n, month, reference_day):
"""Adjust the number of times an annual offset is applied based on
another date, and the reference day provided"""
if n > 0:
if other.month < month or (other.month == month and other.day < reference_day):
n -= 1
else:
if other.month > month or (other.month == month and other.day > reference_day):
n += 1
return n
def _shift_month(date, months, day_option="start"):
"""Shift the date to a month start or end a given number of months away."""
import cftime
delta_year = (date.month + months) // 12
month = (date.month + months) % 12
if month == 0:
month = 12
delta_year = delta_year - 1
year = date.year + delta_year
if day_option == "start":
day = 1
elif day_option == "end":
reference = type(date)(year, month, 1)
day = _days_in_month(reference)
else:
raise ValueError(day_option)
if LooseVersion(cftime.__version__) < LooseVersion("1.0.4"):
# dayofwk=-1 is required to update the dayofwk and dayofyr attributes of
# the returned date object in versions of cftime between 1.0.2 and
# 1.0.3.4. It can be removed for versions of cftime greater than
# 1.0.3.4.
return date.replace(year=year, month=month, day=day, dayofwk=-1)
else:
return date.replace(year=year, month=month, day=day)
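# Usage sketch (illustrative, not from the upstream module). Assuming cftime is
# installed, `_shift_month` moves a date to a month start or end, e.g. in the
# "noleap" calendar:
# >>> import cftime
# >>> _shift_month(cftime.DatetimeNoLeap(2000, 1, 31), 1, "end")     # -> 2000-02-28
# >>> _shift_month(cftime.DatetimeNoLeap(2000, 1, 31), -2, "start")  # -> 1999-11-01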
def roll_qtrday(other, n, month, day_option, modby=3):
"""Possibly increment or decrement the number of periods to shift
based on rollforward/rollbackward conventions.
Parameters
----------
other : cftime.datetime
n : number of periods to increment, before adjusting for rolling
month : int reference month giving the first month of the year
day_option : 'start', 'end'
The convention to use in finding the day in a given month against
which to compare for rollforward/rollbackward decisions.
modby : int 3 for quarters, 12 for years
Returns
-------
n : int number of periods to increment
See Also
--------
_get_day_of_month : Find the day in a month provided an offset.
"""
months_since = other.month % modby - month % modby
if n > 0:
if months_since < 0 or (
months_since == 0 and other.day < _get_day_of_month(other, day_option)
):
# pretend to roll back if on same month but
# before compare_day
n -= 1
else:
if months_since > 0 or (
months_since == 0 and other.day > _get_day_of_month(other, day_option)
):
# make sure to roll forward, so negate
n += 1
return n
def _validate_month(month, default_month):
result_month = default_month if month is None else month
if not isinstance(result_month, int):
raise TypeError(
"'self.month' must be an integer value between 1 "
"and 12. Instead, it was set to a value of "
"{!r}".format(result_month)
)
elif not (1 <= result_month <= 12):
raise ValueError(
"'self.month' must be an integer value between 1 "
"and 12. Instead, it was set to a value of "
"{!r}".format(result_month)
)
return result_month
class MonthBegin(BaseCFTimeOffset):
_freq = "MS"
def __apply__(self, other):
n = _adjust_n_months(other.day, self.n, 1)
return _shift_month(other, n, "start")
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == 1
class MonthEnd(BaseCFTimeOffset):
_freq = "M"
def __apply__(self, other):
n = _adjust_n_months(other.day, self.n, _days_in_month(other))
return _shift_month(other, n, "end")
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == _days_in_month(date)
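# Usage sketch (illustrative, not from the upstream module). Adding a MonthEnd
# offset snaps to the end of the current month, while MonthBegin().rollforward
# moves a non-month-start date forward to the next month start:
# >>> import cftime
# >>> cftime.DatetimeGregorian(2000, 2, 14) + MonthEnd()               # -> 2000-02-29
# >>> MonthBegin().rollforward(cftime.DatetimeGregorian(2000, 2, 14))  # -> 2000-03-01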
_MONTH_ABBREVIATIONS = {
1: "JAN",
2: "FEB",
3: "MAR",
4: "APR",
5: "MAY",
6: "JUN",
7: "JUL",
8: "AUG",
9: "SEP",
10: "OCT",
11: "NOV",
12: "DEC",
}
class QuarterOffset(BaseCFTimeOffset):
"""Quarter representation copied off of pandas/tseries/offsets.py"""
_freq: ClassVar[str]
_default_month: ClassVar[int]
def __init__(self, n=1, month=None):
BaseCFTimeOffset.__init__(self, n)
self.month = _validate_month(month, self._default_month)
def __apply__(self, other):
# months_since: find the calendar quarter containing other.month,
# e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].
# Then find the month in that quarter containing an onOffset date for
# self. `months_since` is the number of months to shift other.month
# to get to this on-offset month.
months_since = other.month % 3 - self.month % 3
qtrs = roll_qtrday(
other, self.n, self.month, day_option=self._day_option, modby=3
)
months = qtrs * 3 - months_since
return _shift_month(other, months, self._day_option)
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
mod_month = (date.month - self.month) % 3
return mod_month == 0 and date.day == self._get_offset_day(date)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract cftime.datetime from offset.")
elif type(other) == type(self) and other.month == self.month:
return type(self)(self.n - other.n, month=self.month)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n, month=self.month)
def rule_code(self):
return "{}-{}".format(self._freq, _MONTH_ABBREVIATIONS[self.month])
def __str__(self):
return "<{}: n={}, month={}>".format(type(self).__name__, self.n, self.month)
class QuarterBegin(QuarterOffset):
# When converting a string to an offset, pandas converts
# 'QS' to a QuarterBegin offset starting in the month of
# January. When creating a QuarterBegin offset directly
# from the constructor, however, the default month is March.
# We follow that behavior here.
_default_month = 3
_freq = "QS"
_day_option = "start"
def rollforward(self, date):
"""Roll date forward to nearest start of quarter"""
if self.onOffset(date):
return date
else:
return date + QuarterBegin(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest start of quarter"""
if self.onOffset(date):
return date
else:
return date - QuarterBegin(month=self.month)
class QuarterEnd(QuarterOffset):
# When converting a string to an offset, pandas converts
# 'Q' to a QuarterEnd offset starting in the month of
# December. When creating a QuarterEnd offset directly
# from the constructor, however, the default month is March.
# We follow that behavior here.
_default_month = 3
_freq = "Q"
_day_option = "end"
def rollforward(self, date):
"""Roll date forward to nearest end of quarter"""
if self.onOffset(date):
return date
else:
return date + QuarterEnd(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest end of quarter"""
if self.onOffset(date):
return date
else:
return date - QuarterEnd(month=self.month)
class YearOffset(BaseCFTimeOffset):
_freq: ClassVar[str]
_day_option: ClassVar[str]
_default_month: ClassVar[int]
def __init__(self, n=1, month=None):
BaseCFTimeOffset.__init__(self, n)
self.month = _validate_month(month, self._default_month)
def __apply__(self, other):
reference_day = _get_day_of_month(other, self._day_option)
years = _adjust_n_years(other, self.n, self.month, reference_day)
months = years * 12 + (self.month - other.month)
return _shift_month(other, months, self._day_option)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract cftime.datetime from offset.")
elif type(other) == type(self) and other.month == self.month:
return type(self)(self.n - other.n, month=self.month)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n, month=self.month)
def rule_code(self):
return "{}-{}".format(self._freq, _MONTH_ABBREVIATIONS[self.month])
def __str__(self):
return "<{}: n={}, month={}>".format(type(self).__name__, self.n, self.month)
class YearBegin(YearOffset):
_freq = "AS"
_day_option = "start"
_default_month = 1
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == 1 and date.month == self.month
def rollforward(self, date):
"""Roll date forward to nearest start of year"""
if self.onOffset(date):
return date
else:
return date + YearBegin(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest start of year"""
if self.onOffset(date):
return date
else:
return date - YearBegin(month=self.month)
class YearEnd(YearOffset):
_freq = "A"
_day_option = "end"
_default_month = 12
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == _days_in_month(date) and date.month == self.month
def rollforward(self, date):
"""Roll date forward to nearest end of year"""
if self.onOffset(date):
return date
else:
return date + YearEnd(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest end of year"""
if self.onOffset(date):
return date
else:
return date - YearEnd(month=self.month)
class Day(BaseCFTimeOffset):
_freq = "D"
def as_timedelta(self):
return timedelta(days=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Hour(BaseCFTimeOffset):
_freq = "H"
def as_timedelta(self):
return timedelta(hours=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Minute(BaseCFTimeOffset):
_freq = "T"
def as_timedelta(self):
return timedelta(minutes=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Second(BaseCFTimeOffset):
_freq = "S"
def as_timedelta(self):
return timedelta(seconds=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Millisecond(BaseCFTimeOffset):
_freq = "L"
def as_timedelta(self):
return timedelta(milliseconds=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Microsecond(BaseCFTimeOffset):
_freq = "U"
def as_timedelta(self):
return timedelta(microseconds=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
_FREQUENCIES = {
"A": YearEnd,
"AS": YearBegin,
"Y": YearEnd,
"YS": YearBegin,
"Q": partial(QuarterEnd, month=12),
"QS": partial(QuarterBegin, month=1),
"M": MonthEnd,
"MS": MonthBegin,
"D": Day,
"H": Hour,
"T": Minute,
"min": Minute,
"S": Second,
"L": Millisecond,
"ms": Millisecond,
"U": Microsecond,
"us": Microsecond,
"AS-JAN": partial(YearBegin, month=1),
"AS-FEB": partial(YearBegin, month=2),
"AS-MAR": partial(YearBegin, month=3),
"AS-APR": partial(YearBegin, month=4),
"AS-MAY": partial(YearBegin, month=5),
"AS-JUN": partial(YearBegin, month=6),
"AS-JUL": partial(YearBegin, month=7),
"AS-AUG": partial(YearBegin, month=8),
"AS-SEP": partial(YearBegin, month=9),
"AS-OCT": partial(YearBegin, month=10),
"AS-NOV": partial(YearBegin, month=11),
"AS-DEC": partial(YearBegin, month=12),
"A-JAN": partial(YearEnd, month=1),
"A-FEB": partial(YearEnd, month=2),
"A-MAR": partial(YearEnd, month=3),
"A-APR": partial(YearEnd, month=4),
"A-MAY": partial(YearEnd, month=5),
"A-JUN": partial(YearEnd, month=6),
"A-JUL": partial(YearEnd, month=7),
"A-AUG": partial(YearEnd, month=8),
"A-SEP": partial(YearEnd, month=9),
"A-OCT": partial(YearEnd, month=10),
"A-NOV": partial(YearEnd, month=11),
"A-DEC": partial(YearEnd, month=12),
"QS-JAN": partial(QuarterBegin, month=1),
"QS-FEB": partial(QuarterBegin, month=2),
"QS-MAR": partial(QuarterBegin, month=3),
"QS-APR": partial(QuarterBegin, month=4),
"QS-MAY": partial(QuarterBegin, month=5),
"QS-JUN": partial(QuarterBegin, month=6),
"QS-JUL": partial(QuarterBegin, month=7),
"QS-AUG": partial(QuarterBegin, month=8),
"QS-SEP": partial(QuarterBegin, month=9),
"QS-OCT": partial(QuarterBegin, month=10),
"QS-NOV": partial(QuarterBegin, month=11),
"QS-DEC": partial(QuarterBegin, month=12),
"Q-JAN": partial(QuarterEnd, month=1),
"Q-FEB": partial(QuarterEnd, month=2),
"Q-MAR": partial(QuarterEnd, month=3),
"Q-APR": partial(QuarterEnd, month=4),
"Q-MAY": partial(QuarterEnd, month=5),
"Q-JUN": partial(QuarterEnd, month=6),
"Q-JUL": partial(QuarterEnd, month=7),
"Q-AUG": partial(QuarterEnd, month=8),
"Q-SEP": partial(QuarterEnd, month=9),
"Q-OCT": partial(QuarterEnd, month=10),
"Q-NOV": partial(QuarterEnd, month=11),
"Q-DEC": partial(QuarterEnd, month=12),
}
_FREQUENCY_CONDITION = "|".join(_FREQUENCIES.keys())
_PATTERN = fr"^((?P<multiple>\d+)|())(?P<freq>({_FREQUENCY_CONDITION}))$"
# pandas defines these offsets as "Tick" objects, which for instance have
# distinct behavior from monthly or longer frequencies in resample.
CFTIME_TICKS = (Day, Hour, Minute, Second)
def to_offset(freq):
"""Convert a frequency string to the appropriate subclass of
BaseCFTimeOffset."""
if isinstance(freq, BaseCFTimeOffset):
return freq
else:
try:
freq_data = re.match(_PATTERN, freq).groupdict()
except AttributeError:
raise ValueError("Invalid frequency string provided")
freq = freq_data["freq"]
multiples = freq_data["multiple"]
multiples = 1 if multiples is None else int(multiples)
return _FREQUENCIES[freq](n=multiples)
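# Usage sketch (illustrative, not from the upstream module). Frequency strings
# map to offset instances, with an optional integer multiple prefix:
# >>> to_offset("5D")
# <Day: n=5>
# >>> to_offset("2MS")
# <MonthBegin: n=2>
# >>> to_offset("QS-JUN")
# <QuarterBegin: n=1, month=6>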
def to_cftime_datetime(date_str_or_date, calendar=None):
import cftime
if isinstance(date_str_or_date, str):
if calendar is None:
raise ValueError(
"If converting a string to a cftime.datetime object, "
"a calendar type must be provided"
)
date, _ = _parse_iso8601_with_reso(get_date_type(calendar), date_str_or_date)
return date
elif isinstance(date_str_or_date, cftime.datetime):
return date_str_or_date
else:
raise TypeError(
"date_str_or_date must be a string or a "
"subclass of cftime.datetime. Instead got "
"{!r}.".format(date_str_or_date)
)
def normalize_date(date):
"""Round datetime down to midnight."""
return date.replace(hour=0, minute=0, second=0, microsecond=0)
def _maybe_normalize_date(date, normalize):
"""Round datetime down to midnight if normalize is True."""
if normalize:
return normalize_date(date)
else:
return date
def _generate_linear_range(start, end, periods):
"""Generate an equally-spaced sequence of cftime.datetime objects between
and including two dates (whose length equals the number of periods)."""
import cftime
total_seconds = (end - start).total_seconds()
values = np.linspace(0.0, total_seconds, periods, endpoint=True)
units = "seconds since {}".format(format_cftime_datetime(start))
calendar = start.calendar
return cftime.num2date(
values, units=units, calendar=calendar, only_use_cftime_datetimes=True
)
def _generate_range(start, end, periods, offset):
"""Generate a regular range of cftime.datetime objects with a
given time offset.
Adapted from pandas.tseries.offsets.generate_range.
Parameters
----------
start : cftime.datetime, or None
Start of range
end : cftime.datetime, or None
End of range
periods : int, or None
Number of elements in the sequence
offset : BaseCFTimeOffset
An offset class designed for working with cftime.datetime objects
Returns
-------
A generator object
"""
if start:
start = offset.rollforward(start)
if end:
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
current = start
if offset.n >= 0:
while current <= end:
yield current
next_date = current + offset
if next_date <= current:
raise ValueError(f"Offset {offset} did not increment date")
current = next_date
else:
while current >= end:
yield current
next_date = current + offset
if next_date >= current:
raise ValueError(f"Offset {offset} did not decrement date")
current = next_date
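# Usage sketch (illustrative, not from the upstream module). With month-start
# dates in the "noleap" calendar, the generator yields one date per offset step
# between the (rolled) start and end:
# >>> import cftime
# >>> start = cftime.DatetimeNoLeap(2000, 1, 1)
# >>> end = cftime.DatetimeNoLeap(2000, 3, 1)
# >>> list(_generate_range(start, end, None, to_offset("MS")))
# three cftime.DatetimeNoLeap dates: 2000-01-01, 2000-02-01, 2000-03-01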
def cftime_range(
start=None,
end=None,
periods=None,
freq="D",
normalize=False,
name=None,
closed=None,
calendar="standard",
):
"""Return a fixed frequency CFTimeIndex.
Parameters
----------
start : str or cftime.datetime, optional
Left bound for generating dates.
end : str or cftime.datetime, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str or None, default: "D"
Frequency strings can have multiples, e.g. "5H".
normalize : bool, default: False
Normalize start/end dates to midnight before generating date range.
name : str, default: None
Name of the resulting index
closed : {"left", "right"} or None, default: None
Make the interval closed with respect to the given frequency to the
"left", "right", or both sides (None).
calendar : str, default: "standard"
Calendar type for the datetimes.
Returns
-------
CFTimeIndex
Notes
-----
This function is an analog of ``pandas.date_range`` for use in generating
sequences of ``cftime.datetime`` objects. It supports most of the
features of ``pandas.date_range`` (e.g. specifying how the index is
``closed`` on either side, or whether or not to ``normalize`` the start and
end bounds); however, there are some notable exceptions:
- You cannot specify a ``tz`` (time zone) argument.
- Start or end dates specified as partial-datetime strings must use the
`ISO-8601 format <https://en.wikipedia.org/wiki/ISO_8601>`_.
- It supports many, but not all, frequencies supported by
``pandas.date_range``. For example it does not currently support any of
the business-related or semi-monthly frequencies.
- Compound sub-monthly frequencies are not supported, e.g. '1H1min', as
these can easily be written in terms of the finest common resolution,
e.g. '61min'.
Valid simple frequency strings for use with ``cftime``-calendars include
any multiples of the following.
+--------+--------------------------+
| Alias | Description |
+========+==========================+
| A, Y | Year-end frequency |
+--------+--------------------------+
| AS, YS | Year-start frequency |
+--------+--------------------------+
| Q | Quarter-end frequency |
+--------+--------------------------+
| QS | Quarter-start frequency |
+--------+--------------------------+
| M | Month-end frequency |
+--------+--------------------------+
| MS | Month-start frequency |
+--------+--------------------------+
| D | Day frequency |
+--------+--------------------------+
| H | Hour frequency |
+--------+--------------------------+
| T, min | Minute frequency |
+--------+--------------------------+
| S | Second frequency |
+--------+--------------------------+
| L, ms | Millisecond frequency |
+--------+--------------------------+
| U, us | Microsecond frequency |
+--------+--------------------------+
Any multiples of the following anchored offsets are also supported.
+----------+--------------------------------------------------------------------+
| Alias | Description |
+==========+====================================================================+
| A(S)-JAN | Annual frequency, anchored at the end (or beginning) of January |
+----------+--------------------------------------------------------------------+
| A(S)-FEB | Annual frequency, anchored at the end (or beginning) of February |
+----------+--------------------------------------------------------------------+
| A(S)-MAR | Annual frequency, anchored at the end (or beginning) of March |
+----------+--------------------------------------------------------------------+
| A(S)-APR | Annual frequency, anchored at the end (or beginning) of April |
+----------+--------------------------------------------------------------------+
| A(S)-MAY | Annual frequency, anchored at the end (or beginning) of May |
+----------+--------------------------------------------------------------------+
| A(S)-JUN | Annual frequency, anchored at the end (or beginning) of June |
+----------+--------------------------------------------------------------------+
| A(S)-JUL | Annual frequency, anchored at the end (or beginning) of July |
+----------+--------------------------------------------------------------------+
| A(S)-AUG | Annual frequency, anchored at the end (or beginning) of August |
+----------+--------------------------------------------------------------------+
| A(S)-SEP | Annual frequency, anchored at the end (or beginning) of September |
+----------+--------------------------------------------------------------------+
| A(S)-OCT | Annual frequency, anchored at the end (or beginning) of October |
+----------+--------------------------------------------------------------------+
| A(S)-NOV | Annual frequency, anchored at the end (or beginning) of November |
+----------+--------------------------------------------------------------------+
| A(S)-DEC | Annual frequency, anchored at the end (or beginning) of December |
+----------+--------------------------------------------------------------------+
| Q(S)-JAN | Quarter frequency, anchored at the end (or beginning) of January |
+----------+--------------------------------------------------------------------+
| Q(S)-FEB | Quarter frequency, anchored at the end (or beginning) of February |
+----------+--------------------------------------------------------------------+
| Q(S)-MAR | Quarter frequency, anchored at the end (or beginning) of March |
+----------+--------------------------------------------------------------------+
| Q(S)-APR | Quarter frequency, anchored at the end (or beginning) of April |
+----------+--------------------------------------------------------------------+
| Q(S)-MAY | Quarter frequency, anchored at the end (or beginning) of May |
+----------+--------------------------------------------------------------------+
| Q(S)-JUN | Quarter frequency, anchored at the end (or beginning) of June |
+----------+--------------------------------------------------------------------+
| Q(S)-JUL | Quarter frequency, anchored at the end (or beginning) of July |
+----------+--------------------------------------------------------------------+
| Q(S)-AUG | Quarter frequency, anchored at the end (or beginning) of August |
+----------+--------------------------------------------------------------------+
| Q(S)-SEP | Quarter frequency, anchored at the end (or beginning) of September |
+----------+--------------------------------------------------------------------+
| Q(S)-OCT | Quarter frequency, anchored at the end (or beginning) of October |
+----------+--------------------------------------------------------------------+
| Q(S)-NOV | Quarter frequency, anchored at the end (or beginning) of November |
+----------+--------------------------------------------------------------------+
| Q(S)-DEC | Quarter frequency, anchored at the end (or beginning) of December |
+----------+--------------------------------------------------------------------+
Finally, the following calendar aliases are supported.
+--------------------------------+---------------------------------------+
| Alias | Date type |
+================================+=======================================+
| standard, gregorian | ``cftime.DatetimeGregorian`` |
+--------------------------------+---------------------------------------+
| proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` |
+--------------------------------+---------------------------------------+
| noleap, 365_day | ``cftime.DatetimeNoLeap`` |
+--------------------------------+---------------------------------------+
| all_leap, 366_day | ``cftime.DatetimeAllLeap`` |
+--------------------------------+---------------------------------------+
| 360_day | ``cftime.Datetime360Day`` |
+--------------------------------+---------------------------------------+
| julian | ``cftime.DatetimeJulian`` |
+--------------------------------+---------------------------------------+
Examples
--------
This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``
objects associated with the specified calendar type, e.g.
>>> xr.cftime_range(start="2000", periods=6, freq="2MS", calendar="noleap")
CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,
2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],
dtype='object', length=6, calendar='noleap', freq='2MS')
As in the standard pandas function, three of the ``start``, ``end``,
``periods``, or ``freq`` arguments must be specified at a given time, with
the other set to ``None``. See the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html>`_
for more examples of the behavior of ``date_range`` with each of the
parameters.
See Also
--------
pandas.date_range
"""
# Adapted from pandas.core.indexes.datetimes._generate_range.
if count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the arguments 'start', 'end', 'periods', and 'freq', three "
"must be specified at a time."
)
if start is not None:
start = to_cftime_datetime(start, calendar)
start = _maybe_normalize_date(start, normalize)
if end is not None:
end = to_cftime_datetime(end, calendar)
end = _maybe_normalize_date(end, normalize)
if freq is None:
dates = _generate_linear_range(start, end, periods)
else:
offset = to_offset(freq)
dates = np.array(list(_generate_range(start, end, periods, offset)))
left_closed = False
right_closed = False
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed must be either 'left', 'right' or None")
if not left_closed and len(dates) and start is not None and dates[0] == start:
dates = dates[1:]
if not right_closed and len(dates) and end is not None and dates[-1] == end:
dates = dates[:-1]
return CFTimeIndex(dates, name=name)
| apache-2.0 |
kcavagnolo/astroML | astroML/clustering/mst_clustering.py | 3 | 6348 | """
Minimum Spanning Tree Clustering
"""
import numpy as np
from scipy import sparse
from sklearn.neighbors import kneighbors_graph
from sklearn.mixture import GMM
try:
from scipy.sparse.csgraph import \
minimum_spanning_tree, connected_components
except ImportError:
raise ValueError("scipy v0.11 or greater required "
"for minimum spanning tree")
class HierarchicalClustering(object):
"""Hierarchical Clustering via Approximate Euclidean Minimum Spanning Tree
Parameters
----------
n_neighbors : int
number of neighbors of each point used for approximate Euclidean
minimum spanning tree (MST) algorithm. See Notes below.
edge_cutoff : float
specify a fraction of edges to keep when selecting clusters.
edge_cutoff should be between 0 and 1.
min_cluster_size : int, optional
specify a minimum number of points per cluster. If not specified,
all clusters will be kept.
Attributes
----------
X_train_ : ndarray
the training data
full_tree_ : sparse graph
the full approximate Euclidean MST spanning the data
cluster_graph_ : sparse graph
the final (truncated) graph showing clusters
n_components_ : int
the number of clusters found.
labels_ : int
the cluster labels for each training point. Labels range from -1
to n_components_ - 1: points labeled -1 are in the background (i.e.
their clusters were smaller than min_cluster_size)
Notes
-----
This routine uses an approximate Euclidean minimum spanning tree (MST)
to perform hierarchical clustering. A true Euclidean minimum spanning
tree naively costs O[N^3]. Graph traversal algorithms only help so much,
because all N^2 edges must be used as candidates. In this approximate
algorithm, we use k < N edges from each point, so that the cost is only
O[Nk log(Nk)]. For k = N, the approximation is exact; in practice for
well-behaved data sets, the result is exact for k << N.
"""
def __init__(self, n_neighbors=20,
edge_cutoff=0.9,
min_cluster_size=1):
self.n_neighbors = n_neighbors
self.edge_cutoff = edge_cutoff
self.min_cluster_size = min_cluster_size
def fit(self, X):
"""Fit the clustering model
Parameters
----------
X : array_like
the data to be clustered: shape = [n_samples, n_features]
"""
X = np.asarray(X, dtype=float)
self.X_train_ = X
# generate a sparse graph using the k nearest neighbors of each point
G = kneighbors_graph(X, n_neighbors=self.n_neighbors, mode='distance')
# Compute the minimum spanning tree of this graph
self.full_tree_ = minimum_spanning_tree(G, overwrite=True)
# Find the cluster labels
self.n_components_, self.labels_, self.cluster_graph_ =\
self.compute_clusters()
return self
def compute_clusters(self, edge_cutoff=None, min_cluster_size=None):
"""Compute the clusters given a trained tree
After fit() is called, this method may be called to obtain a
clustering result with a new edge_cutoff and min_cluster_size.
Parameters
----------
edge_cutoff : float, optional
specify a fraction of edges to keep when selecting clusters.
edge_cutoff should be between 0 and 1. If not specified,
self.edge_cutoff will be used.
min_cluster_size : int, optional
specify a minimum number of points per cluster. If not specified,
self.min_cluster_size will be used.
Returns
-------
n_components : int
the number of clusters found
labels : ndarray
the labels of each point. Labels range from -1 to
n_components_ - 1: points labeled -1 are in the background
(i.e. their clusters were smaller than min_cluster_size)
T_trunc : sparse matrix
the truncated minimum spanning tree
"""
if edge_cutoff is None:
edge_cutoff = self.edge_cutoff
if min_cluster_size is None:
min_cluster_size = self.min_cluster_size
if not hasattr(self, 'full_tree_'):
raise ValueError("must call fit() before calling "
"compute_clusters()")
T_trunc = self.full_tree_.copy()
# cut-off edges at the percentile given by edge_cutoff
cutoff = np.percentile(T_trunc.data, 100 * edge_cutoff)
T_trunc.data[T_trunc.data > cutoff] = 0
T_trunc.eliminate_zeros()
# find connected components
n_components, labels = connected_components(T_trunc, directed=False)
counts = np.bincount(labels)
# for all components with less than min_cluster_size points, set
# to background, and re-label the clusters
i_bg = np.where(counts < min_cluster_size)[0]
for i in i_bg:
labels[labels == i] = -1
if len(i_bg) > 0:
_, labels = np.unique(labels, return_inverse=True)
labels -= 1
n_components = labels.max() + 1
# eliminate links in T_trunc which are not clusters
I = sparse.eye(len(labels), len(labels))
I.data[0, labels < 0] = 0
T_trunc = I * T_trunc * I
return n_components, labels, T_trunc
def get_graph_segments(X, G):
"""Get graph segments for plotting a 2D graph
Parameters
----------
X : array_like
the data, of shape [n_samples, 2]
G : array_like or sparse graph
        the [n_samples, n_samples] matrix encoding the graph of connections
on X
Returns
-------
x_coords, y_coords : ndarrays
the x and y coordinates for plotting the graph. They are of size
[2, n_links], and can be visualized using
``plt.plot(x_coords, y_coords, '-k')``
"""
X = np.asarray(X)
if (X.ndim != 2) or (X.shape[1] != 2):
raise ValueError('shape of X should be (n_samples, 2)')
n_samples = X.shape[0]
G = sparse.coo_matrix(G)
A = X[G.row].T
B = X[G.col].T
x_coords = np.vstack([A[0], B[0]])
y_coords = np.vstack([A[1], B[1]])
return x_coords, y_coords
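# Usage sketch (illustrative, not from the upstream module); the random point
# cloud below is an assumption, not astroML test data:
# >>> import numpy as np
# >>> X = np.random.RandomState(0).rand(100, 2)
# >>> model = HierarchicalClustering(n_neighbors=10, edge_cutoff=0.9,
# ...                                min_cluster_size=5).fit(X)
# >>> model.n_components_, model.labels_.shape   # clusters found, one label per point
# >>> x_coords, y_coords = get_graph_segments(model.X_train_, model.full_tree_)
# >>> # plt.plot(x_coords, y_coords, '-k') then draws the approximate Euclidean MST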
| bsd-2-clause |
strongh/GPy | GPy/models/sparse_gplvm.py | 12 | 1818 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import sys
from GPy.models.sparse_gp_regression import SparseGPRegression
class SparseGPLVM(SparseGPRegression):
"""
Sparse Gaussian Process Latent Variable Model
:param Y: observed data
:type Y: np.ndarray
:param input_dim: latent dimensionality
:type input_dim: int
:param init: initialisation method for the latent space
:type init: 'PCA'|'random'
"""
def __init__(self, Y, input_dim, X=None, kernel=None, init='PCA', num_inducing=10):
if X is None:
from ..util.initialization import initialize_latent
X, fracs = initialize_latent(init, input_dim, Y)
SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
def parameters_changed(self):
super(SparseGPLVM, self).parameters_changed()
self.X.gradient = self.kern.gradients_X_diag(self.grad_dict['dL_dKdiag'], self.X)
self.X.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'], self.X, self.Z)
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, plot_inducing=True, legend=True,
plot_limits=None,
aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import dim_reduction_plots
return dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, plot_inducing, legend,
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
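# Usage sketch (illustrative, not from the upstream module); the toy data are
# an assumption. Constructing the model initialises the latent space via PCA:
# >>> import numpy as np
# >>> Y = np.random.randn(50, 5)   # 50 observations, 5 output dimensions
# >>> m = SparseGPLVM(Y, input_dim=2, init='PCA', num_inducing=10)
# The 2-D latent coordinates are then available as m.X and are optimised
# jointly with the kernel parameters when the model is trained.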
| bsd-3-clause |
RomainBrault/operalib | examples/plot_ovk_sparse_quantile_regression.py | 2 | 2715 | """
================================================================
Data sparse quantile regression with operator-valued kernels
================================================================
An example to illustrate data sparse quantile regression with operator-valued
kernels.
We compare quantile regression with several levels of data sparsity.
"""
# Author: Maxime Sangnier <[email protected]>
# License: MIT
# -*- coding: utf-8 -*-
import time
import numpy as np
import matplotlib.pyplot as plt
from operalib import Quantile, toy_data_quantile
def main():
"""Example of multiple quantile regression."""
print("Creating dataset...")
probs = np.linspace(0.1, 0.9, 5) # Quantile levels of interest
x_train, y_train, _ = toy_data_quantile(100, random_state=0)
x_test, y_test, z_test = toy_data_quantile(1000, probs=probs,
random_state=1)
print("Fitting...")
methods = {'Non-sparse QR':
Quantile(probs=probs, kernel='DGauss', lbda=1e-2, gamma=8.,
gamma_quantile=1e-2),
'Sparse QR':
Quantile(probs=probs, kernel='DGauss', lbda=1e-2, gamma=8.,
gamma_quantile=1e-2, eps=2.5)}
# Fit on training data
for name, reg in sorted(methods.items()):
print(name)
start = time.time()
reg.fit(x_train, y_train)
coefs = reg.model_["coefs"].reshape(y_train.size, probs.size)
n_sv = np.sum(np.linalg.norm(coefs, axis=1) *
reg.lbda / y_train.size > 1e-5)
print('%s learning time: %.3f s' % (name, time.time() - start))
print('%s score %.5f' % (name, reg.score(x_test, y_test)))
print('%s num of support vectors %d' % (name, n_sv))
# Plot the estimated conditional quantiles
plt.figure(figsize=(12, 7))
for i, method in enumerate(sorted(methods.keys())):
plt.subplot(2, 2, i + 1)
plt.plot(x_train, y_train, '.')
plt.gca().set_prop_cycle(None)
for quantile in methods[method].predict(x_test):
plt.plot(x_test, quantile, '-')
plt.gca().set_prop_cycle(None)
for prob, quantile in zip(probs, z_test):
plt.plot(x_test, quantile, '--',
label="theoretical {0:0.2f}".format(prob))
plt.title(method)
plt.legend(fontsize=8)
coefs = methods[method].model_["coefs"].reshape(y_train.size,
probs.size)
plt.subplot(2, 2, 2 + i + 1)
plt.plot(np.linalg.norm(coefs, axis=1))
plt.title("Norm of dual coefs for each point")
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
jkarnows/scikit-learn | sklearn/decomposition/nmf.py | 16 | 19101 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
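# Worked example (illustrative, not part of scikit-learn): Hoyer's measure is
# 1.0 for a vector with a single non-zero entry and 0.0 for a constant vector:
# >>> _sparseness(np.array([1., 0., 0., 0.]))   # (2 - 1/1) / (2 - 1) = 1.0
# >>> _sparseness(np.ones(4))                   # (2 - 4/2) / (2 - 1) = 0.0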
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
        The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
    eps : float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
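# Usage sketch (illustrative, not part of scikit-learn); the input matrix is a
# made-up non-negative array:
# >>> rng = np.random.RandomState(0)
# >>> X = np.abs(rng.randn(6, 4))
# >>> W, H = _initialize_nmf(X, n_components=2)
# W has shape (6, 2), H has shape (2, 4), and both are non-negative, so they
# can seed the projected-gradient updates defined below.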
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
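# Usage sketch (illustrative, not part of scikit-learn). When V is generated
# exactly as W @ H_true with non-negative factors, the solver should return a
# matrix close to H_true:
# >>> rng = np.random.RandomState(0)
# >>> W = np.abs(rng.randn(10, 3))
# >>> H_true = np.abs(rng.randn(3, 4))
# >>> V = np.dot(W, H_true)
# >>> H0 = np.abs(rng.randn(3, 4))
# >>> H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
# H is now close to H_true, since H_true is the exact non-negative minimiser.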
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
TimoRoth/oggm | oggm/core/climate.py | 2 | 56827 | """Climate data and mass-balance computations"""
# Built ins
import logging
import os
import datetime
import json
import warnings
# External libs
import numpy as np
import xarray as xr
import netCDF4
import pandas as pd
from scipy import stats
from scipy import optimize
# Optional libs
try:
import salem
except ImportError:
pass
# Locals
from oggm import cfg
from oggm import utils
from oggm.core import centerlines
from oggm import entity_task, global_task
from oggm.exceptions import (MassBalanceCalibrationError, InvalidParamsError,
InvalidWorkflowError)
# Module logger
log = logging.getLogger(__name__)
# Parameters
_brentq_xtol = 2e-12
# Climate relevant params
MB_PARAMS = ['temp_default_gradient', 'temp_all_solid', 'temp_all_liq',
'temp_melt', 'prcp_scaling_factor', 'climate_qc_months']
@entity_task(log, writes=['climate_historical'])
def process_custom_climate_data(gdir, y0=None, y1=None,
output_filesuffix=None):
"""Processes and writes the climate data from a user-defined climate file.
The input file must have a specific format (see
https://github.com/OGGM/oggm-sample-data ->test-files/histalp_merged_hef.nc
for an example).
This is the way OGGM used to do it for HISTALP before it got automatised.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
y0 : int
the starting year of the timeseries to write. The default is to take
the entire time period available in the file, but with this kwarg
you can shorten it (to save space or to crop bad data)
y1 : int
        the ending year of the timeseries to write. The default is to take
the entire time period available in the file, but with this kwarg
you can shorten it (to save space or to crop bad data)
output_filesuffix : str
this add a suffix to the output file (useful to avoid overwriting
previous experiments)
"""
if not (('climate_file' in cfg.PATHS) and
os.path.exists(cfg.PATHS['climate_file'])):
raise InvalidParamsError('Custom climate file not found')
if cfg.PARAMS['baseline_climate'] not in ['', 'CUSTOM']:
raise InvalidParamsError("When using custom climate data please set "
"PARAMS['baseline_climate'] to an empty "
"string or `CUSTOM`. Note also that you can "
"now use the `process_histalp_data` task for "
"automated HISTALP data processing.")
# read the file
fpath = cfg.PATHS['climate_file']
nc_ts = salem.GeoNetcdf(fpath)
# set temporal subset for the ts data (hydro years)
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
yrs = nc_ts.time.year
y0 = yrs[0] if y0 is None else y0
y1 = yrs[-1] if y1 is None else y1
nc_ts.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
time = nc_ts.time
ny, r = divmod(len(time), 12)
if r != 0:
raise InvalidParamsError('Climate data should be full years')
# Units
assert nc_ts._nc.variables['hgt'].units.lower() in ['m', 'meters', 'meter',
'metres', 'metre']
assert nc_ts._nc.variables['temp'].units.lower() in ['degc', 'degrees',
'degree', 'c']
assert nc_ts._nc.variables['prcp'].units.lower() in ['kg m-2', 'l m-2',
'mm', 'millimeters',
'millimeter']
# geoloc
lon = nc_ts._nc.variables['lon'][:]
lat = nc_ts._nc.variables['lat'][:]
ilon = np.argmin(np.abs(lon - gdir.cenlon))
ilat = np.argmin(np.abs(lat - gdir.cenlat))
ref_pix_lon = lon[ilon]
ref_pix_lat = lat[ilat]
# read the data
temp = nc_ts.get_vardata('temp')
prcp = nc_ts.get_vardata('prcp')
hgt = nc_ts.get_vardata('hgt')
ttemp = temp[:, ilat-1:ilat+2, ilon-1:ilon+2]
itemp = ttemp[:, 1, 1]
thgt = hgt[ilat-1:ilat+2, ilon-1:ilon+2]
ihgt = thgt[1, 1]
thgt = thgt.flatten()
iprcp = prcp[:, ilat, ilon]
nc_ts.close()
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
igrad = None
if use_grad:
igrad = np.zeros(len(time)) * np.NaN
for t, loct in enumerate(ttemp):
slope, _, _, p_val, _ = stats.linregress(thgt,
loct.flatten())
igrad[t] = slope if (p_val < 0.01) else np.NaN
gdir.write_monthly_climate_file(time, iprcp, itemp, ihgt,
ref_pix_lon, ref_pix_lat,
filesuffix=output_filesuffix,
gradient=igrad,
source=fpath)
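# Editor's sketch (not part of OGGM): a typical call pattern for the task
# above. ``gdir`` is assumed to be an initialised GlacierDirectory and the
# path below is a placeholder for a HISTALP-style NetCDF file with monthly
# 'temp', 'prcp' and 'hgt' variables.
def _example_process_custom_climate(gdir):
    cfg.PATHS['climate_file'] = '/path/to/histalp_merged_hef.nc'  # placeholder
    cfg.PARAMS['baseline_climate'] = 'CUSTOM'
    # crop the written time series to 1901-2000
    process_custom_climate_data(gdir, y0=1901, y1=2000)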
@entity_task(log)
def process_climate_data(gdir, y0=None, y1=None, output_filesuffix=None,
**kwargs):
"""Adds the selected climate data to this glacier directory.
Short wrapper deciding on which task to run based on
`cfg.PARAMS['baseline_climate']`.
If you want to make it explicit, simply call the relevant task
(e.g. oggm.shop.cru.process_cru_data).
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
y0 : int
the starting year of the timeseries to write. The default is to take
the entire time period available in the file, but with this kwarg
you can shorten it (to save space or to crop bad data)
y1 : int
the ending year of the timeseries to write. The default is to take
the entire time period available in the file, but with this kwarg
you can shorten it (to save space or to crop bad data)
output_filesuffix : str
this adds a suffix to the output file (useful to avoid overwriting
previous experiments)
**kwargs :
any other argument relevant to the task that will be called.
"""
# Which climate should we use?
baseline = cfg.PARAMS['baseline_climate']
if baseline == 'CRU':
from oggm.shop.cru import process_cru_data
process_cru_data(gdir, output_filesuffix=output_filesuffix,
y0=y0, y1=y1, **kwargs)
elif baseline == 'HISTALP':
from oggm.shop.histalp import process_histalp_data
process_histalp_data(gdir, output_filesuffix=output_filesuffix,
y0=y0, y1=y1, **kwargs)
elif baseline in ['ERA5', 'ERA5L', 'CERA', 'ERA5dr']:
from oggm.shop.ecmwf import process_ecmwf_data
process_ecmwf_data(gdir, output_filesuffix=output_filesuffix,
dataset=baseline, y0=y0, y1=y1, **kwargs)
elif '+' in baseline:
# This bit below assumes ECMWF only datasets, but it should be
# quite easy to extend for HISTALP+ERA5L for example
from oggm.shop.ecmwf import process_ecmwf_data
his, ref = baseline.split('+')
s = 'tmp_'
process_ecmwf_data(gdir, output_filesuffix=s+his, dataset=his,
y0=y0, y1=y1, **kwargs)
process_ecmwf_data(gdir, output_filesuffix=s+ref, dataset=ref,
y0=y0, y1=y1, **kwargs)
historical_delta_method(gdir,
ref_filesuffix=s+ref,
hist_filesuffix=s+his,
output_filesuffix=output_filesuffix)
elif '|' in baseline:
from oggm.shop.ecmwf import process_ecmwf_data
his, ref = baseline.split('|')
s = 'tmp_'
process_ecmwf_data(gdir, output_filesuffix=s+his, dataset=his,
y0=y0, y1=y1, **kwargs)
process_ecmwf_data(gdir, output_filesuffix=s+ref, dataset=ref,
y0=y0, y1=y1, **kwargs)
historical_delta_method(gdir,
ref_filesuffix=s+ref,
hist_filesuffix=s+his,
output_filesuffix=output_filesuffix,
replace_with_ref_data=False)
elif baseline == 'CUSTOM':
process_custom_climate_data(gdir, y0=y0, y1=y1,
output_filesuffix=output_filesuffix,
**kwargs)
else:
raise ValueError("cfg.PARAMS['baseline_climate'] not understood")
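# Editor's sketch (not part of OGGM): driving the dispatcher above. The
# baseline string selects the shop task; a '+'-combined string such as
# 'CERA+ERA5' additionally triggers the delta method defined below
# (CERA-20C bias-corrected to ERA5 and replaced by ERA5 where available).
def _example_process_climate(gdir):
    cfg.PARAMS['baseline_climate'] = 'CERA+ERA5'
    process_climate_data(gdir)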
@entity_task(log, writes=['climate_historical'])
def historical_delta_method(gdir, ref_filesuffix='', hist_filesuffix='',
output_filesuffix='', ref_year_range=None,
delete_input_files=True, scale_stddev=True,
replace_with_ref_data=True):
"""Applies the anomaly method to historical climate data.
This function can be used to prolongate historical time series,
for example by bias-correcting CERA-20C to ERA5 or ERA5-Land.
The timeseries must be already available in the glacier directory
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
where to write the data
ref_filesuffix : str
the filesuffix of the historical climate data to take as reference
hist_filesuffix : str
the filesuffix of the historical climate data to apply to the
reference
output_filesuffix : str
the filesuffix of the output file (usually left empty - i.e. this
file will become the default)
ref_year_range : tuple of str
the year range for which you want to compute the anomalies. The
default is to take the entire reference data period, but you could
also choose `('1961', '1990')` for example
delete_input_files : bool
delete the input files after use - useful for operational runs
where you don't want to carry too many files
scale_stddev : bool
whether or not to scale the temperature standard deviation as well
(you probably want to do that)
replace_with_ref_data : bool
the default is to paste the bias-corrected data where no reference
data is available, i.e. creating timeseries which are not consistent
in time but "better" for recent times (e.g. CERA-20C until 1980,
then ERA5). Set this to False to prevent this and make a consistent
time series of CERA-20C (but bias corrected to the reference data,
so "better" than CERA-20C out of the box).
"""
if ref_year_range is not None:
raise NotImplementedError()
# Read input
f_ref = gdir.get_filepath('climate_historical', filesuffix=ref_filesuffix)
with xr.open_dataset(f_ref) as ds:
ref_temp = ds['temp']
ref_prcp = ds['prcp']
ref_hgt = float(ds.ref_hgt)
ref_lon = float(ds.ref_pix_lon)
ref_lat = float(ds.ref_pix_lat)
source = ds.attrs.get('climate_source')
f_his = gdir.get_filepath('climate_historical', filesuffix=hist_filesuffix)
with xr.open_dataset(f_his) as ds:
hist_temp = ds['temp']
hist_prcp = ds['prcp']
# To differentiate both cases
if replace_with_ref_data:
source = ds.attrs.get('climate_source') + '+' + source
else:
source = ds.attrs.get('climate_source') + '|' + source
# Common time period
cmn_time = (ref_temp + hist_temp)['time']
assert len(cmn_time) // 12 == len(cmn_time) / 12
# We need an even number of years for this to work
if ((len(cmn_time) // 12) % 2) == 1:
cmn_time = cmn_time.isel(time=slice(12, len(cmn_time)))
assert len(cmn_time) // 12 == len(cmn_time) / 12
assert ((len(cmn_time) // 12) % 2) == 0
cmn_time_range = cmn_time.values[[0, -1]]
# Select ref
sref_temp = ref_temp.sel(time=slice(*cmn_time_range))
sref_prcp = ref_prcp.sel(time=slice(*cmn_time_range))
# See if we need to scale the variability
if scale_stddev:
# This is a bit more arithmetic
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
tmp_sel = hist_temp.sel(time=slice(*cmn_time_range))
tmp_std = tmp_sel.groupby('time.month').std(dim='time')
std_fac = sref_temp.groupby('time.month').std(dim='time') / tmp_std
std_fac = std_fac.roll(month=13-sm, roll_coords=True)
std_fac = np.tile(std_fac.data, len(hist_temp) // 12)
win_size = len(cmn_time) + 1
def roll_func(x, axis=None):
x = x[:, ::12]
n = len(x[0, :]) // 2
xm = np.nanmean(x, axis=axis)
return xm + (x[:, n] - xm) * std_fac
hist_temp = hist_temp.rolling(time=win_size, center=True,
min_periods=1).reduce(roll_func)
# compute monthly anomalies
# of temp
ts_tmp_sel = hist_temp.sel(time=slice(*cmn_time_range))
ts_tmp_avg = ts_tmp_sel.groupby('time.month').mean(dim='time')
ts_tmp = hist_temp.groupby('time.month') - ts_tmp_avg
# of precip -- scaled anomalies
ts_pre_avg = hist_prcp.sel(time=slice(*cmn_time_range))
ts_pre_avg = ts_pre_avg.groupby('time.month').mean(dim='time')
ts_pre_ano = hist_prcp.groupby('time.month') - ts_pre_avg
# scaled anomalies are the default. The standard anomalies above
# are used later where ts_pre_avg == 0
ts_pre = hist_prcp.groupby('time.month') / ts_pre_avg
# reference averages
# for temp
loc_tmp = sref_temp.groupby('time.month').mean()
ts_tmp = ts_tmp.groupby('time.month') + loc_tmp
# for prcp
loc_pre = sref_prcp.groupby('time.month').mean()
# scaled anomalies
ts_pre = ts_pre.groupby('time.month') * loc_pre
# standard anomalies
ts_pre_ano = ts_pre_ano.groupby('time.month') + loc_pre
# Correct infinite values with standard anomalies
ts_pre.values = np.where(np.isfinite(ts_pre.values),
ts_pre.values,
ts_pre_ano.values)
# The previous step might create negative values (unlikely). Clip them
ts_pre.values = utils.clip_min(ts_pre.values, 0)
assert np.all(np.isfinite(ts_pre.values))
assert np.all(np.isfinite(ts_tmp.values))
if not replace_with_ref_data:
# Just write what we have
gdir.write_monthly_climate_file(ts_tmp.time.values,
ts_pre.values, ts_tmp.values,
ref_hgt, ref_lon, ref_lat,
filesuffix=output_filesuffix,
source=source)
else:
# Select all hist data before the ref
ts_tmp = ts_tmp.sel(time=slice(ts_tmp.time[0], ref_temp.time[0]))
ts_tmp = ts_tmp.isel(time=slice(0, -1))
ts_pre = ts_pre.sel(time=slice(ts_tmp.time[0], ref_temp.time[0]))
ts_pre = ts_pre.isel(time=slice(0, -1))
# Concatenate and write
gdir.write_monthly_climate_file(np.append(ts_pre.time, ref_prcp.time),
np.append(ts_pre, ref_prcp),
np.append(ts_tmp, ref_temp),
ref_hgt, ref_lon, ref_lat,
filesuffix=output_filesuffix,
source=source)
if delete_input_files:
# Delete all files without suffix
if ref_filesuffix:
os.remove(f_ref)
if hist_filesuffix:
os.remove(f_his)
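# Editor's sketch (not part of OGGM): the core of the delta method above in a
# few lines of xarray, without the standard-deviation scaling and without the
# precipitation special cases. ``hist`` and ``ref`` are assumed to be monthly
# DataArrays and ``period`` a slice covering their common years.
def _delta_method_sketch(hist, ref, period):
    hist_clim = hist.sel(time=period).groupby('time.month').mean(dim='time')
    ref_clim = ref.sel(time=period).groupby('time.month').mean(dim='time')
    # anomalies of the long series w.r.t. its own climatology ...
    anomalies = hist.groupby('time.month') - hist_clim
    # ... shifted onto the reference climatology
    return anomalies.groupby('time.month') + ref_clim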
@entity_task(log, writes=['climate_historical'])
def historical_climate_qc(gdir):
"""Check the "quality" of the baseline climate data and correct if needed.
This forces the climate data to have at least N months
(``cfg.PARAMS['climate_qc_months']``) of melt per year
at the terminus of the glacier (i.e. it simply shifts temperatures up
until this condition is reached), and at least N months where accumulation
is possible at the glacier top (i.e. shifting the temperatures down,
only happening if the temperatures were not shifted upwards before).
In practice, this relatively aggressive shift will be compensated by the
calibration, and sensitivity analyses show that the effect is positive
(i.e. less failing glaciers and more realistic temperature sensitivity
parameters) while not affecting the regional results too much.
"""
# Parameters
temp_s = (cfg.PARAMS['temp_all_liq'] + cfg.PARAMS['temp_all_solid']) / 2
temp_m = cfg.PARAMS['temp_melt']
default_grad = cfg.PARAMS['temp_default_gradient']
g_minmax = cfg.PARAMS['temp_local_gradient_bounds']
qc_months = cfg.PARAMS['climate_qc_months']
if qc_months == 0:
return
# Read file
fpath = gdir.get_filepath('climate_historical')
igrad = None
with utils.ncDataset(fpath) as nc:
# time
# Read timeseries
itemp = nc.variables['temp'][:].astype(np.float64)
if 'gradient' in nc.variables:
igrad = nc.variables['gradient'][:].astype(np.float64)
# Guard against non-finite or out-of-bounds local gradients
igrad = np.where(~np.isfinite(igrad), default_grad, igrad)
igrad = utils.clip_array(igrad, g_minmax[0], g_minmax[1])
ref_hgt = nc.ref_hgt
# Default gradient?
if igrad is None:
igrad = itemp * 0 + default_grad
ny = len(igrad) // 12
assert ny == len(igrad) / 12
# Geometry data
fls = gdir.read_pickle('inversion_flowlines')
heights = np.array([])
for fl in fls:
heights = np.append(heights, fl.surface_h)
top_h = np.max(heights)
bot_h = np.min(heights)
# First check - there should be at least one month of melt every year
prev_ref_hgt = ref_hgt
while True:
ts_bot = itemp + default_grad * (bot_h - ref_hgt)
ts_bot = (ts_bot.reshape((ny, 12)) > temp_m).sum(axis=1)
if np.all(ts_bot >= qc_months):
# Ok all good
break
# put ref hgt a bit higher so that we warm things a bit
ref_hgt += 10
# If we had to shift upwards it makes no sense to lower it down again,
# so we stop here:
if ref_hgt != prev_ref_hgt:
with utils.ncDataset(fpath, 'a') as nc:
nc.ref_hgt = ref_hgt
nc.uncorrected_ref_hgt = prev_ref_hgt
gdir.add_to_diagnostics('ref_hgt_qc_diff', int(ref_hgt - prev_ref_hgt))
return
# Second check - there should be at least one month of acc every year
while True:
ts_top = itemp + default_grad * (top_h - ref_hgt)
ts_top = (ts_top.reshape((ny, 12)) < temp_s).sum(axis=1)
if np.all(ts_top >= qc_months):
# Ok all good
break
# put ref hgt a bit lower so that we cool things down a bit
ref_hgt -= 10
if ref_hgt != prev_ref_hgt:
with utils.ncDataset(fpath, 'a') as nc:
nc.ref_hgt = ref_hgt
nc.uncorrected_ref_hgt = prev_ref_hgt
gdir.add_to_diagnostics('ref_hgt_qc_diff', int(ref_hgt - prev_ref_hgt))
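# Editor's sketch (not part of OGGM): the arithmetic behind the upward shift in
# the first QC loop above. With an (assumed) default gradient of -0.0065 K m-1,
# every 10 m added to ``ref_hgt`` warms the temperature attributed to the
# terminus by about 0.065 K.
def _qc_shift_sketch(itemp, bot_h, ref_hgt, grad=-0.0065):
    before = itemp + grad * (bot_h - ref_hgt)
    after = itemp + grad * (bot_h - (ref_hgt + 10))
    return after - before  # == -grad * 10 == +0.065 K per step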
def mb_climate_on_height(gdir, heights, *, time_range=None, year_range=None):
"""Mass-balance climate of the glacier at a specific height
Reads the glacier's monthly climate data file and computes the
temperature "energies" (temp above 0) and solid precipitation at the
required height.
All MB parameters are considered here! (i.e. melt temp, precip scaling
factor, etc.)
Parameters
----------
gdir : GlacierDirectory
the glacier directory
heights: ndarray
a 1D array of the heights (in meter) where you want the data
time_range : [datetime, datetime], optional
default is to read all data but with this you
can provide a [t0, t1] bounds (inclusive).
year_range : [int, int], optional
Provide a [y0, y1] year range to get the data for specific
(hydrological) years only. Easier to use than the time bounds above.
Returns
-------
(time, tempformelt, prcpsol)::
- time: array of shape (nt,)
- tempformelt: array of shape (len(heights), nt)
- prcpsol: array of shape (len(heights), nt)
"""
if year_range is not None:
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
t0 = datetime.datetime(year_range[0]-1, sm, 1)
t1 = datetime.datetime(year_range[1], em, 1)
return mb_climate_on_height(gdir, heights, time_range=[t0, t1])
# Parameters
temp_all_solid = cfg.PARAMS['temp_all_solid']
temp_all_liq = cfg.PARAMS['temp_all_liq']
temp_melt = cfg.PARAMS['temp_melt']
prcp_fac = cfg.PARAMS['prcp_scaling_factor']
default_grad = cfg.PARAMS['temp_default_gradient']
g_minmax = cfg.PARAMS['temp_local_gradient_bounds']
# Read file
igrad = None
with utils.ncDataset(gdir.get_filepath('climate_historical')) as nc:
# time
time = nc.variables['time']
time = netCDF4.num2date(time[:], time.units)
if time_range is not None:
p0 = np.where(time == time_range[0])[0]
try:
p0 = p0[0]
except IndexError:
raise MassBalanceCalibrationError('time_range[0] not found in '
'file')
p1 = np.where(time == time_range[1])[0]
try:
p1 = p1[0]
except IndexError:
raise MassBalanceCalibrationError('time_range[1] not found in '
'file')
else:
p0 = 0
p1 = len(time)-1
time = time[p0:p1+1]
# Read timeseries
itemp = nc.variables['temp'][p0:p1+1].astype(np.float64)
iprcp = nc.variables['prcp'][p0:p1+1].astype(np.float64)
if 'gradient' in nc.variables:
igrad = nc.variables['gradient'][p0:p1+1].astype(np.float64)
# Guard against non-finite or out-of-bounds local gradients
igrad = np.where(~np.isfinite(igrad), default_grad, igrad)
igrad = utils.clip_array(igrad, g_minmax[0], g_minmax[1])
ref_hgt = nc.ref_hgt
# Default gradient?
if igrad is None:
igrad = itemp * 0 + default_grad
# Correct precipitation
iprcp *= prcp_fac
# For each height pixel:
# Compute temp and tempformelt (temperature above melting threshold)
npix = len(heights)
grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
grad_temp *= (heights.repeat(len(time)).reshape(grad_temp.shape) - ref_hgt)
temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
temp2dformelt = temp2d - temp_melt
temp2dformelt = utils.clip_min(temp2dformelt, 0)
# Compute solid precipitation from total precipitation
prcpsol = np.atleast_2d(iprcp).repeat(npix, 0)
fac = 1 - (temp2d - temp_all_solid) / (temp_all_liq - temp_all_solid)
fac = utils.clip_array(fac, 0, 1)
prcpsol = prcpsol * fac
return time, temp2dformelt, prcpsol
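# Editor's sketch (not part of OGGM): the per-height computation above for a
# single height and month. The parameter defaults below are assumptions, not
# necessarily the configured OGGM values; ``prcp`` is assumed to be already
# multiplied by the precipitation scaling factor.
def _melt_and_solid_prcp_sketch(temp_at_ref, prcp, height, ref_hgt,
                                grad=-0.0065, temp_melt=-1.0,
                                temp_all_solid=0.0, temp_all_liq=2.0):
    temp = temp_at_ref + grad * (height - ref_hgt)
    temp_for_melt = max(temp - temp_melt, 0.0)
    fac = 1 - (temp - temp_all_solid) / (temp_all_liq - temp_all_solid)
    frac_solid = min(max(fac, 0.0), 1.0)
    return temp_for_melt, prcp * frac_solid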
def mb_yearly_climate_on_height(gdir, heights, *,
year_range=None, flatten=False):
"""Yearly mass-balance climate of the glacier at a specific height
See also: mb_climate_on_height
Parameters
----------
gdir : GlacierDirectory
the glacier directory
heights: ndarray
a 1D array of the heights (in meter) where you want the data
year_range : [int, int], optional
Provide a [y0, y1] year range to get the data for specific
(hydrological) years only.
flatten : bool
for some applications (glacier average MB) it's ok to flatten the
data (average over height) prior to annual summing.
Returns
-------
(years, tempformelt, prcpsol)::
- years: array of shape (ny,)
- tempformelt: array of shape (len(heights), ny) (or ny if flatten
is set)
- prcpsol: array of shape (len(heights), ny) (or ny if flatten
is set)
"""
time, temp, prcp = mb_climate_on_height(gdir, heights,
year_range=year_range)
ny, r = divmod(len(time), 12)
if r != 0:
raise InvalidParamsError('Climate data should be N full years '
'exclusively')
# The year of the last timestamp labels the (hydrological) years
years = np.arange(time[-1].year-ny+1, time[-1].year+1, 1)
if flatten:
# Spatial average
temp_yr = np.zeros(len(years))
prcp_yr = np.zeros(len(years))
temp = np.mean(temp, axis=0)
prcp = np.mean(prcp, axis=0)
for i, y in enumerate(years):
temp_yr[i] = np.sum(temp[i*12:(i+1)*12])
prcp_yr[i] = np.sum(prcp[i*12:(i+1)*12])
else:
# Annual prcp and temp for each point (no spatial average)
temp_yr = np.zeros((len(heights), len(years)))
prcp_yr = np.zeros((len(heights), len(years)))
for i, y in enumerate(years):
temp_yr[:, i] = np.sum(temp[:, i*12:(i+1)*12], axis=1)
prcp_yr[:, i] = np.sum(prcp[:, i*12:(i+1)*12], axis=1)
return years, temp_yr, prcp_yr
def mb_yearly_climate_on_glacier(gdir, *, year_range=None):
"""Yearly mass-balance climate at all glacier heights,
multiplied with the flowlines widths. (all in pix coords.)
See also: mb_climate_on_height
Parameters
----------
gdir : GlacierDirectory
the glacier directory
year_range : [int, int], optional
Provide a [y0, y1] year range to get the data for specific
(hydrological) years only.
Returns
-------
(years, tempformelt, prcpsol)::
- years: array of shape (ny)
- tempformelt: array of shape (ny)
- prcpsol: array of shape (ny)
"""
flowlines = gdir.read_pickle('inversion_flowlines')
heights = np.array([])
widths = np.array([])
for fl in flowlines:
heights = np.append(heights, fl.surface_h)
widths = np.append(widths, fl.widths)
years, temp, prcp = mb_yearly_climate_on_height(gdir, heights,
year_range=year_range,
flatten=False)
temp = np.average(temp, axis=0, weights=widths)
prcp = np.average(prcp, axis=0, weights=widths)
return years, temp, prcp
@entity_task(log)
def glacier_mu_candidates(gdir):
"""Computes the mu candidates, glacier wide.
For each 31-year period centered on the year of interest, mu is the
temperature sensitivity necessary for the glacier with its current shape
to be in equilibrium with its climate.
This task is just for documentation and testing! It is not used in
production anymore.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
warnings.warn('The task `glacier_mu_candidates` is deprecated. It should '
'only be used for testing.', FutureWarning)
mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
# Only get the years where we consider looking for tstar
y0, y1 = cfg.PARAMS['tstar_search_window']
ci = gdir.get_climate_info()
y0 = y0 or ci['baseline_hydro_yr_0']
y1 = y1 or ci['baseline_hydro_yr_1']
years, temp_yr, prcp_yr = mb_yearly_climate_on_glacier(gdir,
year_range=[y0, y1])
# Compute mu for each 31-yr climatological period
ny = len(years)
mu_yr_clim = np.zeros(ny) * np.NaN
for i, y in enumerate(years):
# Ignore begin and end
if ((i-mu_hp) < 0) or ((i+mu_hp) >= ny):
continue
t_avg = np.mean(temp_yr[i-mu_hp:i+mu_hp+1])
if t_avg > 1e-3: # if too cold no melt possible
prcp_ts = prcp_yr[i-mu_hp:i+mu_hp+1]
mu_yr_clim[i] = np.mean(prcp_ts) / t_avg
# Check that we found a least one mustar
if np.sum(np.isfinite(mu_yr_clim)) < 1:
raise MassBalanceCalibrationError('({}) no mustar candidates found.'
.format(gdir.rgi_id))
# Write
return pd.Series(data=mu_yr_clim, index=years)
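# Editor's sketch (not part of OGGM): what a single mu* candidate above amounts
# to. For a 31-yr window centred on year y, mu* is the sensitivity that
# balances solid precipitation against melt energy:
# mu*(y) = mean(prcp_solid) / mean(temp_for_melt), undefined for too-cold windows.
def _mu_candidate_sketch(prcp_solid_window, temp_for_melt_window):
    t_avg = np.mean(temp_for_melt_window)
    return np.mean(prcp_solid_window) / t_avg if t_avg > 1e-3 else np.nan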
@entity_task(log)
def t_star_from_refmb(gdir, mbdf=None, glacierwide=None,
min_mu_star=None, max_mu_star=None):
"""Computes the ref t* for the glacier, given a series of MB measurements.
Parameters
----------
gdir : oggm.GlacierDirectory
mbdf: a pd.Series containing the observed MB data indexed by year
if None, read automatically from the reference data
Returns
-------
A dict: {t_star:[], bias:[], 'avg_mb_per_mu': [], 'avg_ref_mb': []}
"""
from oggm.core.massbalance import MultipleFlowlineMassBalance
if glacierwide is None:
glacierwide = cfg.PARAMS['tstar_search_glacierwide']
# Be sure we have no marine terminating glacier
assert not gdir.is_tidewater
# Reference time series
if mbdf is None:
mbdf = gdir.get_ref_mb_data()['ANNUAL_BALANCE']
# mu* constraints
if min_mu_star is None:
min_mu_star = cfg.PARAMS['min_mu_star']
if max_mu_star is None:
max_mu_star = cfg.PARAMS['max_mu_star']
# which years to look at
ref_years = mbdf.index.values
# Average observed mass-balance
ref_mb = np.mean(mbdf)
# Compute one mu candidate per year and the associated statistics
# Only get the years where we consider looking for tstar
y0, y1 = cfg.PARAMS['tstar_search_window']
ci = gdir.get_climate_info()
y0 = y0 or ci['baseline_hydro_yr_0']
y1 = y1 or ci['baseline_hydro_yr_1']
years = np.arange(y0, y1+1)
ny = len(years)
mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
mb_per_mu = pd.Series(index=years, dtype=float)
if glacierwide:
# The old (but fast) method to find t*
_, temp, prcp = mb_yearly_climate_on_glacier(gdir, year_range=[y0, y1])
# which years to look at
selind = np.searchsorted(years, mbdf.index)
sel_temp = temp[selind]
sel_prcp = prcp[selind]
sel_temp = np.mean(sel_temp)
sel_prcp = np.mean(sel_prcp)
for i, y in enumerate(years):
# Ignore begin and end
if ((i - mu_hp) < 0) or ((i + mu_hp) >= ny):
continue
# Compute the mu candidate
t_avg = np.mean(temp[i - mu_hp:i + mu_hp + 1])
if t_avg < 1e-3: # if too cold no melt possible
continue
mu = np.mean(prcp[i - mu_hp:i + mu_hp + 1]) / t_avg
# Apply it
mb_per_mu[y] = np.mean(sel_prcp - mu * sel_temp)
else:
# The new (but slow) method to find t*
# Compute mu for each 31-yr climatological period
fls = gdir.read_pickle('inversion_flowlines')
for i, y in enumerate(years):
# Ignore begin and end
if ((i-mu_hp) < 0) or ((i+mu_hp) >= ny):
continue
# Calibrate the mu for this year
for fl in fls:
fl.mu_star_is_valid = False
try:
# TODO: this is slow and can be highly optimised
# it reads the same data over and over again
_recursive_mu_star_calibration(gdir, fls, y, first_call=True,
min_mu_star=min_mu_star,
max_mu_star=max_mu_star)
# Compute the MB with it
mb_mod = MultipleFlowlineMassBalance(gdir, fls, bias=0,
check_calib_params=False)
mb_ts = mb_mod.get_specific_mb(fls=fls, year=ref_years)
mb_per_mu[y] = np.mean(mb_ts)
except MassBalanceCalibrationError:
pass
# Diff to reference
diff = (mb_per_mu - ref_mb).dropna()
if len(diff) == 0:
raise MassBalanceCalibrationError('No single valid mu candidate for '
'this glacier!')
# Here we used to keep all possible mu* in order to later select
# them based on some distance search algorithms.
# (revision 81bc0923eab6301306184d26462f932b72b84117)
#
# As of Jul 2018, we will now stop this non-sense:
# out of all mu*, let's just pick the one with the smallest bias.
# It doesn't make much sense, but the same is true for other methods
# as well -> this is how Ben used to do it, and he is clever
# Another way would be to pick the closest to today or something
amin = np.abs(diff).idxmin()
# Write
d = gdir.get_climate_info()
d['t_star'] = amin
d['bias'] = diff[amin]
gdir.write_json(d, 'climate_info')
return {'t_star': amin, 'bias': diff[amin],
'avg_mb_per_mu': mb_per_mu, 'avg_ref_mb': ref_mb}
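# Editor's sketch (not part of OGGM): the selection rule described in the long
# comment above, in isolation: out of all candidate years, t* is the one whose
# modelled average MB comes closest to the observed average MB.
def _pick_t_star_sketch(mb_per_mu, ref_mb):
    diff = (mb_per_mu - ref_mb).dropna()
    t_star = diff.abs().idxmin()
    return t_star, diff[t_star]  # the remaining difference is the bias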
def calving_mb(gdir):
"""Calving mass-loss in specific MB equivalent.
This is necessary to compute mu star.
"""
if not gdir.is_tidewater:
return 0.
# Ok. Just take the calving rate from cfg and change its units
# Original units: km3 a-1, to change to mm a-1 (units of specific MB)
rho = cfg.PARAMS['ice_density']
return gdir.inversion_calving_rate * 1e9 * rho / gdir.rgi_area_m2
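# Editor's note (illustrative, numbers assumed): unit bookkeeping for the line
# above. A calving rate of 0.001 km3 ice a-1 on a 10 km2 glacier with
# rho = 900 kg m-3 gives 0.001 * 1e9 * 900 / 1e7 = 90 kg m-2 a-1, i.e. roughly
# 90 mm w.e. a-1 of specific mass loss.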
def _fallback_local_t_star(gdir):
"""A Fallback function if climate.local_t_star raises an Error.
This function will still write a `local_mustar.json`, filled with NANs,
if climate.local_t_star fails and cfg.PARAMS['continue_on_error'] = True.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
# Scalars in a small dict for later
df = dict()
df['rgi_id'] = gdir.rgi_id
df['t_star'] = np.nan
df['bias'] = np.nan
df['mu_star_glacierwide'] = np.nan
gdir.write_json(df, 'local_mustar')
@entity_task(log, writes=['local_mustar', 'climate_info'],
fallback=_fallback_local_t_star)
def local_t_star(gdir, *, ref_df=None, tstar=None, bias=None,
clip_mu_star=None, min_mu_star=None, max_mu_star=None):
"""Compute the local t* and associated glacier-wide mu*.
If ``tstar`` and ``bias`` are not provided, they will be interpolated from
the reference t* list (``ref_df``).
If none of these are provided (the default), this list will be obtained from
the current working directory (``ref_tstars.csv`` and associated params
``ref_tstars_params.json``). These files can either be generated with a
call to ``compute_ref_t_stars`` if you know what you are doing, or you
can obtain pre-processed lists from our servers:
https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params/
The best way to fetch them is to use
:py:func:`oggm.workflow.download_ref_tstars`.
Note: the glacier wide mu* is output here just for indication. It might be
different from the flowlines' mu* in some cases.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
ref_df : :py:class:`pandas.DataFrame`, optional
replace the default calibration list with your own.
tstar: int, optional
the year where the glacier should be in equilibrium
bias: float, optional
the associated reference bias
clip_mu_star: bool, optional
defaults to cfg.PARAMS['clip_mu_star']
min_mu_star: float, optional
defaults to cfg.PARAMS['min_mu_star']
max_mu_star: float, optional
defaults to cfg.PARAMS['max_mu_star']
"""
if tstar is None or bias is None:
# Do our own interpolation
if ref_df is None:
# Use the local calibration
msg = ('If `ref_df` is not provided, please put a list of '
'`ref_tstars.csv` and associated params '
'`ref_tstars_params.json` in the working directory. '
'Please see the documentation of local_t_star '
'for more information.')
fp = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars.csv')
if not os.path.exists(fp):
raise InvalidWorkflowError(msg)
ref_df = pd.read_csv(fp)
# Check that the params are fine
fp = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars_params.json')
if not os.path.exists(fp):
raise InvalidWorkflowError(msg)
with open(fp, 'r') as fp:
ref_params = json.load(fp)
for k, v in ref_params.items():
if cfg.PARAMS[k] != v:
msg = ('The reference t* list you are trying to use was '
'calibrated with different MB parameters.')
raise MassBalanceCalibrationError(msg)
# Compute the distance to each glacier
distances = utils.haversine(gdir.cenlon, gdir.cenlat,
ref_df.lon, ref_df.lat)
# Take the 9 closest
aso = np.argsort(distances)[0:9]
amin = ref_df.iloc[aso]
distances = distances[aso]**2
# If really close no need to divide, else weighted average
if distances.iloc[0] <= 0.1:
tstar = amin.tstar.iloc[0]
bias = amin.bias.iloc[0]
else:
tstar = int(np.average(amin.tstar, weights=1./distances).round())
bias = np.average(amin.bias, weights=1./distances)
# Add the climate related params to the GlacierDir to make sure
# other tools cannot fool around without re-calibration
out = gdir.get_climate_info()
out['mb_calib_params'] = {k: cfg.PARAMS[k] for k in MB_PARAMS}
gdir.write_json(out, 'climate_info')
# We compute the overall mu* here but this is mostly for testing
# Climate period
mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
yr = [tstar - mu_hp, tstar + mu_hp]
# Do we have a calving glacier?
cmb = calving_mb(gdir)
log.info('(%s) local mu* computation for t*=%d', gdir.rgi_id, tstar)
# Get the corresponding mu
years, temp_yr, prcp_yr = mb_yearly_climate_on_glacier(gdir, year_range=yr)
assert len(years) == (2 * mu_hp + 1)
# mustar is taking calving into account (units of specific MB)
mustar = (np.mean(prcp_yr) - cmb) / np.mean(temp_yr)
if not np.isfinite(mustar):
raise MassBalanceCalibrationError('{} has a non finite '
'mu'.format(gdir.rgi_id))
# mu* constraints
if clip_mu_star is None:
clip_mu_star = cfg.PARAMS['clip_mu_star']
if min_mu_star is None:
min_mu_star = cfg.PARAMS['min_mu_star']
if max_mu_star is None:
max_mu_star = cfg.PARAMS['max_mu_star']
# Clip it?
if clip_mu_star:
mustar = utils.clip_min(mustar, min_mu_star)
# If mu out of bounds, raise
if not (min_mu_star <= mustar <= max_mu_star):
raise MassBalanceCalibrationError('{}: mu* out of specified bounds: '
'{:.2f}'.format(gdir.rgi_id, mustar))
# Scalars in a small dict for later
df = dict()
df['rgi_id'] = gdir.rgi_id
df['t_star'] = int(tstar)
df['bias'] = bias
df['mu_star_glacierwide'] = mustar
gdir.write_json(df, 'local_mustar')
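# Editor's sketch (not part of OGGM): the interpolation step above, reduced to
# its essence. t* and the bias come from the closest reference glaciers,
# weighted by inverse squared distance unless one of them is practically on top
# of the target glacier. ``cand`` is assumed to be a DataFrame with 'tstar',
# 'bias' and a precomputed 'dist' column.
def _interp_tstar_sketch(cand):
    cand = cand.sort_values('dist').iloc[:9]
    if cand['dist'].iloc[0] <= 0.1:
        return int(cand['tstar'].iloc[0]), float(cand['bias'].iloc[0])
    w = 1. / cand['dist'] ** 2
    tstar = int(np.average(cand['tstar'], weights=w).round())
    bias = float(np.average(cand['bias'], weights=w))
    return tstar, bias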
def _check_terminus_mass_flux(gdir, fls, cmb):
# Avoid code duplication
rho = cfg.PARAMS['ice_density']
# This variable is in "sensible" units normalized by width
flux = fls[-1].flux[-1]
aflux = flux * (gdir.grid.dx ** 2) / rho # m3 ice per year
# If not marine and a bit far from zero, warning
if cmb == 0 and flux > 0 and not np.allclose(flux, 0, atol=0.01):
log.info('(%s) flux should be zero, but is: '
'%.4f m3 ice yr-1', gdir.rgi_id, aflux)
# If not marine and quite far from zero, error
if cmb == 0 and flux > 0 and not np.allclose(flux, 0, atol=1):
msg = ('({}) flux should be zero, but is: {:.4f} m3 ice yr-1'
.format(gdir.rgi_id, aflux))
raise MassBalanceCalibrationError(msg)
def _mu_star_per_minimization(x, fls, cmb, temp, prcp, widths):
# Get the corresponding mu
mus = np.array([])
for fl in fls:
mu = fl.mu_star if fl.mu_star_is_valid else x
mus = np.append(mus, np.ones(fl.nx) * mu)
# TODO: possible optimisation here
out = np.average(prcp - mus[:, np.newaxis] * temp, axis=0, weights=widths)
return np.mean(out - cmb)
def _recursive_mu_star_calibration(gdir, fls, t_star, first_call=True,
force_mu=None, min_mu_star=None,
max_mu_star=None):
# Do we have a calving glacier? This is only for the first call!
# The calving mass-balance is distributed over the valid tributaries of the
# main line, i.e. bad tributaries are not considered for calving
cmb = calving_mb(gdir) if first_call else 0.
# Climate period
mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
yr_range = [t_star - mu_hp, t_star + mu_hp]
# Get the corresponding mu
heights = np.array([])
widths = np.array([])
for fl in fls:
heights = np.append(heights, fl.surface_h)
widths = np.append(widths, fl.widths)
_, temp, prcp = mb_yearly_climate_on_height(gdir, heights,
year_range=yr_range,
flatten=False)
if force_mu is None:
try:
mu_star = optimize.brentq(_mu_star_per_minimization,
min_mu_star, max_mu_star,
args=(fls, cmb, temp, prcp, widths),
xtol=_brentq_xtol)
except ValueError:
# This happens in very rare cases
_mu_lim = _mu_star_per_minimization(min_mu_star, fls, cmb, temp,
prcp, widths)
if _mu_lim < min_mu_star and np.allclose(_mu_lim, min_mu_star):
mu_star = min_mu_star
else:
raise MassBalanceCalibrationError('{} mu* out of specified '
'bounds.'.format(gdir.rgi_id)
)
if not np.isfinite(mu_star):
raise MassBalanceCalibrationError('{} '.format(gdir.rgi_id) +
'has a non finite mu.')
else:
mu_star = force_mu
# Reset flux
for fl in fls:
fl.flux = np.zeros(len(fl.surface_h))
# Flowlines in order to be sure - start with first guess mu*
for fl in fls:
y, t, p = mb_yearly_climate_on_height(gdir, fl.surface_h,
year_range=yr_range,
flatten=False)
mu = fl.mu_star if fl.mu_star_is_valid else mu_star
fl.set_apparent_mb(np.mean(p, axis=1) - mu*np.mean(t, axis=1),
mu_star=mu)
# Sometimes, low-lying tributaries have a physically inconsistent
# mass balance. These tributaries wouldn't exist with a single
# glacier-wide mu*, and therefore need a specific calibration.
# All other mus may be affected
if cfg.PARAMS['correct_for_neg_flux'] and (len(fls) > 1):
if np.any([fl.flux_needs_correction for fl in fls]):
# We start with the highest Strahler number that needs correction
not_ok = np.array([fl.flux_needs_correction for fl in fls])
fl = np.array(fls)[not_ok][-1]
# And we take all its tributaries
inflows = centerlines.line_inflows(fl)
# We find a new mu for these in a recursive call
# TODO: this is where a flux kwarg can passed to tributaries
_recursive_mu_star_calibration(gdir, inflows, t_star,
first_call=False,
min_mu_star=min_mu_star,
max_mu_star=max_mu_star)
# At this stage we should be ok
assert np.all([~ fl.flux_needs_correction for fl in inflows])
for fl in inflows:
fl.mu_star_is_valid = True
# After the above are OK we have to recalibrate all below
_recursive_mu_star_calibration(gdir, fls, t_star,
first_call=first_call,
min_mu_star=min_mu_star,
max_mu_star=max_mu_star)
# At this stage we are good
for fl in fls:
fl.mu_star_is_valid = True
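# Editor's sketch (not part of OGGM): the root finding at the heart of the
# recursive calibration above. brentq looks for the mu* at which the
# glacier-wide apparent mass balance (minus any calving contribution) is zero.
# ``misfit`` stands in for ``_mu_star_per_minimization`` with its extra
# arguments already bound; the default bounds below are placeholders.
def _find_mu_star_sketch(misfit, mu_lo=1., mu_hi=10000.):
    # misfit(mu) must change sign between the bounds for brentq to converge
    return optimize.brentq(misfit, mu_lo, mu_hi, xtol=_brentq_xtol)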
def _fallback_mu_star_calibration(gdir):
"""A Fallback function if climate.mu_star_calibration raises an Error.

 This function will still read, expand and write a `local_mustar.json`,
filled with NANs, if climate.mu_star_calibration fails
and if cfg.PARAMS['continue_on_error'] = True.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
# read json
df = gdir.read_json('local_mustar')
# add these keys which mu_star_calibration would add
df['mu_star_per_flowline'] = [np.nan]
df['mu_star_flowline_avg'] = np.nan
df['mu_star_allsame'] = np.nan
# write
gdir.write_json(df, 'local_mustar')
@entity_task(log, writes=['inversion_flowlines'],
fallback=_fallback_mu_star_calibration)
def mu_star_calibration(gdir, min_mu_star=None, max_mu_star=None):
"""Compute the flowlines' mu* and the associated apparent mass-balance.
If low-lying tributaries have a physically inconsistent mass balance,
this function will either filter them out or calibrate each flowline with a
specific mu*. The latter is the default and recommended.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
min_mu_star: float, optional
defaults to cfg.PARAMS['min_mu_star']
max_mu_star: float, optional
defaults to cfg.PARAMS['max_mu_star']
"""
# Interpolated data
df = gdir.read_json('local_mustar')
t_star = df['t_star']
bias = df['bias']
# mu* constraints
if min_mu_star is None:
min_mu_star = cfg.PARAMS['min_mu_star']
if max_mu_star is None:
max_mu_star = cfg.PARAMS['max_mu_star']
# For each flowline compute the apparent MB
fls = gdir.read_pickle('inversion_flowlines')
# If someone calls the task a second time we need to reset this
for fl in fls:
fl.mu_star_is_valid = False
force_mu = min_mu_star if df['mu_star_glacierwide'] == min_mu_star else None
# Let's go
_recursive_mu_star_calibration(gdir, fls, t_star, force_mu=force_mu,
min_mu_star=min_mu_star,
max_mu_star=max_mu_star)
# If the user wants to filter the bad ones we remove them and start all
# over again until all tributaries are physically consistent with one mu
# This should only work if cfg.PARAMS['correct_for_neg_flux'] == False
do_filter = [fl.flux_needs_correction for fl in fls]
if cfg.PARAMS['filter_for_neg_flux'] and np.any(do_filter):
assert not do_filter[-1] # This should not happen
# Keep only the good lines
# TODO: this should use centerline.line_inflows for more efficiency!
heads = [fl.orig_head for fl in fls if not fl.flux_needs_correction]
centerlines.compute_centerlines(gdir, heads=heads, reset=True)
centerlines.initialize_flowlines(gdir, reset=True)
if gdir.has_file('downstream_line'):
centerlines.compute_downstream_line(gdir, reset=True)
centerlines.compute_downstream_bedshape(gdir, reset=True)
centerlines.catchment_area(gdir, reset=True)
centerlines.catchment_intersections(gdir, reset=True)
centerlines.catchment_width_geom(gdir, reset=True)
centerlines.catchment_width_correction(gdir, reset=True)
local_t_star(gdir, tstar=t_star, bias=bias, reset=True)
# Ok, re-call ourselves
return mu_star_calibration(gdir, reset=True)
# Check and write
rho = cfg.PARAMS['ice_density']
aflux = fls[-1].flux[-1] * 1e-9 / rho * gdir.grid.dx**2
# If not marine and a bit far from zero, warning
cmb = calving_mb(gdir)
if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=0.01):
log.info('(%s) flux should be zero, but is: '
'%.4f km3 ice yr-1', gdir.rgi_id, aflux)
# If not marine and quite far from zero, error
if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=1):
msg = ('({}) flux should be zero, but is: {:.4f} km3 ice yr-1'
.format(gdir.rgi_id, aflux))
raise MassBalanceCalibrationError(msg)
gdir.write_pickle(fls, 'inversion_flowlines')
# Store diagnostics
mus = []
weights = []
for fl in fls:
mus.append(fl.mu_star)
weights.append(np.sum(fl.widths))
df['mu_star_per_flowline'] = mus
df['mu_star_flowline_avg'] = np.average(mus, weights=weights)
all_same = np.allclose(mus, mus[0], atol=1e-3)
df['mu_star_allsame'] = all_same
if all_same:
if not np.allclose(df['mu_star_flowline_avg'],
df['mu_star_glacierwide'],
atol=1e-3):
raise MassBalanceCalibrationError('Unexpected difference between '
'glacier wide mu* and the '
'flowlines mu*.')
# Write
gdir.write_json(df, 'local_mustar')
@entity_task(log, writes=['inversion_flowlines', 'linear_mb_params'])
def apparent_mb_from_linear_mb(gdir, mb_gradient=3., ela_h=None):
"""Compute apparent mb from a linear mass-balance assumption (for testing).
This is for testing currently, but could be used as alternative method
for the inversion quite easily.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
# Do we have a calving glacier?
cmb = calving_mb(gdir)
# Get the height and widths along the fls
h, w = gdir.get_inversion_flowline_hw()
# Now find the ELA till the integrated mb is zero
from oggm.core.massbalance import LinearMassBalance
def to_minimize(ela_h):
mbmod = LinearMassBalance(ela_h, grad=mb_gradient)
smb = mbmod.get_specific_mb(heights=h, widths=w)
return smb - cmb
if ela_h is None:
ela_h = optimize.brentq(to_minimize, -1e5, 1e5, xtol=_brentq_xtol)
# For each flowline compute the apparent MB
rho = cfg.PARAMS['ice_density']
fls = gdir.read_pickle('inversion_flowlines')
# Reset flux
for fl in fls:
fl.flux = np.zeros(len(fl.surface_h))
# Flowlines in order to be sure
mbmod = LinearMassBalance(ela_h, grad=mb_gradient)
for fl in fls:
mbz = mbmod.get_annual_mb(fl.surface_h) * cfg.SEC_IN_YEAR * rho
fl.set_apparent_mb(mbz)
# Check and write
aflux = fls[-1].flux[-1] * 1e-9 / rho * gdir.grid.dx**2
# If not marine and a bit far from zero, warning
if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=0.01):
log.info('(%s) flux should be zero, but is: '
'%.4f km3 ice yr-1', gdir.rgi_id, aflux)
# If not marine and quite far from zero, error
if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=1):
msg = ('({}) flux should be zero, but is: {:.4f} km3 ice yr-1'
.format(gdir.rgi_id, aflux))
raise MassBalanceCalibrationError(msg)
gdir.write_pickle(fls, 'inversion_flowlines')
gdir.write_pickle({'ela_h': ela_h, 'grad': mb_gradient},
'linear_mb_params')
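# Editor's note (illustrative): because the mass balance above is linear in
# elevation, the brentq search for the ELA has a closed form. With
# mb(h) = grad * (h - ela), the width-weighted specific MB equals cmb exactly
# when ela is the width-weighted mean elevation minus cmb / grad.
def _linear_ela_sketch(heights, widths, grad=3., cmb=0.):
    return np.average(heights, weights=widths) - cmb / grad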
@entity_task(log, writes=['inversion_flowlines'])
def apparent_mb_from_any_mb(gdir, mb_model=None, mb_years=None):
"""Compute apparent mb from an arbitrary mass-balance profile.
This searches for a mass-balance residual to add to the mass-balance
profile so that the average specific MB is zero.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
mb_model : :py:class:`oggm.core.massbalance.MassBalanceModel`
the mass-balance model to use
mb_years : array
the array of years from which you want to average the MB for (for
mb_model only).
"""
# Do we have a calving glacier?
cmb = calving_mb(gdir)
# For each flowline compute the apparent MB
fls = gdir.read_pickle('inversion_flowlines')
# Unchanged SMB
o_smb = np.mean(mb_model.get_specific_mb(fls=fls, year=mb_years))
def to_minimize(residual_to_opt):
return o_smb + residual_to_opt - cmb
residual = optimize.brentq(to_minimize, -1e5, 1e5, xtol=_brentq_xtol)
# Reset flux
for fl in fls:
fl.flux = np.zeros(len(fl.surface_h))
# Flowlines in order to be sure
rho = cfg.PARAMS['ice_density']
for fl_id, fl in enumerate(fls):
mbz = 0
for yr in mb_years:
mbz += mb_model.get_annual_mb(fl.surface_h, year=yr,
fls=fls, fl_id=fl_id)
mbz = mbz / len(mb_years)
fl.set_apparent_mb(mbz * cfg.SEC_IN_YEAR * rho + residual)
# Check and write
aflux = fls[-1].flux[-1] * 1e-9 / rho * gdir.grid.dx**2
# If not marine and a bit far from zero, warning
if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=0.01):
log.info('(%s) flux should be zero, but is: '
'%.4f km3 ice yr-1', gdir.rgi_id, aflux)
# If not marine and quite far from zero, error
if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=1):
msg = ('({}) flux should be zero, but is: {:.4f} km3 ice yr-1'
.format(gdir.rgi_id, aflux))
raise MassBalanceCalibrationError(msg)
gdir.add_to_diagnostics('apparent_mb_from_any_mb_residual', residual)
gdir.write_pickle(fls, 'inversion_flowlines')
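# Editor's note (illustrative): ``to_minimize`` above is linear in the
# residual, so the brentq call simply returns residual = cmb - o_smb, i.e. the
# constant offset that makes the average specific MB match the calving flux
# (zero for land-terminating glaciers).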
@global_task(log)
def compute_ref_t_stars(gdirs):
""" Detects the best t* for the reference glaciers and writes them to disk
Parameters
----------
gdirs : list of :py:class:`oggm.GlacierDirectory` objects
will be filtered for reference glaciers
"""
if not cfg.PARAMS['run_mb_calibration']:
raise InvalidParamsError('Are you sure you want to calibrate the '
'reference t*? There is a pre-calibrated '
'version available. If you know what you are '
'doing and still want to calibrate, set the '
'`run_mb_calibration` parameter to `True`.')
log.info('Compute the reference t* and mu* for WGMS glaciers')
# Reference glaciers only if in the list and period is good
ref_gdirs = utils.get_ref_mb_glaciers(gdirs)
# Run
from oggm.workflow import execute_entity_task
out = execute_entity_task(t_star_from_refmb, ref_gdirs)
# Loop write
df = pd.DataFrame()
for gdir, res in zip(ref_gdirs, out):
if res is None:
# For certain parameters there is no valid mu candidate on certain
# glaciers. E.g. if temp is too low for melt. This will raise an
# error in t_star_from_refmb and should only get here if
# continue_on_error = True
# Do not add this glacier to the ref_tstar.csv
# Think of better solution later
continue
# list of mus compatibles with refmb
rid = gdir.rgi_id
df.loc[rid, 'lon'] = gdir.cenlon
df.loc[rid, 'lat'] = gdir.cenlat
df.loc[rid, 'n_mb_years'] = len(gdir.get_ref_mb_data())
df.loc[rid, 'tstar'] = res['t_star']
df.loc[rid, 'bias'] = res['bias']
# Write out
df['tstar'] = df['tstar'].astype(int)
df['n_mb_years'] = df['n_mb_years'].astype(int)
file = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars.csv')
df.sort_index().to_csv(file)
# We store the associated params to make sure
# other tools cannot fool around without re-calibration
params_file = os.path.join(cfg.PATHS['working_dir'],
'ref_tstars_params.json')
with open(params_file, 'w') as fp:
json.dump({k: cfg.PARAMS[k] for k in MB_PARAMS}, fp)
| bsd-3-clause |
ypkang/Dato-Core | src/unity/python/graphlab/test/test_sarray.py | 13 | 60654 | # -*- coding: utf-8 -*-
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
from graphlab.data_structures.sarray import SArray
from graphlab_util.timezone import GMT
import pandas as pd
import numpy as np
import unittest
import random
import datetime as dt
import copy
import os
import math
import shutil
import array
import util
import time
import itertools
import warnings
import functools
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
class SArrayTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.bool_data = [x % 2 == 0 for x in range(10)]
self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),dt.datetime(1902, 10, 21, 10, 34, 10),None]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["abc", "def", "hello", "world", "pika", "chu", "hello", "world"]
self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]
self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]
self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def __test_equal(self, _sarray, _data, _type):
self.assertEqual(_sarray.dtype(), _type)
self.assertSequenceEqual(list(_sarray.head(_sarray.size())), _data)
def __test_creation(self, data, dtype, expected):
"""
Create sarray from data with dtype, and test it equals to
expected.
"""
s = SArray(data, dtype)
self.__test_equal(s, expected, dtype)
s = SArray(pd.Series(data), dtype)
self.__test_equal(s, expected, dtype)
def __test_creation_type_inference(self, data, expected_dtype, expected):
"""
Create sarray from data with dtype, and test it equals to
expected.
"""
s = SArray(data)
self.__test_equal(s, expected, expected_dtype)
s = SArray(pd.Series(data))
self.__test_equal(s, expected, expected_dtype)
def test_creation(self):
self.__test_creation(self.int_data, int, self.int_data)
self.__test_creation(self.int_data, float, [float(x) for x in self.int_data])
self.__test_creation(self.int_data, str, [str(x) for x in self.int_data])
self.__test_creation(self.float_data, float, self.float_data)
self.assertRaises(TypeError, self.__test_creation, [self.float_data, int])
self.__test_creation(self.string_data, str, self.string_data)
self.assertRaises(TypeError, self.__test_creation, [self.string_data, int])
self.assertRaises(TypeError, self.__test_creation, [self.string_data, float])
expected_output = [chr(x) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(SArray(self.url, str), expected_output, str)
self.__test_creation(self.vec_data, array.array, self.vec_data)
self.__test_creation(self.list_data, list, self.list_data)
self.__test_creation(self.dict_data, dict, self.dict_data)
# test with type inference
self.__test_creation_type_inference(self.int_data, int, self.int_data)
self.__test_creation_type_inference(self.float_data, float, self.float_data)
self.__test_creation_type_inference(self.bool_data, int, [int(x) for x in self.bool_data])
self.__test_creation_type_inference(self.string_data, str, self.string_data)
self.__test_creation_type_inference(self.vec_data, array.array, self.vec_data)
self.__test_creation_type_inference([np.bool_(True),np.bool_(False)],int,[1,0])
def test_list_with_none_creation(self):
tlist=[[2,3,4],[5,6],[4,5,10,None]]
g=SArray(tlist)
self.assertEqual(len(g), len(tlist))
for i in range(len(tlist)):
self.assertEqual(g[i], tlist[i])
def test_list_with_array_creation(self):
import array
t = array.array('d',[1.1,2,3,4,5.5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype(), float)
glist = list(g)
for i in range(len(glist)):
self.assertAlmostEqual(glist[i], t[i])
t = array.array('i',[1,2,3,4,5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype(), int)
glist = list(g)
for i in range(len(glist)):
self.assertEqual(glist[i], t[i])
def test_save_load(self):
# Make sure these files don't exist before testing
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
sint = SArray(self.int_data, int)
sflt = SArray([float(x) for x in self.int_data], float)
sstr = SArray([str(x) for x in self.int_data], str)
svec = SArray(self.vec_data, array.array)
slist = SArray(self.list_data, list)
sdict = SArray(self.dict_data, dict)
sint.save('intarr.sidx')
sflt.save('fltarr.sidx')
sstr.save('strarr.sidx')
svec.save('vecarr.sidx')
slist.save('listarr.sidx')
sdict.save('dictarr.sidx')
sint2 = SArray('intarr.sidx')
sflt2 = SArray('fltarr.sidx')
sstr2 = SArray('strarr.sidx')
svec2 = SArray('vecarr.sidx')
slist2 = SArray('listarr.sidx')
sdict2 = SArray('dictarr.sidx')
self.assertRaises(IOError, lambda: SArray('__no_such_file__.sidx'))
self.__test_equal(sint2, self.int_data, int)
self.__test_equal(sflt2, [float(x) for x in self.int_data], float)
self.__test_equal(sstr2, [str(x) for x in self.int_data], str)
self.__test_equal(svec2, self.vec_data, array.array)
self.__test_equal(slist2, self.list_data, list)
self.__test_equal(sdict2, self.dict_data, dict)
# Bad permission
test_dir = 'test_dir'
if os.path.exists(test_dir):
os.removedirs(test_dir)
os.makedirs(test_dir, mode=0000)
with self.assertRaises(IOError):
sint.save(os.path.join(test_dir, 'bad.sidx'))
# Permissions will affect this test first, so no need
# to write something here
with self.assertRaises(IOError):
sint3 = SArray(os.path.join(test_dir, 'bad.sidx'))
os.removedirs(test_dir)
#cleanup
del sint2
del sflt2
del sstr2
del svec2
del slist2
del sdict2
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
def test_save_load_text(self):
self._remove_single_file('txt_int_arr.txt')
sint = SArray(self.int_data, int)
sint.save('txt_int_arr.txt')
self.assertTrue(os.path.exists('txt_int_arr.txt'))
f = open('txt_int_arr.txt')
lines = f.readlines()
for i in range(len(sint)):
self.assertEquals(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr.txt')
self._remove_single_file('txt_int_arr')
sint.save('txt_int_arr', format='text')
self.assertTrue(os.path.exists('txt_int_arr'))
f = open('txt_int_arr')
lines = f.readlines()
for i in range(len(sint)):
self.assertEquals(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr')
def _remove_single_file(self, filename):
try:
os.remove(filename)
except:
pass
def _remove_sarray_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
shutil.rmtree(f)
def test_transform(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
# Test randomness across segments, randomized sarray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000), int)
vec = list(sa_random.head(sa_random.size()))
self.assertFalse(all([x == vec[0] for x in vec]))
# test transform with missing values
sa = SArray([1,2,3,None,4,5])
sa1 = sa.apply(lambda x : x + 1)
self.__test_equal(sa1, [2,3,4,None,5,6], int)
def test_transform_with_multiple_lambda(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
sa2_int = sa_int.apply(lambda val: val + 1, int)
expected_output = [x for x in range(ord('a') + 1, ord('a') + 26 + 1)]
self.__test_equal(sa2_int, expected_output, int)
def test_transform_with_exception(self):
sa_char = SArray(['a' for i in xrange(10000)], str)
# # type mismatch exception
self.assertRaises(TypeError, lambda: sa_char.apply(lambda char: char, int).head(1))
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0, float))
def test_transform_with_type_inference(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char))
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
sa_bool = sa_char.apply(lambda char: ord(char) > ord('c'))
expected_output = [int(x > ord('c')) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_bool, expected_output, int)
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0))
# Test randomness across segments, randomized sarray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000))
vec = list(sa_random.head(sa_random.size()))
self.assertFalse(all([x == vec[0] for x in vec]))
def test_transform_on_lists(self):
sa_int = SArray(self.int_data, int)
sa_vec2 = sa_int.apply(lambda x: [x, x+1, str(x)])
expected = [[i, i + 1, str(i)] for i in self.int_data]
self.__test_equal(sa_vec2, expected, list)
sa_int_again = sa_vec2.apply(lambda x: int(x[0]))
self.__test_equal(sa_int_again, self.int_data, int)
# transform from vector to vector
sa_vec = SArray(self.vec_data, array.array)
sa_vec2 = sa_vec.apply(lambda x: x)
self.__test_equal(sa_vec2, self.vec_data, array.array)
# transform on list
sa_list = SArray(self.list_data, list)
sa_list2 = sa_list.apply(lambda x: x)
self.__test_equal(sa_list2, self.list_data, list)
# transform dict to list
sa_dict = SArray(self.dict_data, dict)
sa_list = sa_dict.apply(lambda x: x.keys())
self.__test_equal(sa_list, [x.keys() for x in self.dict_data], list)
def test_transform_dict(self):
# lambda accesses dict
sa_dict = SArray([{'a':1}, {1:2}, {'c': 'a'}, None], dict)
sa_bool_r = sa_dict.apply(lambda x: x.has_key('a') if x != None else None, skip_undefined=False)
expected_output = [1, 0, 0, None]
self.__test_equal(sa_bool_r, expected_output, int)
# lambda returns dict
expected_output = [{'a':1}, {1:2}, None, {'c': 'a'}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.apply(lambda x: x)
self.__test_equal(lambda_out, expected_output, dict)
def test_filter_dict(self):
data = [{'a':1}, {1:2}, None, {'c': 'a'}]
expected_output = [{'a':1}]
sa_dict = SArray(expected_output, dict)
ret = sa_dict.filter(lambda x: x.has_key('a'))
self.__test_equal(ret, expected_output, dict)
# try second time to make sure the lambda system still works
expected_output = [{1:2}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.filter(lambda x: x.has_key(1))
self.__test_equal(lambda_out, expected_output, dict)
def test_filter(self):
# test empty
s = SArray([], float)
no_change = s.filter(lambda x : x == 0)
self.assertEqual(no_change.size(), 0)
# test normal case
s = SArray(self.int_data, int)
middle_of_array = s.filter(lambda x: x > 3 and x < 8)
self.assertEqual(list(middle_of_array.head(10)), [x for x in range(4,8)])
# test normal string case
s = SArray(self.string_data, str)
exp_val_list = [x for x in self.string_data if x != 'world']
# Remove all words whose second letter is not in the first half of the alphabet
second_letter = s.filter(lambda x: len(x) > 1 and (ord(x[1]) > ord('a')) and (ord(x[1]) < ord('n')))
self.assertEqual(list(second_letter.head(10)), exp_val_list)
# test not-a-lambda
def a_filter_func(x):
return ((x > 4.4) and (x < 6.8))
s = SArray(self.int_data, float)
another = s.filter(a_filter_func)
self.assertEqual(list(another.head(10)), [5.,6.])
sa = SArray(self.float_data)
# filter by self
sa2 = sa[sa]
self.assertEquals(list(sa.head(10)), list(sa2.head(10)))
# filter by zeros
sa_filter = SArray([0,0,0,0,0,0,0,0,0,0])
sa2 = sa[sa_filter]
self.assertEquals(len(sa2), 0)
# filter by wrong size
sa_filter = SArray([0,2,5])
with self.assertRaises(IndexError):
sa2 = sa[sa_filter]
def test_any_all(self):
s = SArray([0,1,2,3,4,5,6,7,8,9], int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
s = SArray([0,0,0,0,0], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray(self.string_data, str)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
s = SArray(self.int_data, int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
# test empty
s = SArray([], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), True)
s = SArray([[], []], array.array)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray([[],[1.0]], array.array)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
def test_astype(self):
# test empty
s = SArray([], int)
as_out = s.astype(float)
self.assertEqual(as_out.dtype(), float)
# test float -> int
s = SArray(map(lambda x: x+0.2, self.float_data), float)
as_out = s.astype(int)
self.assertEqual(list(as_out.head(10)), self.int_data)
# test int->string
s = SArray(self.int_data, int)
as_out = s.astype(str)
self.assertEqual(list(as_out.head(10)), map(lambda x: str(x), self.int_data))
i_out = as_out.astype(int)
self.assertEqual(list(i_out.head(10)), list(s.head(10)))
s = SArray(self.vec_data, array.array)
with self.assertRaises(RuntimeError):
s.astype(int)
with self.assertRaises(RuntimeError):
s.astype(float)
s = SArray(["a","1","2","3"])
with self.assertRaises(RuntimeError):
s.astype(int)
self.assertEqual(list(s.astype(int,True).head(4)), [None,1,2,3])
s = SArray(["[1 2 3]","[4;5]"])
ret = list(s.astype(array.array).head(2))
self.assertEqual(ret, [array.array('d',[1,2,3]),array.array('d',[4,5])])
s = SArray(["[1,\"b\",3]","[4,5]"])
ret = list(s.astype(list).head(2))
self.assertEqual(ret, [[1,"b",3],[4,5]])
s = SArray(["{\"a\":2,\"b\":3}","{}"])
ret = list(s.astype(dict).head(2))
self.assertEqual(ret, [{"a":2,"b":3},{}])
s = SArray(["[1abc]"])
ret = list(s.astype(list).head(1))
self.assertEqual(ret, [["1abc"]])
s = SArray(["{1xyz:1a,2b:2}"])
ret = list(s.astype(dict).head(1))
self.assertEqual(ret, [{"1xyz":"1a","2b":2}])
def test_clip(self):
# invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.clip(25,26)
with self.assertRaises(RuntimeError):
s.clip_lower(25)
with self.assertRaises(RuntimeError):
s.clip_upper(26)
# int w/ int, test lower and upper functions too
# int w/float, no change
s = SArray(self.int_data, int)
clip_out = s.clip(3,7).head(10)
# test that our list isn't cast to float if nothing happened
clip_out_nc = s.clip(0.2, 10.2).head(10)
lclip_out = s.clip_lower(3).head(10)
rclip_out = s.clip_upper(7).head(10)
self.assertEqual(len(clip_out), len(self.int_data))
self.assertEqual(len(lclip_out), len(self.int_data))
self.assertEqual(len(rclip_out), len(self.int_data))
for i in range(0,len(clip_out)):
if i < 2:
self.assertEqual(clip_out[i], 3)
self.assertEqual(lclip_out[i], 3)
self.assertEqual(rclip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
elif i > 6:
self.assertEqual(clip_out[i], 7)
self.assertEqual(lclip_out[i], self.int_data[i])
self.assertEqual(rclip_out[i], 7)
self.assertEqual(clip_out_nc[i], self.int_data[i])
else:
self.assertEqual(clip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
# int w/float, change
# float w/int
# float w/float
clip_out = s.clip(2.8, 7.2).head(10)
fs = SArray(self.float_data, float)
ficlip_out = fs.clip(3, 7).head(10)
ffclip_out = fs.clip(2.8, 7.2).head(10)
for i in range(0,len(clip_out)):
if i < 2:
self.assertAlmostEqual(clip_out[i], 2.8)
self.assertAlmostEqual(ffclip_out[i], 2.8)
self.assertAlmostEqual(ficlip_out[i], 3.)
elif i > 6:
self.assertAlmostEqual(clip_out[i], 7.2)
self.assertAlmostEqual(ffclip_out[i], 7.2)
self.assertAlmostEqual(ficlip_out[i], 7.)
else:
self.assertAlmostEqual(clip_out[i], self.float_data[i])
self.assertAlmostEqual(ffclip_out[i], self.float_data[i])
self.assertAlmostEqual(ficlip_out[i], self.float_data[i])
vs = SArray(self.vec_data, array.array);
clipvs = vs.clip(3, 7).head(100)
self.assertEqual(len(clipvs), len(self.vec_data));
for i in range(0, len(clipvs)):
a = clipvs[i]
b = self.vec_data[i]
self.assertEqual(len(a), len(b))
for j in range(0, len(b)):
if b[j] < 3:
b[j] = 3
elif b[j] > 7:
b[j] = 7
self.assertEqual(a, b)
def test_missing(self):
s=SArray(self.int_data, int)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.int_data + [None], int)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.float_data, float)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.float_data + [None], float)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.string_data, str)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.string_data + [None], str)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.vec_data, array.array)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.vec_data + [None], array.array)
self.assertEqual(s.num_missing(), 1)
def test_nonzero(self):
# test empty
s = SArray([],int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test all nonzero
s = SArray(self.float_data, float)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.float_data))
# test all zero
s = SArray([0 for x in range(0,10)], int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test strings
str_list = copy.deepcopy(self.string_data)
str_list.append("")
s = SArray(str_list, str)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.string_data))
def test_std_var(self):
# test empty
s = SArray([], int)
self.assertTrue(s.std() is None)
self.assertTrue(s.var() is None)
# increasing ints
s = SArray(self.int_data, int)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# increasing floats
s = SArray(self.float_data, float)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# vary ddof
self.assertAlmostEqual(s.var(ddof=3), 11.7857143)
self.assertAlmostEqual(s.var(ddof=6), 20.625)
self.assertAlmostEqual(s.var(ddof=9), 82.5)
self.assertAlmostEqual(s.std(ddof=3), 3.4330328)
self.assertAlmostEqual(s.std(ddof=6), 4.5414755)
self.assertAlmostEqual(s.std(ddof=9), 9.08295106)
# bad ddof
with self.assertRaises(RuntimeError):
s.var(ddof=11)
with self.assertRaises(RuntimeError):
s.std(ddof=11)
# bad type
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.std()
with self.assertRaises(RuntimeError):
s.var()
# overflow test
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
self.assertAlmostEqual(s.var(), 21267647932558653957237540927630737409.0)
self.assertAlmostEqual(s.std(), 4611686018427387900.0)
def test_tail(self):
# test empty
s = SArray([], int)
self.assertEqual(len(s.tail()), 0)
# test standard tail
s = SArray([x for x in range(0,40)], int)
self.assertEqual(s.tail(), [x for x in range(30,40)])
# smaller amount
self.assertEqual(s.tail(3), [x for x in range(37,40)])
# larger amount
self.assertEqual(s.tail(40), [x for x in range(0,40)])
# too large
self.assertEqual(s.tail(81), [x for x in range(0,40)])
def test_max_min_sum_mean(self):
# negative and positive
s = SArray([-2,-1,0,1,2], int)
self.assertEqual(s.max(), 2)
self.assertEqual(s.min(), -2)
self.assertEqual(s.sum(), 0)
self.assertAlmostEqual(s.mean(), 0.)
# test valid and invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.max()
with self.assertRaises(RuntimeError):
s.min()
with self.assertRaises(RuntimeError):
s.sum()
with self.assertRaises(RuntimeError):
s.mean()
s = SArray(self.int_data, int)
self.assertEqual(s.max(), 10)
self.assertEqual(s.min(), 1)
self.assertEqual(s.sum(), 55)
self.assertAlmostEqual(s.mean(), 5.5)
s = SArray(self.float_data, float)
self.assertEqual(s.max(), 10.)
self.assertEqual(s.min(), 1.)
self.assertEqual(s.sum(), 55.)
self.assertAlmostEqual(s.mean(), 5.5)
# test all negative
s = SArray(map(lambda x: x*-1, self.int_data), int)
self.assertEqual(s.max(), -1)
self.assertEqual(s.min(), -10)
self.assertEqual(s.sum(), -55)
self.assertAlmostEqual(s.mean(), -5.5)
# test empty
s = SArray([], float)
self.assertTrue(s.max() is None)
self.assertTrue(s.min() is None)
self.assertTrue(s.sum() is None)
self.assertTrue(s.mean() is None)
# test big ints
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
self.assertEqual(s.max(), huge_int)
self.assertEqual(s.min(), 1)
# yes, we overflow
self.assertEqual(s.sum(), (huge_int+1)*-1)
# ...but not here
self.assertAlmostEqual(s.mean(), 4611686018427387904.)
a = SArray([[1,2],[1,2],[1,2]], array.array)
self.assertEqual(a.sum(), array.array('d', [3,6]))
self.assertEqual(a.mean(), array.array('d', [1,2]))
with self.assertRaises(RuntimeError):
a.max()
with self.assertRaises(RuntimeError):
a.min()
a = SArray([[1,2],[1,2],[1,2,3]], array.array)
with self.assertRaises(RuntimeError):
a.sum()
with self.assertRaises(RuntimeError):
a.mean()
def test_python_special_functions(self):
s = SArray([], int)
self.assertEqual(len(s), 0)
self.assertEqual(str(s), '[]')
self.assertEqual(bool(s), False)
# increasing ints
s = SArray(self.int_data, int)
self.assertEqual(len(s), len(self.int_data))
self.assertEqual(str(s), str(self.int_data))
self.assertEqual(bool(s), True)
realsum = sum(self.int_data)
sum1 = sum([x for x in s])
sum2 = s.sum()
sum3 = s.apply(lambda x:x, int).sum()
self.assertEquals(sum1, realsum)
self.assertEquals(sum2, realsum)
self.assertEquals(sum3, realsum)
def test_scalar_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10]);
t = SArray(s, int)
self.__test_equal(t + 1, list(s + 1), int)
self.__test_equal(t - 1, list(s - 1), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / 2, list(s / 2.0), float)
self.__test_equal(t * 2, list(s * 2), int)
self.__test_equal(t < 5, list(s < 5), int)
self.__test_equal(t > 5, list(s > 5), int)
self.__test_equal(t <= 5, list(s <= 5), int)
self.__test_equal(t >= 5, list(s >= 5), int)
self.__test_equal(t == 5, list(s == 5), int)
self.__test_equal(t != 5, list(s != 5), int)
self.__test_equal(1.5 + t, list(1.5 + s), float)
self.__test_equal(1.5 - t, list(1.5 - s), float)
self.__test_equal(2.0 / t, list(2.0 / s), float)
self.__test_equal(2 / t, list(2.0 / s), float)
self.__test_equal(2.5 * t, list(2.5 * s), float)
s=["a","b","c"]
t = SArray(s, str)
self.__test_equal(t + "x", [i + "x" for i in s], str)
with self.assertRaises(RuntimeError):
t - 'x'
with self.assertRaises(RuntimeError):
t * 'x'
with self.assertRaises(RuntimeError):
t / 'x'
s = SArray(self.vec_data, array.array)
self.__test_equal(s + 1, [array.array('d', [float(j) + 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s - 1, [array.array('d', [float(j) - 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s * 2, [array.array('d', [float(j) * 2 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s / 2, [array.array('d', [float(j) / 2 for j in i]) for i in self.vec_data], array.array)
s = SArray([1,2,3,4,None])
self.__test_equal(s == None, [0,0,0,0,1], int)
self.__test_equal(s != None, [1,1,1,1,0], int)
def test_vector_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10]);
s2=np.array([5,4,3,2,1,10,9,8,7,6]);
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t + t2, list(s + s2), int)
self.__test_equal(t - t2, list(s - s2), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / t2, list(s.astype(float) / s2), float)
self.__test_equal(t * t2, list(s * s2), int)
self.__test_equal(t < t2, list(s < s2), int)
self.__test_equal(t > t2, list(s > s2), int)
self.__test_equal(t <= t2, list(s <= s2), int)
self.__test_equal(t >= t2, list(s >= s2), int)
self.__test_equal(t == t2, list(s == s2), int)
self.__test_equal(t != t2, list(s != s2), int)
s = SArray(self.vec_data, array.array)
self.__test_equal(s + s, [array.array('d', [float(j) + float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s - s, [array.array('d', [float(j) - float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s * s, [array.array('d', [float(j) * float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s / s, [array.array('d', [float(j) / float(j) for j in i]) for i in self.vec_data], array.array)
t = SArray(self.float_data, float)
self.__test_equal(s + t, [array.array('d', [float(j) + i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_equal(s - t, [array.array('d', [float(j) - i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_equal(s * t, [array.array('d', [float(j) * i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_equal(s / t, [array.array('d', [float(j) / i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
s = SArray([1,2,3,4,None])
self.assertTrue((s==s).all())
s = SArray([1,2,3,4,None])
self.assertFalse((s!=s).any())
def test_logical_ops(self):
s=np.array([0,0,0,0,1,1,1,1]);
s2=np.array([0,1,0,1,0,1,0,1]);
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t & t2, list(((s & s2) > 0).astype(int)), int)
self.__test_equal(t | t2, list(((s | s2) > 0).astype(int)), int)
def test_string_operators(self):
s=["a","b","c","d","e","f","g","h","i","j"];
s2=["e","d","c","b","a","j","i","h","g","f"];
t = SArray(s, str)
t2 = SArray(s2, str)
self.__test_equal(t + t2, ["".join(x) for x in zip(s,s2)], str)
self.__test_equal(t + "x", [x + "x" for x in s], str)
self.__test_equal(t < t2, [x < y for (x,y) in zip(s,s2)], int)
self.__test_equal(t > t2, [x > y for (x,y) in zip(s,s2)], int)
self.__test_equal(t == t2, [x == y for (x,y) in zip(s,s2)], int)
self.__test_equal(t != t2, [x != y for (x,y) in zip(s,s2)], int)
self.__test_equal(t <= t2, [x <= y for (x,y) in zip(s,s2)], int)
self.__test_equal(t >= t2, [x >= y for (x,y) in zip(s,s2)], int)
def test_vector_operator_missing_propagation(self):
t = SArray([1,2,3,4,None,6,7,8,9,None], float) # missing 4th and 9th
t2 = SArray([None,4,3,2,np.nan,10,9,8,7,6], float) # missing 0th and 4th
self.assertEquals(len((t + t2).dropna()), 7);
self.assertEquals(len((t - t2).dropna()), 7);
self.assertEquals(len((t * t2).dropna()), 7);
def test_dropna(self):
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
self.assertEquals(len(t.dropna()), 6)
self.assertEquals(list(t.dropna()), no_nas)
t2 = SArray([None,np.nan])
self.assertEquals(len(t2.dropna()), 0)
self.assertEquals(list(SArray(self.int_data).dropna()), self.int_data)
self.assertEquals(list(SArray(self.float_data).dropna()), self.float_data)
def test_fillna(self):
# fillna shouldn't fill anything
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
out = t.fillna('hello')
self.assertEquals(list(out), no_nas)
# Normal integer case (float automatically cast to int)
t = SArray([53,23,None,np.nan,5])
self.assertEquals(list(t.fillna(-1.0)), [53,23,-1,-1,5])
# dict type
t = SArray(self.dict_data+[None])
self.assertEquals(list(t.fillna({1:'1'})), self.dict_data+[{1:'1'}])
# list type
t = SArray(self.list_data+[None])
self.assertEquals(list(t.fillna([0,0,0])), self.list_data+[[0,0,0]])
# vec type
t = SArray(self.vec_data+[None])
self.assertEquals(list(t.fillna(array.array('f',[0.0,0.0]))), self.vec_data+[array.array('f',[0.0,0.0])])
# empty sarray
t = SArray()
self.assertEquals(len(t.fillna(0)), 0)
def test_sample(self):
sa = SArray(data=self.int_data)
sa_sample = sa.sample(.5, 9)
sa_sample2 = sa.sample(.5, 9)
self.assertEqual(sa_sample.head(), sa_sample2.head())
for i in sa_sample:
self.assertTrue(i in self.int_data)
with self.assertRaises(ValueError):
sa.sample(3)
sa_sample = SArray().sample(.5, 9)
self.assertEqual(len(sa_sample), 0)
def test_vector_slice(self):
d=[[1],[1,2],[1,2,3]]
g=SArray(d, array.array)
self.assertEqual(list(g.vector_slice(0).head()), [1,1,1])
self.assertEqual(list(g.vector_slice(0,2).head()), [None,array.array('d', [1,2]),array.array('d', [1,2])])
self.assertEqual(list(g.vector_slice(0,3).head()), [None,None,array.array('d', [1,2,3])])
g=SArray(self.vec_data, array.array);
self.__test_equal(g.vector_slice(0), self.float_data, float)
self.__test_equal(g.vector_slice(0, 2), self.vec_data, array.array)
def test_lazy_eval(self):
sa = SArray(range(-10, 10))
sa = sa + 1
sa1 = sa >= 0
sa2 = sa <= 0
sa3 = sa[sa1 & sa2]
item_count = sa3.size()
self.assertEqual(item_count, 1)
def __test_append(self, data1, data2, dtype):
sa1 = SArray(data1, dtype)
sa2 = SArray(data2, dtype)
sa3 = sa1.append(sa2)
self.__test_equal(sa3, data1 + data2, dtype)
sa3 = sa2.append(sa1)
self.__test_equal(sa3, data2 + data1, dtype)
def test_append(self):
n = len(self.int_data)
m = n / 2
self.__test_append(self.int_data[0:m], self.int_data[m:n], int)
self.__test_append(self.bool_data[0:m], self.bool_data[m:n], int)
self.__test_append(self.string_data[0:m], self.string_data[m:n], str)
self.__test_append(self.float_data[0:m], self.float_data[m:n], float)
self.__test_append(self.vec_data[0:m], self.vec_data[m:n], array.array)
self.__test_append(self.dict_data[0:m], self.dict_data[m:n], dict)
def test_append_exception(self):
val1 = [i for i in range(1, 1000)]
val2 = [str(i) for i in range(-10, 1)]
sa1 = SArray(val1, int)
sa2 = SArray(val2, str)
with self.assertRaises(RuntimeError):
sa3 = sa1.append(sa2)
def test_word_count(self):
sa = SArray(["This is someurl http://someurl!!", "中文 应该也 行", 'Сблъсъкът между'])
expected = [{"this": 1, "someurl": 2, "is": 1, "http": 1}, {"中文": 1, "应该也": 1, "行": 1}, {"Сблъсъкът": 1, "между": 1}]
expected2 = [{"This": 1, "someurl": 2, "is": 1, "http": 1}, {"中文": 1, "应该也": 1, "行": 1}, {"Сблъсъкът": 1, "между": 1}]
sa1 = sa._count_words()
self.assertEquals(sa1.dtype(), dict)
self.__test_equal(sa1, expected, dict)
sa1 = sa._count_words(to_lower=False)
self.assertEquals(sa1.dtype(), dict)
self.__test_equal(sa1, expected2, dict)
#should fail if the input type is not string
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
sa._count_words()
def test_ngram_count(self):
sa_word = SArray(["I like big dogs. They are fun. I LIKE BIG DOGS", "I like.", "I like big"])
sa_character = SArray(["Fun. is. fun","Fun is fun.","fu", "fun"])
# Testing word n-gram functionality
result = sa_word._count_ngrams(3)
result2 = sa_word._count_ngrams(2)
result3 = sa_word._count_ngrams(3,"word", to_lower=False)
result4 = sa_word._count_ngrams(2,"word", to_lower=False)
expected = [{'fun i like': 1, 'i like big': 2, 'they are fun': 1, 'big dogs they': 1, 'like big dogs': 2, 'are fun i': 1, 'dogs they are': 1}, {}, {'i like big': 1}]
expected2 = [{'i like': 2, 'dogs they': 1, 'big dogs': 2, 'are fun': 1, 'like big': 2, 'they are': 1, 'fun i': 1}, {'i like': 1}, {'i like': 1, 'like big': 1}]
expected3 = [{'I like big': 1, 'fun I LIKE': 1, 'I LIKE BIG': 1, 'LIKE BIG DOGS': 1, 'They are fun': 1, 'big dogs They': 1, 'like big dogs': 1, 'are fun I': 1, 'dogs They are': 1}, {}, {'I like big': 1}]
expected4 = [{'I like': 1, 'like big': 1, 'I LIKE': 1, 'BIG DOGS': 1, 'are fun': 1, 'LIKE BIG': 1, 'big dogs': 1, 'They are': 1, 'dogs They': 1, 'fun I': 1}, {'I like': 1}, {'I like': 1, 'like big': 1}]
self.assertEquals(result.dtype(), dict)
self.__test_equal(result, expected, dict)
self.assertEquals(result2.dtype(), dict)
self.__test_equal(result2, expected2, dict)
self.assertEquals(result3.dtype(), dict)
self.__test_equal(result3, expected3, dict)
self.assertEquals(result4.dtype(), dict)
self.__test_equal(result4, expected4, dict)
#Testing character n-gram functionality
result5 = sa_character._count_ngrams(3, "character")
result6 = sa_character._count_ngrams(2, "character")
result7 = sa_character._count_ngrams(3, "character", to_lower=False)
result8 = sa_character._count_ngrams(2, "character", to_lower=False)
result9 = sa_character._count_ngrams(3, "character", to_lower=False, ignore_space=False)
result10 = sa_character._count_ngrams(2, "character", to_lower=False, ignore_space=False)
result11 = sa_character._count_ngrams(3, "character", to_lower=True, ignore_space=False)
result12 = sa_character._count_ngrams(2, "character", to_lower=True, ignore_space=False)
expected5 = [{'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {}, {'fun': 1}]
expected6 = [{'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected7 = [{'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {}, {'fun': 1}]
expected8 = [{'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected9 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {}, {'fun': 1}]
expected10 = [{' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected11 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {}, {'fun': 1}]
expected12 = [{' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
self.assertEquals(result5.dtype(), dict)
self.__test_equal(result5, expected5, dict)
self.assertEquals(result6.dtype(), dict)
self.__test_equal(result6, expected6, dict)
self.assertEquals(result7.dtype(), dict)
self.__test_equal(result7, expected7, dict)
self.assertEquals(result8.dtype(), dict)
self.__test_equal(result8, expected8, dict)
self.assertEquals(result9.dtype(), dict)
self.__test_equal(result9, expected9, dict)
self.assertEquals(result10.dtype(), dict)
self.__test_equal(result10, expected10, dict)
self.assertEquals(result11.dtype(), dict)
self.__test_equal(result11, expected11, dict)
self.assertEquals(result12.dtype(), dict)
self.__test_equal(result12, expected12, dict)
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
#should fail if the input type is not string
sa._count_ngrams()
with self.assertRaises(TypeError):
#should fail if n is not of type 'int'
sa_word._count_ngrams(1.01)
with self.assertRaises(ValueError):
#should fail with invalid method
sa_word._count_ngrams(3,"bla")
with self.assertRaises(ValueError):
#should fail with n <0
sa_word._count_ngrams(0)
with warnings.catch_warnings(True) as context:
sa_word._count_ngrams(10)
assert len(context) == 1
def test_dict_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_keys = sa.dict_keys()
self.assertEquals(sa_keys, [str(i) for i in self.int_data])
# na value
d = [{'a': 1}, {None: 2}, {"b": None}, None]
sa = SArray(d)
sa_keys = sa.dict_keys()
self.assertEquals(sa_keys, [['a'], [None], ['b'], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_keys()
# empty SArray with type
sa = SArray([], dict)
self.assertEquals(list(sa.dict_keys().head(10)), [], list)
def test_dict_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_values = sa.dict_values()
self.assertEquals(sa_values, [[i, float(i)] for i in self.int_data])
# na value
d = [{'a': 1}, {None: 'str'}, {"b": None}, None]
sa = SArray(d)
sa_values = sa.dict_values()
self.assertEquals(sa_values, [[1], ['str'], [None], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_values()
# empty SArray with type
sa = SArray([], dict)
self.assertEquals(list(sa.dict_values().head(10)), [], list)
def test_dict_trim_by_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': [1,2]}, {None: 'str'}, {"b": None, "c": 1}, None]
sa = SArray(d)
sa_values = sa.dict_trim_by_keys(['a', 'b'])
self.assertEquals(sa_values, [{}, {None: 'str'}, {"c": 1}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_keys([])
sa = SArray([], dict)
self.assertEquals(list(sa.dict_trim_by_keys([]).head(10)), [], list)
def test_dict_trim_by_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None]
sa = SArray(d)
sa_values = sa.dict_trim_by_values(5,10)
self.assertEquals(sa_values, [{'b': 20, 'c':None}, {None:5}, None])
# no upper key
sa_values = sa.dict_trim_by_values(2)
self.assertEquals(sa_values, [{'b': 20}, {"b": 4, None:5}, None])
# no param
sa_values = sa.dict_trim_by_values()
self.assertEquals(sa_values, [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None])
# no lower key
sa_values = sa.dict_trim_by_values(upper=7)
self.assertEquals(sa_values, [{'a':1, 'c':None}, {"b": 4, None: 1}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_values()
sa = SArray([], dict)
self.assertEquals(list(sa.dict_trim_by_values().head(10)), [], list)
def test_dict_has_any_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_any_keys([])
self.assertEquals(sa_values, [0,0,0,0])
sa_values = sa.dict_has_any_keys(['a'])
self.assertEquals(sa_values, [1,0,0,1])
# a single value is automatically converted to a list
sa_values = sa.dict_has_any_keys("a")
self.assertEquals(sa_values, [1,0,0,1])
sa_values = sa.dict_has_any_keys(['a', 'b'])
self.assertEquals(sa_values, [1,1,0,1])
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
sa = SArray([], dict)
self.assertEquals(list(sa.dict_has_any_keys([]).head(10)), [], list)
def test_dict_has_all_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_all_keys([])
self.assertEquals(sa_values, [1,1,0,1])
sa_values = sa.dict_has_all_keys(['a'])
self.assertEquals(sa_values, [1,0,0,1])
# a single value is automatically converted to a list
sa_values = sa.dict_has_all_keys("a")
self.assertEquals(sa_values, [1,0,0,1])
sa_values = sa.dict_has_all_keys(['a', 'b'])
self.assertEquals(sa_values, [1,0,0,0])
sa_values = sa.dict_has_all_keys([None, "b"])
self.assertEquals(sa_values, [0,1,0,0])
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
sa = SArray([], dict)
self.assertEquals(list(sa.dict_has_all_keys([]).head(10)), [], list)
def test_save_load_cleanup_file(self):
# similarly for SArray
with util.TempDirectory() as f:
sa = SArray(range(1,1000000))
sa.save(f)
# 17 for each sarray, 1 object.bin, 1 ini
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# sa1 now references the on-disk file
sa1 = SArray(f);
# create another SFrame and save to the same location
sa2 = SArray([str(i) for i in range(1,100000)])
sa2.save(f)
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# now sa1 should still be accessible
self.__test_equal(sa1, list(sa), int)
# and sa2's data is correct too
sa3 = SArray(f)
self.__test_equal(sa3, list(sa2), str)
# when sa1 goes out of scope, the tmp files should be gone
sa1 = 1
time.sleep(1) # give time for the files to be deleted
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# list_to_compare must have all unique values for this to work
def __generic_unique_test(self, list_to_compare):
test = SArray(list_to_compare + list_to_compare)
self.assertEquals(sorted(list(test.unique())), sorted(list_to_compare))
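# For example, ['a', 'b'] is duplicated to ['a', 'b', 'a', 'b'] and unique()
# must collapse it back to exactly {'a', 'b'}; if list_to_compare itself held
# repeated values, the sorted-list comparison above would fail even though
# unique() behaved correctly.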
def test_unique(self):
# Test empty SArray
test = SArray([])
self.assertEquals(list(test.unique()), [])
# Test one value
test = SArray([1])
self.assertEquals(list(test.unique()), [1])
# Test many of one value
test = SArray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
self.assertEquals(list(test.unique()), [1])
# Test all unique values
test = SArray(self.int_data)
self.assertEquals(sorted(list(test.unique())), self.int_data)
# Test an interesting sequence
interesting_ints = [4654,4352436,5453,7556,45435,4654,5453,4654,5453,1,1,1,5,5,5,8,66,7,7,77,90,-34]
test = SArray(interesting_ints)
u = test.unique()
self.assertEquals(len(u), 13)
# We do not preserve order
self.assertEquals(sorted(list(u)), sorted(np.unique(interesting_ints)))
# Test other types
self.__generic_unique_test(self.string_data[0:6])
# only works reliably because these are float values for which equality
# tests are exact
self.__generic_unique_test(self.float_data)
self.__generic_unique_test(self.list_data)
self.__generic_unique_test(self.vec_data)
with self.assertRaises(TypeError):
SArray(self.dict_data).unique()
def test_item_len(self):
# empty SArray
test = SArray([])
with self.assertRaises(TypeError):
self.assertEquals(test.item_length())
# wrong type
test = SArray([1,2,3])
with self.assertRaises(TypeError):
self.assertEquals(test.item_length())
test = SArray(['1','2','3'])
with self.assertRaises(TypeError):
self.assertEquals(test.item_length())
# vector type
test = SArray([[], [1], [1,2], [1,2,3], None])
item_length = test.item_length();
self.assertEquals(list(item_length), list([0, 1,2,3,None]))
# dict type
test = SArray([{}, {'key1': 1}, {'key2':1, 'key1':2}, None])
self.assertEquals(list(test.item_length()), list([0, 1,2,None]))
# list type
test = SArray([[], [1,2], ['str', 'str2'], None])
self.assertEquals(list(test.item_length()), list([0, 2,2,None]))
def test_random_access(self):
t = list(range(0,100000))
s = SArray(t)
# simple slices
self.__test_equal(s[1:10000], t[1:10000], int)
self.__test_equal(s[0:10000:3], t[0:10000:3], int)
self.__test_equal(s[1:10000:3], t[1:10000:3], int)
self.__test_equal(s[2:10000:3], t[2:10000:3], int)
self.__test_equal(s[3:10000:101], t[3:10000:101], int)
# negative slices
self.__test_equal(s[-5:], t[-5:], int)
self.__test_equal(s[-1:], t[-1:], int)
self.__test_equal(s[-100:-10], t[-100:-10], int)
self.__test_equal(s[-100:-10:2], t[-100:-10:2], int)
# single element reads
self.assertEquals(s[511], t[511])
self.assertEquals(s[1912], t[1912])
self.assertEquals(s[-1], t[-1])
self.assertEquals(s[-10], t[-10])
# A cache boundary
self.assertEquals(s[32*1024-1], t[32*1024-1])
self.assertEquals(s[32*1024], t[32*1024])
# totally different
self.assertEquals(s[19312], t[19312])
# edge case oddities
self.__test_equal(s[10:100:100], t[10:100:100], int)
self.__test_equal(s[-100:len(s):10], t[-100:len(t):10], int)
self.__test_equal(s[-1:-2], t[-1:-2], int)
self.__test_equal(s[-1:-1000:2], t[-1:-1000:2], int)
with self.assertRaises(IndexError):
s[len(s)]
# with caching abilities; these should be fast, as 32K
# elements are cached.
for i in range(0, 100000, 100):
self.assertEquals(s[i], t[i])
for i in range(0, 100000, 100):
self.assertEquals(s[-i], t[-i])
def test_sort(self):
test = SArray([1,2,3,5,1,4])
ascending = SArray([1,1,2,3,4,5])
descending = SArray([5,4,3,2,1,1])
result = test.sort()
self.assertEqual(result, ascending)
result = test.sort(ascending = False)
self.assertEqual(result, descending)
with self.assertRaises(TypeError):
SArray([[1,2], [2,3]]).sort()
def test_unicode_encode_should_not_fail(self):
g=SArray([{'a':u'\u2019'}])
g=SArray([u'123',u'\u2019'])
g=SArray(['123',u'\u2019'])
def test_read_from_avro(self):
data = """Obj\x01\x04\x16avro.schema\xec\x05{"fields": [{"type": "string", "name": "business_id"}, {"type": "string", "name": "date"}, {"type": "string", "name": "review_id"}, {"type": "int", "name": "stars"}, {"type": "string", "name": "text"}, {"type": "string", "name": "type"}, {"type": "string", "name": "user_id"}, {"type": {"type": "map", "values": "int"}, "name": "votes"}], "type": "record", "name": "review"}\x14avro.codec\x08null\x00\x0e7\x91\x0b#.\x8f\xa2H%<G\x9c\x89\x93\xfb\x04\xe8 ,sgBl3UDEcNYKwuUb92CYdA\x142009-01-25,Zj-R0ZZqIKFx56LY2su1iQ\x08\x80\x19The owner of China King had never heard of Yelp...until Jim W rolled up on China King!\n\nThe owner of China King, Michael, is very friendly and chatty. Be Prepared to chat for a few minutes if you strike up a conversation.\n\nThe service here was terrific. We had several people fussing over us but the primary server, Maggie was a gem. \n\nMy wife and the kids opted for the Americanized menu and went with specials like sweet and sour chicken, shrimp in white sauce and garlic beef. Each came came with soup, egg roll and rice. I sampled the garlic beef which they prepared with a kung pao brown sauce (a decision Maggie and my wife arrived at after several minutes of discussion) it had a nice robust flavor and the veggies were fresh and flavorful. I also sampled the shrimp which were succulent and the white sauce had a little more distinctiveness to it than the same sauce at many Chinese restaurants.\n\nI ordered from the traditional menu but went not too adventurous with sizzling plate with scallops and shrimp in black pepper sauce. Very enjoyable. Again, succulent shrimp. The scallops were tasty as well. Realizing that I moved here from Boston and I go into any seafood experience with diminished expectations now that I live in the west, I have to say the scallops are among the fresher and judiciously prepared that I have had in Phoenix.\n\nOverall China King delivered a very tasty and very fresh meal. They have a fairly extensive traditional menu which I look forward to exploring further.\n\nThanks to Christine O for her review...after reading that I knew China King was A-OK.\x0creview,P2kVk4cIWyK4e4h14RhK-Q\x06\nfunny\x08\x0cuseful\x12\x08cool\x0e\x00,arKckMf7lGNYjXjKo6DXcA\x142012-05-05,EyVfhRDlyip2ErKMOHEA-A\x08\xa4\x04We\'ve been here a few times and we love all the fresh ingredients. The pizza is good when you eat it fresh but if you like to eat your pizza cold then you\'ll be biting into hard dough. Their Nutella pizza is good. Take a menu and check out their menu and hours for specials.\x0creview,x1Yl1dpNcWCCEdpME9dg0g\x06\nfunny\x02\x0cuseful\x02\x08cool\x00\x00\x0e7\x91\x0b#.\x8f\xa2H%<G\x9c\x89\x93\xfb"""
test_avro_file = open("test.avro", "wb")
test_avro_file.write(data)
test_avro_file.close()
sa = SArray.from_avro("test.avro")
self.assertEqual(sa.dtype(), dict)
self.assertEqual(len(sa), 2)
def test_from_const(self):
g = SArray.from_const('a', 100)
self.assertEqual(len(g), 100)
self.assertEqual(list(g), ['a']*100)
g = SArray.from_const(dt.datetime(2013, 5, 7, 10, 4, 10),10)
self.assertEqual(len(g), 10)
self.assertEqual(list(g), [dt.datetime(2013, 5, 7, 10, 4, 10,tzinfo=GMT(0))]*10)
g = SArray.from_const(0, 0)
self.assertEqual(len(g), 0)
g = SArray.from_const(None, 100)
self.assertEquals(list(g), [None] * 100)
self.assertEqual(g.dtype(), float)
def test_from_sequence(self):
with self.assertRaises(TypeError):
g = SArray.from_sequence()
g = SArray.from_sequence(100)
self.assertEqual(list(g), range(100))
g = SArray.from_sequence(10, 100)
self.assertEqual(list(g), range(10, 100))
g = SArray.from_sequence(100, 10)
self.assertEqual(list(g), range(100, 10))
def test_datetime_to_str(self):
sa = SArray(self.datetime_data)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,['2013-05-07T10:04:10GMT+00', '1902-10-21T10:34:10GMT+00', None],str)
sa = SArray([None,None,None],dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[None,None,None],str)
sa = SArray(dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[],str)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.datetime_to_str)
sa = SArray()
self.assertRaises(TypeError,sa.datetime_to_str)
def test_str_to_datetime(self):
sa_string = SArray(['2013-05-07T10:04:10GMT+00', '1902-10-21T10:34:10GMT+00', None])
sa_datetime_back = sa_string.str_to_datetime()
expected = [dt.datetime(2013, 5, 7, 10, 4, 10,tzinfo=GMT(0)),dt.datetime(1902, 10, 21, 10, 34, 10,tzinfo=GMT(0)),None]
self.__test_equal(sa_datetime_back,expected,dt.datetime)
sa_string = SArray([None,None,None],str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[None,None,None],dt.datetime)
sa_string = SArray(dtype=str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[],dt.datetime)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.str_to_datetime)
sa = SArray()
self.assertRaises(TypeError,sa.str_to_datetime)
# hour without leading zero
sa = SArray(['10/30/2014 9:01'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M')
expected = [dt.datetime(2014, 10, 30, 9, 1, tzinfo=GMT(0))]
self.__test_equal(sa,expected,dt.datetime)
# without delimiters
sa = SArray(['10302014 0901', '10302014 2001'])
sa = sa.str_to_datetime('%m%d%Y %H%M')
expected = [dt.datetime(2014, 10, 30, 9, 1, tzinfo=GMT(0)),
dt.datetime(2014, 10, 30, 20, 1, tzinfo=GMT(0))]
self.__test_equal(sa,expected,dt.datetime)
# another test without delimiters
sa = SArray(['20110623T191001'])
sa = sa.str_to_datetime("%Y%m%dT%H%M%S%F%q")
expected = [dt.datetime(2011, 06, 23, 19, 10, 1, tzinfo=GMT(0))]
self.__test_equal(sa,expected,dt.datetime)
# am pm
sa = SArray(['10/30/2014 9:01am', '10/30/2014 9:01pm'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%p')
expected = [dt.datetime(2014, 10, 30, 9, 1, tzinfo=GMT(0)),
dt.datetime(2014, 10, 30, 21, 1, tzinfo=GMT(0))]
self.__test_equal(sa,expected,dt.datetime)
sa = SArray(['10/30/2014 9:01AM', '10/30/2014 9:01PM'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%P')
expected = [dt.datetime(2014, 10, 30, 9, 1, tzinfo=GMT(0)),
dt.datetime(2014, 10, 30, 21, 1, tzinfo=GMT(0))]
self.__test_equal(sa,expected,dt.datetime)
# failure 13pm
sa = SArray(['10/30/2014 13:01pm'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %H:%M%p')
# failure: hour 13 when %l should only accept hours up to 12
sa = SArray(['10/30/2014 13:01'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %l:%M')
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %L:%M')
def test_apply_with_partial(self):
sa = SArray([1, 2, 3, 4, 5])
def concat_fn(character, number):
return '%s%d' % (character, number)
my_partial_fn = functools.partial(concat_fn, 'x')
sa_transformed = sa.apply(my_partial_fn)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sa = SArray([1, 2, 3, 4, 5])
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, number):
return '%s%d' % (self.character, number)
concatenator = Concatenator('x')
sa_transformed = sa.apply(concatenator)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
| agpl-3.0 |
AtsushiHashimoto/fujino_mthesis | tools/grouping/python3/clustering_of_exclusive_keywords.py | 1 | 4287 | # _*_ coding: utf-8 -*-
# Python 3.x
"""
Input:  cooccurrence.pickle
        directory in which to save the output
Output: clustering_keywords.pickle
        list of keywords
        frequency of each keyword
        matrix of co-occurrence frequencies (indices follow the order of the list above)
        number of recipes
"""
import os
import csv
import pickle
import argparse
import itertools
import numpy as np
import pandas as pd
import scipy.spatial.distance as ssd
import scipy.cluster.hierarchy as sch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('cooccurrence_path', help=u'cooccurrence.pickle')
parser.add_argument('output_dir', help=u'output directory')
params = parser.parse_args()
return vars(params)
def estimate_cooccur_prob(keywords, occur, cooccur):
cooccur_prob = np.zeros((len(keywords), len(keywords)))
for i,j in itertools.combinations(range(len(keywords)), 2):
cooccur_prob[i,j] = np.max([ cooccur[i,j] / float(occur[i]), cooccur[i,j] / float(occur[j])])
assert cooccur_prob[i,j] <= 1.0, "%s %s %d %d" % (keywords[i], keywords[j], cooccur[i,j], occur[i], occur[j])
cooccur_prob[j,i] = cooccur_prob[i,j]
return cooccur_prob
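# estimate_cooccur_prob scores a keyword pair by the larger of its two
# conditional co-occurrence rates, max(cooccur/occur_i, cooccur/occur_j).
# For example, if keyword i appears in 100 recipes, keyword j in 20, and they
# co-occur in 10, the score is max(10/100, 10/20) = 0.5.  A score near zero
# therefore marks keywords that are almost mutually exclusive, which is what
# the clustering in main() uses directly as a distance.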
def estimate_cooccur_prob_per_recipe(keywords, occur, cooccur, recipe_no):
cooccur_prob = np.zeros((len(keywords), len(keywords)))
for i,j in itertools.combinations(range(len(keywords)), 2):
cooccur_prob[i,j] = cooccur[i,j] / recipe_no
cooccur_prob[j,i] = cooccur_prob[i,j]
return cooccur_prob
def read_ings(ings_path):
df = pd.read_csv(ings_path, encoding="utf-8")
ings = dict(zip(df.iloc[:, 0], df.iloc[:, 1]))
return ings
def main(params):
cooccurrence_path = params['cooccurrence_path']
output_dir = params['output_dir']
with open(cooccurrence_path, 'rb') as fin:
keywords, occur, cooccur, recipe_no = pickle.load(fin)
keywords = np.array(keywords)
occur = np.array(occur)
# exclude non-ingredients and low-frequency keywords
idx = occur > 100
keywords = keywords[idx]
occur = occur[idx]
cooccur = cooccur[idx][:, idx]
print ("# of keywords:",keywords.size)
# compute co-occurrence probabilities
cooccur_prob = estimate_cooccur_prob(keywords, occur, cooccur)
# use the probability directly as the distance, i.e. keywords that do not co-occur end up in the same cluster
d_array = ssd.squareform(cooccur_prob) # linkage expects the condensed vector (1,2),...,(1,n),(2,3),...
coprob_mean = np.mean(d_array[d_array > 0])
print ("coprob_mean %.3e" % coprob_mean)
result = sch.linkage(d_array, method = 'complete')
ths = np.array(result[::-1, 2]) # thresholds ordered from fewest clusters to most
mx_cluster = len(np.where(ths > 0)[0])
th = coprob_mean
n_food = 0
cls = sch.fcluster(result, th, "distance")
print ("n_cluster %d" % len(set(cls)))
for c in set(cls):
same_cls = keywords[cls == c]
n = same_cls.size
idx = np.argsort(occur[cls == c])[::-1] # descending order
with open(os.path.join(output_dir, "cluster_%04d.txt" % c), "wt") as fout:
writer = csv.writer(fout, delimiter = '\t')
writer.writerow(["label", "occur"])
foods = np.c_[ same_cls[idx].reshape(n,1), occur[cls == c][idx].reshape(n,1)]
n_food += len(foods)
writer.writerows(foods)
print ("n_food", n_food)
#plt.figure(figsize=(7,7))
#plt.plot(range(1, len(ths)+1), ths)
#plt.xlim(0,mx_cluster)
#plt.ylim(0,1)
#plt.savefig(os.path.join(output_dir, "th_for_cluster_no.png"))
plt.figure(figsize=(7,7))
plt.plot(range(1, len(ths)+1), ths)
plt.hlines(th, 0, len(set(cls)), color='r')
plt.vlines(len(set(cls)), 0, th, color='r')
plt.plot(len(set(cls)), th, marker='D', ms=10, color='r')
plt.xlim(0,mx_cluster)
plt.ylim(0,1)
plt.savefig(os.path.join(output_dir, "th_for_cluster_no_%.2e_%d.png" % (th, len(set(cls)))))
with open(os.path.join(output_dir, 'clustering_keywords.pickle'), 'wb') as fout:
pickle.dump((keywords, occur, cooccur, recipe_no), fout, protocol=0)
if __name__ == '__main__':
params = parse()
main(params)
| bsd-2-clause |
nmearl/pynamic-old | pynamic/multinest.py | 1 | 4248 | from __future__ import absolute_import, unicode_literals, print_function
__author__ = 'nmearl'
import json
import os
import numpy as np
import pymultinest
import matplotlib.pyplot as plt
import utilfuncs
max_lnlike = -np.inf
mod_pars = None
photo_data = None
rv_data = None
ncores = 1
fname = ""
def per_iteration(mod_pars, theta, lnl, model):
global max_lnlike
if lnl > max_lnlike:
max_lnlike = lnl
params = utilfuncs.split_parameters(theta, mod_pars[0])
redchisqr = np.sum(((photo_data[1] - model) / photo_data[2]) ** 2) / \
(photo_data[1].size - 1 - (mod_pars[0] * 5 + (mod_pars[0] - 1) * 6))
utilfuncs.iterprint(mod_pars, params, max_lnlike, redchisqr, 0.0, 0.0)
utilfuncs.report_as_input(mod_pars, params, fname)
def lnprior(cube, ndim, nparams):
theta = np.array([cube[i] for i in range(ndim)])
masses, radii, fluxes, u1, u2, a, e, inc, om, ln, ma = utilfuncs.split_parameters(theta, mod_pars[0])
masses = 10**(masses*8 - 9)
radii = 10**(radii*4 - 4)
fluxes = 10**(fluxes*4 - 4)
a = 10**(a*2 - 2)
e = 10**(e*3 - 3)
inc *= 2.0 * np.pi
om = 2.0 * np.pi * 10**(om*2 - 2)
ln = 2.0 * np.pi * 10**(ln*8 - 8)
ma = 2.0 * np.pi * 10**(ma*2 - 2)
theta = np.concatenate([masses, radii, fluxes, u1, u2, a, e, inc, om, ln, ma])
for i in range(ndim):
cube[i] = theta[i]
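# lnprior rescales MultiNest's unit-cube samples into physical parameters:
# masses, radii, fluxes, semi-major axes and eccentricities get log-uniform
# priors (e.g. 10**(u*8 - 9) maps u in [0, 1] onto [1e-9, 1e-1]), inclination
# is scaled to [0, 2*pi], and the remaining angles combine a factor of 2*pi
# with a log-uniform term.  Writing the transformed values back into cube is
# how PyMultiNest expects the prior transform to be expressed.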
def lnlike(cube, ndim, nparams):
theta = np.array([cube[i] for i in range(ndim)])
params = utilfuncs.split_parameters(theta, mod_pars[0])
mod_flux, mod_rv = utilfuncs.model(mod_pars, params, photo_data[0], rv_data[0], ncores)
flnl = np.sum((-0.5 * ((mod_flux - photo_data[1]) / photo_data[2]) ** 2))
rvlnl = np.sum((-0.5 * ((mod_rv - rv_data[1]) / rv_data[2])**2))
per_iteration(mod_pars, theta, flnl, mod_flux)
return flnl + rvlnl
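# lnlike assumes independent Gaussian errors on both the photometric fluxes
# and the radial velocities, so each term reduces to -0.5 * chi^2; the
# constant normalisation terms are dropped, which offsets the evidence by a
# fixed amount but leaves the posterior and parameter estimates unchanged.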
def generate(lmod_pars, lparams, lphoto_data, lrv_data, lncores, lfname):
global mod_pars, params, photo_data, rv_data, ncores, fname
mod_pars, params, photo_data, rv_data, ncores, fname = \
lmod_pars, lparams, lphoto_data, lrv_data, lncores, lfname
# number of dimensions our problem has
parameters = ["{0}".format(i) for i in range(mod_pars[0] * 5 + (mod_pars[0] - 1) * 6)]
nparams = len(parameters)
# make sure the output directories exist
if not os.path.exists("./output/{0}/multinest".format(fname)):
os.makedirs(os.path.join("./", "output", "{0}".format(fname), "multinest"))
if not os.path.exists("./output/{0}/plots".format(fname)):
os.makedirs(os.path.join("./", "output", "{0}".format(fname), "plots"))
if not os.path.exists("chains"): os.makedirs("chains")
# we want to see some output while it is running
progress_plot = pymultinest.ProgressPlotter(n_params=nparams,
outputfiles_basename='output/{0}/multinest/'.format(fname))
progress_plot.start()
# progress_print = pymultinest.ProgressPrinter(n_params=nparams, outputfiles_basename='output/{0}/multinest/'.format(fname))
# progress_print.start()
# run MultiNest
pymultinest.run(lnlike, lnprior, nparams, outputfiles_basename=u'./output/{0}/multinest/'.format(fname),
resume=True, verbose=True,
sampling_efficiency='parameter', n_live_points=1000)
# run has completed
progress_plot.stop()
# progress_print.stop()
json.dump(parameters, open('./output/{0}/multinest/params.json'.format(fname), 'w')) # save parameter names
# plot the distribution of a posteriori possible models
plt.figure()
plt.plot(photo_data[0], photo_data[1], '+ ', color='red', label='data')
a = pymultinest.Analyzer(outputfiles_basename="./output/{0}/reports/".format(fname), n_params=nparams)
for theta in a.get_equal_weighted_posterior()[::100, :-1]:
params = utilfuncs.split_parameters(theta, mod_pars[0])
mod_flux, mod_rv = utilfuncs.model(mod_pars, params, photo_data[0], rv_data[0])
plt.plot(photo_data[0], mod_flux, '-', color='blue', alpha=0.3, label='data')
utilfuncs.report_as_input(mod_pars, params, fname)
plt.savefig('./output/{0}/plots/posterior.pdf'.format(fname))
plt.close() | mit |
hainm/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
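# Worked example of the pairwise definition above: with y_true = [0, 0, 1, 1]
# and y_score = [0.1, 0.4, 0.35, 0.8] there are 2 * 2 = 4 positive/negative
# pairs, of which 3 are ranked correctly (0.35 > 0.1, 0.8 > 0.1, 0.8 > 0.4),
# so _auc returns 3 / 4 = 0.75, matching roc_auc_score on the same input.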
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve starting at 0 and ending at
# 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
    # Test that the roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
    # Test that average_precision_score and roc_auc_score are invariant
    # under scaling or shifting of the probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise ValueError if the input does not have an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works without ties:
    # basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied at rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels with a better rank
            # (i.e. a smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
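# Worked example for _my_lrap (added in editing, illustration only): for
# y_true = [[1, 0, 1]] and y_score = [[0.25, 0.5, 0.75]] the corrected ranks
# are [3, 2, 1].  The relevant labels are 0 and 2; label 2 has one relevant
# label ranked at or above it out of rank 1, while label 0 has two out of
# rank 3, so the score is (1 / 1 + 2 / 3) / 2, matching the same toy case in
# check_lrap_toy above.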
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
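def test_coverage_error_one_sample_sketch():
    # Illustrative sketch added in editing, not part of the original suite:
    # the coverage of a single sample is the number of labels whose score is
    # at least that of the lowest-scoring relevant label, i.e. how far down
    # the ranking one must go to cover every relevant label (0 when there is
    # no relevant label).
    def coverage_one(y_true_row, y_score_row):
        y_true_row = np.asarray(y_true_row)
        y_score_row = np.asarray(y_score_row)
        if not y_true_row.any():
            return 0
        min_relevant = y_score_row[y_true_row == 1].min()
        return int((y_score_row >= min_relevant).sum())
    for y_true_row, y_score_row in (([0, 1, 0], [0.25, 0.5, 0.75]),
                                    ([1, 0, 1], [0.25, 0.5, 0.75]),
                                    ([1, 1, 0], [0.5, 0.75, 0.25]),
                                    ([0, 0], [0.75, 0.25])):
        assert_almost_equal(coverage_error([y_true_row], [y_score_row]),
                            coverage_one(y_true_row, y_score_row))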
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial cases
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
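def test_label_ranking_loss_one_sample_sketch():
    # Illustrative sketch added in editing, not part of the original suite:
    # for one sample the loss is the fraction of (relevant, irrelevant)
    # label pairs that are not strictly ordered with the relevant label
    # above the irrelevant one.  Counting ties as mis-ordered reproduces the
    # tie-handling cases asserted in test_ranking_loss_ties_handling below.
    def ranking_loss_one(y_true_row, y_score_row):
        y_true_row = np.asarray(y_true_row)
        y_score_row = np.asarray(y_score_row)
        relevant = y_score_row[y_true_row == 1]
        irrelevant = y_score_row[y_true_row == 0]
        if relevant.size == 0 or irrelevant.size == 0:
            return 0.0
        n_bad = (relevant[:, np.newaxis] <= irrelevant[np.newaxis, :]).sum()
        return n_bad / float(relevant.size * irrelevant.size)
    for y_true_row, y_score_row in (([0, 1, 0], [0.25, 0.5, 0.75]),
                                    ([1, 0, 1], [0.25, 0.5, 0.75]),
                                    ([1, 0], [0.5, 0.5]),
                                    ([0, 0, 1], [0.25, 0.5, 0.5])):
        assert_almost_equal(label_ranking_loss([y_true_row], [y_score_row]),
                            ranking_loss_one(y_true_row, y_score_row))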
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
gertingold/scipy | scipy/signal/fir_filter_design.py | 4 | 47327 | # -*- coding: utf-8 -*-
"""Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import operator
import warnings
import numpy as np
from numpy.fft import irfft, fft, ifft
from scipy.special import sinc
from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning,
lstsq)
from scipy._lib.six import string_types
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase']
def _get_fs(fs, nyq):
"""
Utility for replacing the argument 'nyq' (with default 1) with 'fs'.
"""
if nyq is None and fs is None:
fs = 2
elif nyq is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'nyq' and 'fs'.")
fs = 2*nyq
return fs
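# A few illustrative calls (editorial note; the values follow directly from
# the branches above):
#     _get_fs(fs=None, nyq=None)      -> 2        (documented default)
#     _get_fs(fs=None, nyq=500.0)     -> 1000.0   (legacy 'nyq' argument)
#     _get_fs(fs=48000.0, nyq=None)   -> 48000.0
#     _get_fs(fs=48000.0, nyq=500.0)  -> ValueError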
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1. These are
# relative frequencies, expressed as a fraction of the Nyquist frequency.
# For example, if the Nyquist frequency is 2 KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
Examples
--------
Suppose we want to design a lowpass filter, with 65 dB attenuation
in the stop band. The Kaiser window parameter to be used in the
window method is computed by `kaiser_beta(65)`:
>>> from scipy.signal import kaiser_beta
>>> kaiser_beta(65)
6.20426
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter,
expressed as a fraction of the Nyquist frequency.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
Examples
--------
Suppose we want to design a FIR filter using the Kaiser window method
that will have 211 taps and a transition width of 9 Hz for a signal that
is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency,
the width is 9/(0.5*480) = 0.0375. The approximate attenuation (in dB)
is computed as follows:
>>> from scipy.signal import kaiser_atten
>>> kaiser_atten(211, 0.0375)
64.48099630593983
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Determine the filter window parameters for the Kaiser window method.
The parameters returned by this function are generally used to create
a finite impulse response filter using the window method, with either
`firwin` or `firwin2`.
Parameters
----------
ripple : float
Upper bound for the deviation (in dB) of the magnitude of the
filter's frequency response from that of the desired filter (not
including frequencies in any transition intervals). That is, if w
is the frequency expressed as a fraction of the Nyquist frequency,
A(w) is the actual frequency response of the filter and D(w) is the
desired frequency response, the design requirement is that::
            abs(A(w) - D(w)) < 10**(-ripple/20)
for 0 <= w <= 1 and w not in a transition interval.
width : float
Width of transition region, normalized so that 1 corresponds to pi
radians / sample. That is, the frequency is expressed as a fraction
of the Nyquist frequency.
Returns
-------
numtaps : int
The length of the Kaiser window.
beta : float
The beta parameter for the Kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=True)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
Examples
--------
We will use the Kaiser window method to design a lowpass FIR filter
for a signal that is sampled at 1000 Hz.
We want at least 65 dB rejection in the stop band, and in the pass
band the gain should vary no more than 0.5%.
We want a cutoff frequency of 175 Hz, with a transition between the
pass band and the stop band of 24 Hz. That is, in the band [0, 163],
the gain varies no more than 0.5%, and in the band [187, 500], the
signal is attenuated by at least 65 dB.
>>> from scipy.signal import kaiserord, firwin, freqz
>>> import matplotlib.pyplot as plt
>>> fs = 1000.0
>>> cutoff = 175
>>> width = 24
The Kaiser method accepts just a single parameter to control the pass
band ripple and the stop band rejection, so we use the more restrictive
of the two. In this case, the pass band ripple is 0.005, or 46.02 dB,
so we will use 65 dB as the design parameter.
Use `kaiserord` to determine the length of the filter and the
parameter for the Kaiser window.
>>> numtaps, beta = kaiserord(65, width/(0.5*fs))
>>> numtaps
167
>>> beta
6.20426
Use `firwin` to create the FIR filter.
>>> taps = firwin(numtaps, cutoff, window=('kaiser', beta),
... scale=False, nyq=0.5*fs)
Compute the frequency response of the filter. ``w`` is the array of
frequencies, and ``h`` is the corresponding complex array of frequency
responses.
>>> w, h = freqz(taps, worN=8000)
>>> w *= 0.5*fs/np.pi # Convert w to Hz.
Compute the deviation of the magnitude of the filter's response from
that of the ideal lowpass filter. Values in the transition region are
set to ``nan``, so they won't appear in the plot.
>>> ideal = w < cutoff # The "ideal" frequency response.
>>> deviation = np.abs(np.abs(h) - ideal)
>>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan
Plot the deviation. A close look at the left end of the stop band shows
that the requirement for 65 dB attenuation is violated in the first lobe
by about 0.125 dB. This is not unusual for the Kaiser window method.
>>> plt.plot(w, 20*np.log10(np.abs(deviation)))
>>> plt.xlim(0, 0.5*fs)
>>> plt.ylim(-90, -60)
>>> plt.grid(alpha=0.25)
>>> plt.axhline(-65, color='r', ls='--', alpha=0.3)
>>> plt.xlabel('Frequency (Hz)')
>>> plt.ylabel('Deviation from ideal (dB)')
>>> plt.title('Lowpass Filter Frequency Response')
>>> plt.show()
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
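# Editorial cross-check of the formula against the docstring example:
# ripple=65 dB and width=24/500=0.048 give
# (65 - 7.95) / 2.285 / (pi * 0.048) + 1 ~= 166.6, hence 167 taps after the
# ceil(), and kaiser_beta(65) = 0.1102 * (65 - 8.7) ~= 6.204.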
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=None, fs=None):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist frequency, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist frequency.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be odd if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `fs`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `fs/2`. The values 0 and
`fs/2` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `fs`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
If False, the DC gain is 0. Can also be a string argument for the
desired filter type (equivalent to ``btype`` in IIR design functions).
.. versionadded:: 1.3.0
Support for string arguments.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `fs/2` (the Nyquist frequency) if the first passband ends at
          `fs/2` (i.e. the filter is a single band highpass filter);
center of first passband otherwise
nyq : float, optional
*Deprecated. Use `fs` instead.* This is the Nyquist frequency.
Each frequency in `cutoff` must be between 0 and `nyq`. Default
is 1.
fs : float, optional
The sampling frequency of the signal. Each frequency in `cutoff`
must be between 0 and ``fs/2``. Default is 2.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to ``fs/2``, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See Also
--------
firwin2
firls
minimum_phase
remez
Examples
--------
Low-pass from 0 to f:
>>> from scipy import signal
>>> numtaps = 3
>>> f = 0.1
>>> signal.firwin(numtaps, f)
array([ 0.06799017, 0.86401967, 0.06799017])
Use a specific window function:
>>> signal.firwin(numtaps, f, window='nuttall')
array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
High-pass ('stop' from 0 to f):
>>> signal.firwin(numtaps, f, pass_zero=False)
array([-0.00859313, 0.98281375, -0.00859313])
Band-pass:
>>> f1, f2 = 0.1, 0.2
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
array([ 0.06301614, 0.88770441, 0.06301614])
Band-stop:
>>> signal.firwin(numtaps, [f1, f2])
array([-0.00801395, 1.0160279 , -0.00801395])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
>>> f3, f4 = 0.3, 0.4
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
array([-0.01376344, 1.02752689, -0.01376344])
Multi-band (passbands are [f1, f2] and [f3,f4]):
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
array([ 0.04890915, 0.91284326, 0.04890915])
""" # noqa: E501
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
nyq = 0.5 * _get_fs(fs, nyq)
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than fs/2.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
if isinstance(pass_zero, str):
if pass_zero in ('bandstop', 'lowpass'):
if pass_zero == 'lowpass':
if cutoff.size != 1:
raise ValueError('cutoff must have one element if '
'pass_zero=="lowpass", got %s'
% (cutoff.shape,))
elif cutoff.size <= 1:
raise ValueError('cutoff must have at least two elements if '
'pass_zero=="bandstop", got %s'
% (cutoff.shape,))
pass_zero = True
elif pass_zero in ('bandpass', 'highpass'):
if pass_zero == 'highpass':
if cutoff.size != 1:
raise ValueError('cutoff must have one element if '
'pass_zero=="highpass", got %s'
% (cutoff.shape,))
elif cutoff.size <= 1:
raise ValueError('cutoff must have at least two elements if '
'pass_zero=="bandpass", got %s'
% (cutoff.shape,))
pass_zero = False
else:
raise ValueError('pass_zero must be True, False, "bandpass", '
'"lowpass", "highpass", or "bandstop", got '
'%s' % (pass_zero,))
pass_zero = bool(operator.index(pass_zero)) # ensure bool-like
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist frequency.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=None,
antisymmetric=False, fs=None):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency is half `fs`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be ``fs/2``.
gain : array_like
The filter gains at the frequency sampling points. Certain
        constraints on the gain values, depending on the filter type, apply;
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
        (e.g., 129, 257, etc.). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
*Deprecated. Use `fs` instead.* This is the Nyquist frequency.
Each frequency in `freq` must be between 0 and `nyq`. Default is 1.
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
fs : float, optional
        The sampling frequency of the signal. Each frequency in `freq`
must be between 0 and ``fs/2``. Default is 2.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
firls
firwin
minimum_phase
remez
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
the value of 'numtaps` and `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
    The magnitude responses of all but type I filters are subject to the
    following constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
nyq = 0.5 * _get_fs(fs, nyq)
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with fs/2.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist frequency.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist frequencies.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero "
"frequency.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=None, type='bandpass',
maxiter=25, grid_density=16, fs=None):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges.
All elements must be non-negative and less than half the sampling
frequency as given by `fs`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
*Deprecated. Use `fs` instead.*
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
* 'bandpass' : flat response in bands. This is the default.
* 'differentiator' : frequency proportional response in bands.
* 'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
fs : float, optional
The sampling frequency of the signal. Default is 1.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
firls
firwin
firwin2
minimum_phase
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
    In these examples, `remez` is used to design lowpass, highpass, bandpass
    and bandstop filters. The parameters that define each filter are the
    filter order, the band boundaries, the desired attenuation within those
    bands, and the sampling frequency. The corresponding frequency response
    is then computed and plotted with `freqz`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> def plot_response(fs, w, h, title):
... "Utility function to plot response functions"
... fig = plt.figure()
... ax = fig.add_subplot(111)
... ax.plot(0.5*fs*w/np.pi, 20*np.log10(np.abs(h)))
... ax.set_ylim(-40, 5)
... ax.set_xlim(0, 0.5*fs)
... ax.grid(True)
... ax.set_xlabel('Frequency (Hz)')
... ax.set_ylabel('Gain (dB)')
... ax.set_title(title)
    This example shows a lowpass filter with a steep transition, the result
    of the small transition width and high filter order:
>>> fs = 22050.0 # Sample rate, Hz
>>> cutoff = 8000.0 # Desired cutoff frequency, Hz
>>> trans_width = 100 # Width of transition from pass band to stop band, Hz
>>> numtaps = 400 # Size of the FIR filter.
>>> taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], [1, 0], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "Low-pass Filter")
This example shows a high pass filter:
>>> fs = 22050.0 # Sample rate, Hz
>>> cutoff = 2000.0 # Desired cutoff frequency, Hz
>>> trans_width = 250 # Width of transition from pass band to stop band, Hz
>>> numtaps = 125 # Size of the FIR filter.
>>> taps = signal.remez(numtaps, [0, cutoff - trans_width, cutoff, 0.5*fs],
... [0, 1], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "High-pass Filter")
    For a signal sampled at 22 kHz, a bandpass filter with a pass band of
    2-5 kHz is designed using the Remez algorithm. The transition width is
    260 Hz and the filter order is 10:
>>> fs = 22000.0 # Sample rate, Hz
>>> band = [2000, 5000] # Desired pass band, Hz
>>> trans_width = 260 # Width of transition from pass band to stop band, Hz
>>> numtaps = 10 # Size of the FIR filter.
>>> edges = [0, band[0] - trans_width, band[0], band[1],
... band[1] + trans_width, 0.5*fs]
>>> taps = signal.remez(numtaps, edges, [0, 1, 0], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "Band-pass Filter")
It can be seen that for this bandpass filter, the low order leads to higher
ripple and less steep transitions. There is very low attenuation in the
stop band and little overshoot in the pass band. Of course the desired
gain can be better approximated with a higher filter order.
The next example shows a bandstop filter. Because of the high filter order
the transition is quite steep:
>>> fs = 20000.0 # Sample rate, Hz
>>> band = [6000, 8000] # Desired stop band, Hz
>>> trans_width = 200 # Width of transition from pass band to stop band, Hz
>>> numtaps = 175 # Size of the FIR filter.
>>> edges = [0, band[0] - trans_width, band[0], band[1], band[1] + trans_width, 0.5*fs]
>>> taps = signal.remez(numtaps, edges, [1, 0, 1], Hz=fs)
>>> w, h = signal.freqz(taps, [1], worN=2000)
>>> plot_response(fs, w, h, "Band-stop Filter")
>>> plt.show()
"""
if Hz is None and fs is None:
fs = 1.0
elif Hz is not None:
if fs is not None:
raise ValueError("Values cannot be given for both 'Hz' and 'fs'.")
fs = Hz
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, fs,
maxiter, grid_density)
def firls(numtaps, bands, desired, weight=None, nyq=None, fs=None):
"""
FIR filter design using least-squares error minimization.
Calculate the filter coefficients for the linear-phase finite
impulse response (FIR) filter which has the best approximation
to the desired frequency response described by `bands` and
`desired` in the least squares sense (i.e., the integral of the
weighted mean-squared error within the specified bands is
minimized).
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be odd.
bands : array_like
A monotonic nondecreasing sequence containing the band edges in
Hz. All elements must be non-negative and less than or equal to
the Nyquist frequency given by `nyq`.
desired : array_like
A sequence the same size as `bands` containing the desired gain
at the start and end point of each band.
weight : array_like, optional
A relative weighting to give to each band region when solving
the least squares problem. `weight` has to be half the size of
`bands`.
nyq : float, optional
*Deprecated. Use `fs` instead.*
Nyquist frequency. Each frequency in `bands` must be between 0
and `nyq` (inclusive). Default is 1.
fs : float, optional
The sampling frequency of the signal. Each frequency in `bands`
must be between 0 and ``fs/2`` (inclusive). Default is 2.
Returns
-------
coeffs : ndarray
Coefficients of the optimal (in a least squares sense) FIR filter.
See also
--------
firwin
firwin2
minimum_phase
remez
Notes
-----
This implementation follows the algorithm given in [1]_.
As noted there, least squares design has multiple advantages:
1. Optimal in a least-squares sense.
2. Simple, non-iterative method.
    3. The general solution can be obtained by solving a linear
system of equations.
4. Allows the use of a frequency dependent weighting function.
This function constructs a Type I linear phase FIR filter, which
contains an odd number of `coeffs` satisfying for :math:`n < numtaps`:
.. math:: coeffs(n) = coeffs(numtaps - 1 - n)
The odd number of coefficients and filter symmetry avoid boundary
conditions that could otherwise occur at the Nyquist and 0 frequencies
(e.g., for Type II, III, or IV variants).
.. versionadded:: 0.18
References
----------
.. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares.
OpenStax CNX. Aug 9, 2005.
http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7
Examples
--------
We want to construct a band-pass filter. Note that the behavior in the
frequency ranges between our stop bands and pass bands is unspecified,
and thus may overshoot depending on the parameters of our filter:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> fig, axs = plt.subplots(2)
>>> fs = 10.0 # Hz
>>> desired = (0, 0, 1, 1, 0, 0)
>>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))):
... fir_firls = signal.firls(73, bands, desired, fs=fs)
... fir_remez = signal.remez(73, bands, desired[::2], fs=fs)
... fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs)
... hs = list()
... ax = axs[bi]
... for fir in (fir_firls, fir_remez, fir_firwin2):
... freq, response = signal.freqz(fir)
... hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0])
... for band, gains in zip(zip(bands[::2], bands[1::2]),
... zip(desired[::2], desired[1::2])):
... ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2)
... if bi == 0:
... ax.legend(hs, ('firls', 'remez', 'firwin2'),
... loc='lower center', frameon=False)
... else:
... ax.set_xlabel('Frequency (Hz)')
... ax.grid(True)
... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude')
...
>>> fig.tight_layout()
>>> plt.show()
""" # noqa
nyq = 0.5 * _get_fs(fs, nyq)
numtaps = int(numtaps)
if numtaps % 2 == 0 or numtaps < 1:
raise ValueError("numtaps must be odd and >= 1")
M = (numtaps-1) // 2
# normalize bands 0->1 and make it 2 columns
nyq = float(nyq)
if nyq <= 0:
raise ValueError('nyq must be positive, got %s <= 0.' % nyq)
bands = np.asarray(bands).flatten() / nyq
if len(bands) % 2 != 0:
raise ValueError("bands must contain frequency pairs.")
if (bands < 0).any() or (bands > 1).any():
raise ValueError("bands must be between 0 and 1 relative to Nyquist")
bands.shape = (-1, 2)
# check remaining params
desired = np.asarray(desired).flatten()
if bands.size != desired.size:
raise ValueError("desired must have one entry per frequency, got %s "
"gains for %s frequencies."
% (desired.size, bands.size))
desired.shape = (-1, 2)
if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any():
raise ValueError("bands must be monotonically nondecreasing and have "
"width > 0.")
if (bands[:-1, 1] > bands[1:, 0]).any():
raise ValueError("bands must not overlap.")
if (desired < 0).any():
raise ValueError("desired must be non-negative.")
if weight is None:
weight = np.ones(len(desired))
weight = np.asarray(weight).flatten()
if len(weight) != len(desired):
raise ValueError("weight must be the same size as the number of "
"band pairs (%s)." % (len(bands),))
if (weight < 0).any():
raise ValueError("weight must be non-negative.")
# Set up the linear matrix equation to be solved, Qa = b
# We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n)
# where Q1(k,n)=q(k−n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel.
# We omit the factor of 0.5 above, instead adding it during coefficient
# calculation.
# We also omit the 1/π from both Q and b equations, as they cancel
# during solving.
# We have that:
# q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π)
    # Using our normalization ω=πf and with a constant weight W over each
# interval f1->f2 we get:
# q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf
# integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
n = np.arange(numtaps)[:, np.newaxis, np.newaxis]
q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight)
# Now we assemble our sum of Toeplitz and Hankel
Q1 = toeplitz(q[:M+1])
Q2 = hankel(q[:M+1], q[M:])
Q = Q1 + Q2
# Now for b(n) we have that:
# b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π)
# Using our normalization ω=πf and with a constant weight W over each
# interval and a linear term for D(ω) we get (over each f1->f2 interval):
# b(n) = W ∫ (mf+c)cos(πnf)df
# = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2
# integrated over each f1->f2 pair (i.e., value at f2 - value at f1).
n = n[:M + 1] # only need this many coefficients here
    # Choose m and c so that the line hits the desired gain at each band edge
m = (np.diff(desired, axis=1) / np.diff(bands, axis=1))
c = desired[:, [0]] - bands[:, [0]] * m
b = bands * (m*bands + c) * np.sinc(bands * n)
# Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0
b[0] -= m * bands * bands / 2.
b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2
b = np.dot(np.diff(b, axis=2)[:, :, 0], weight)
# Now we can solve the equation
try: # try the fast way
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
a = solve(Q, b, sym_pos=True, check_finite=False)
for ww in w:
if (ww.category == LinAlgWarning and
str(ww.message).startswith('Ill-conditioned matrix')):
raise LinAlgError(str(ww.message))
except LinAlgError: # in case Q is rank deficient
# This is faster than pinvh, even though we don't explicitly use
# the symmetry here. gelsy was faster than gelsd and gelss in
# some non-exhaustive tests.
a = lstsq(Q, b, lapack_driver='gelsy')[0]
# make coefficients symmetric (linear phase)
coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:]))
return coeffs
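# Editorial sanity check (illustration only, derived from the equations
# above): for a single all-pass band, e.g. firls(11, [0, 1], [1, 1]), both
# q(n) and b(n) reduce to a unit impulse, Q is the identity except for
# Q[0, 0] = 2, the solution is a = [0.5, 0, ..., 0], and the returned
# coefficients are the centered unit impulse -- the exact least-squares
# answer for a constant unit response.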
def _dhtm(mag):
"""Compute the modified 1D discrete Hilbert transform
Parameters
----------
mag : ndarray
The magnitude spectrum. Should be 1D with an even length, and
preferably a fast length for FFT/IFFT.
"""
    # Adapted from code by Niranjan Damera-Venkata,
# Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`)
sig = np.zeros(len(mag))
# Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5
midpt = len(mag) // 2
sig[1:midpt] = 1
sig[midpt+1:] = -1
# eventually if we want to support complex filters, we will need a
# np.abs() on the mag inside the log, and should remove the .real
recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real
return recon
def minimum_phase(h, method='homomorphic', n_fft=None):
"""Convert a linear-phase FIR filter to minimum phase
Parameters
----------
h : array
Linear-phase FIR filter coefficients.
method : {'hilbert', 'homomorphic'}
The method to use:
'homomorphic' (default)
This method [4]_ [5]_ works best with filters with an
odd number of taps, and the resulting minimum phase filter
will have a magnitude response that approximates the square
                root of the original filter's magnitude response.
'hilbert'
This method [1]_ is designed to be used with equiripple
filters (e.g., from `remez`) with unity or zero gain
regions.
n_fft : int
The number of points to use for the FFT. Should be at least a
few times larger than the signal length (see Notes).
Returns
-------
h_minimum : array
The minimum-phase version of the filter, with length
``(length(h) + 1) // 2``.
See Also
--------
firwin
firwin2
remez
Notes
-----
    Both the Hilbert [1]_ and homomorphic [4]_ [5]_ methods require selection
of an FFT length to estimate the complex cepstrum of the filter.
In the case of the Hilbert method, the deviation from the ideal
spectrum ``epsilon`` is related to the number of stopband zeros
``n_stop`` and FFT length ``n_fft`` as::
epsilon = 2. * n_stop / n_fft
    For example, with 100 stopband zeros and an FFT length of 2048,
``epsilon = 0.0976``. If we conservatively assume that the number of
stopband zeros is one less than the filter length, we can take the FFT
length to be the next power of 2 that satisfies ``epsilon=0.01`` as::
n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
This gives reasonable results for both the Hilbert and homomorphic
methods, and gives the value used when ``n_fft=None``.
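    As an added worked example: for a 151-tap filter this rule gives
    ``n_fft = 2 ** int(np.ceil(np.log2(2 * 150 / 0.01))) == 32768``.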
Alternative implementations exist for creating minimum-phase filters,
including zero inversion [2]_ and spectral factorization [3]_ [4]_.
For more information, see:
http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters
Examples
--------
Create an optimal linear-phase filter, then convert it to minimum phase:
>>> from scipy.signal import remez, minimum_phase, freqz, group_delay
>>> import matplotlib.pyplot as plt
>>> freq = [0, 0.2, 0.3, 1.0]
>>> desired = [1, 0]
>>> h_linear = remez(151, freq, desired, Hz=2.)
Convert it to minimum phase:
>>> h_min_hom = minimum_phase(h_linear, method='homomorphic')
>>> h_min_hil = minimum_phase(h_linear, method='hilbert')
Compare the three filters:
>>> fig, axs = plt.subplots(4, figsize=(4, 8))
>>> for h, style, color in zip((h_linear, h_min_hom, h_min_hil),
... ('-', '-', '--'), ('k', 'r', 'c')):
... w, H = freqz(h)
... w, gd = group_delay((h, 1))
... w /= np.pi
... axs[0].plot(h, color=color, linestyle=style)
... axs[1].plot(w, np.abs(H), color=color, linestyle=style)
... axs[2].plot(w, 20 * np.log10(np.abs(H)), color=color, linestyle=style)
... axs[3].plot(w, gd, color=color, linestyle=style)
>>> for ax in axs:
... ax.grid(True, color='0.5')
... ax.fill_between(freq[1:3], *ax.get_ylim(), color='#ffeeaa', zorder=1)
>>> axs[0].set(xlim=[0, len(h_linear) - 1], ylabel='Amplitude', xlabel='Samples')
>>> axs[1].legend(['Linear', 'Min-Hom', 'Min-Hil'], title='Phase')
>>> for ax, ylim in zip(axs[1:], ([0, 1.1], [-150, 10], [-60, 60])):
... ax.set(xlim=[0, 1], ylim=ylim, xlabel='Frequency')
>>> axs[1].set(ylabel='Magnitude')
>>> axs[2].set(ylabel='Magnitude (dB)')
>>> axs[3].set(ylabel='Group delay')
>>> plt.tight_layout()
References
----------
.. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and
complex minimum phase digital FIR filters," Acoustics, Speech,
and Signal Processing, 1999. Proceedings., 1999 IEEE International
Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3.
doi: 10.1109/ICASSP.1999.756179
.. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR
filters by direct factorization," Signal Processing,
vol. 10, no. 4, pp. 369-383, Jun. 1986.
.. [3] T. Saramaki, "Finite Impulse Response Filter Design," in
Handbook for Digital Signal Processing, chapter 4,
New York: Wiley-Interscience, 1993.
.. [4] J. S. Lim, Advanced Topics in Signal Processing.
Englewood Cliffs, N.J.: Prentice Hall, 1988.
.. [5] A. V. Oppenheim, R. W. Schafer, and J. R. Buck,
"Discrete-Time Signal Processing," 2nd edition.
Upper Saddle River, N.J.: Prentice Hall, 1999.
""" # noqa
h = np.asarray(h)
if np.iscomplexobj(h):
raise ValueError('Complex filters not supported')
if h.ndim != 1 or h.size <= 2:
        raise ValueError('h must be 1D and more than 2 samples long')
n_half = len(h) // 2
if not np.allclose(h[-n_half:][::-1], h[:n_half]):
        warnings.warn('h does not appear to be symmetric, conversion may '
'fail', RuntimeWarning)
if not isinstance(method, string_types) or method not in \
('homomorphic', 'hilbert',):
raise ValueError('method must be "homomorphic" or "hilbert", got %r'
% (method,))
if n_fft is None:
n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
n_fft = int(n_fft)
if n_fft < len(h):
raise ValueError('n_fft must be at least len(h)==%s' % len(h))
if method == 'hilbert':
w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half)
H = np.real(fft(h, n_fft) * np.exp(1j * w))
dp = max(H) - 1
ds = 0 - min(H)
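        # Added note: dp and ds estimate the passband overshoot above 1 and
        # the stopband undershoot below 0 of the zero-phase response H;
        # shifting by ds and rescaling by S (per reference [1]) keep H
        # non-negative so that np.sqrt(H) below is well defined.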
S = 4. / (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2
H += ds
H *= S
H = np.sqrt(H, out=H)
H += 1e-10 # ensure that the log does not explode
h_minimum = _dhtm(H)
else: # method == 'homomorphic'
# zero-pad; calculate the DFT
h_temp = np.abs(fft(h, n_fft))
# take 0.25*log(|H|**2) = 0.5*log(|H|)
h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up
np.log(h_temp, out=h_temp)
h_temp *= 0.5
# IDFT
h_temp = ifft(h_temp).real
# multiply pointwise by the homomorphic filter
# lmin[n] = 2u[n] - d[n]
win = np.zeros(n_fft)
win[0] = 1
stop = (len(h) + 1) // 2
win[1:stop] = 2
if len(h) % 2:
win[stop] = 1
h_temp *= win
h_temp = ifft(np.exp(fft(h_temp)))
h_minimum = h_temp.real
n_out = n_half + len(h) % 2
return h_minimum[:n_out]
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/sandbox/examples/example_gam_0.py | 33 | 4574 | '''first examples for gam and PolynomialSmoother used for debugging
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Note: uncomment plt.show() to display graphs
'''
example = 2 #3 # 1,2 or 3
import numpy as np
from statsmodels.compat.python import zip
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
#np.random.seed(987654)
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 500
lb, ub = -1., 1. #for Poisson
#lb, ub = -0.75, 2 #0.75 #for Binomial
x1 = R.uniform(lb, ub, nobs) #R.standard_normal(nobs)
x1 = np.linspace(lb, ub, nobs)
x1.sort()
x2 = R.uniform(lb, ub, nobs) #
#x2 = R.standard_normal(nobs)
x2.sort()
#x2 = np.cos(x2)
x2 = x2 + np.exp(x2/2.)
#x2 = np.log(x2-x2.min()+0.1)
y = 0.5 * R.uniform(lb, ub, nobs) #R.standard_normal((nobs,))
f1 = lambda x1: (2*x1 - 0.5 * x1**2 - 0.75 * x1**3) # + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 - 1* x2**2) # - 0.75 * np.exp(x2))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) + 1 # 0.1
#try this
z = f1(x1) + f2(x2)
#z = demean(z)
z -= np.median(z)
print('z.std()', z.std())
#z = standardize(z) + 0.2
# with standardize I get better values, but I don't know what the true params are
print(z.mean(), z.min(), z.max())
#y += z #noise
y = z
d = np.array([x1,x2]).T
if example == 1:
print("normal")
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
print(m)
import scipy.stats, time
if example == 2:
print("binomial")
mod_name = 'Binomial'
f = families.Binomial()
#b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(z)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
#for plotting
yp = f.link.inverse(y)
p = b
if example == 3:
print("Poisson")
f = families.Poisson()
#y = y/y.max() * 3
yp = f.link.inverse(z)
#p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(z)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
if example > 1:
y_pred = m.results.mu# + m.results.alpha#m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(p, '.')
plt.plot(yp, 'b-', label='true')
plt.plot(y_pred, 'r-', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM ' + mod_name)
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x1, x2]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], p[sortidx], '.')
plt.plot(xx[sortidx], yp[sortidx], 'b.', label='true')
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM ' + mod_name + ' ' + ii)
counter += 1
# counter = 2
# for ii, xx in zip(['z', 'x1', 'x2'], [z, x1, x2]):
# #plt.figure()
# plt.subplot(2, 2, counter)
# plt.plot(xx, p, '.')
# plt.plot(xx, yp, 'b-', label='true')
# plt.plot(xx, y_pred, 'r-', label='GAM')
# plt.legend(loc='upper left')
# plt.title('gam.GAM Poisson ' + ii)
# counter += 1
plt.figure()
plt.plot(z, 'b-', label='true' )
plt.plot(np.log(m.results.mu), 'r-', label='GAM')
plt.title('GAM Poisson, raw')
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
##y_pred = m.results.predict(d)
##plt.figure()
##plt.plot(z, p, '.')
##plt.plot(z, yp, 'b-', label='true')
##plt.plot(z, y_pred, 'r-', label='AdditiveModel')
##plt.legend()
##plt.title('gam.AdditiveModel')
#plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/examples/mixture/plot_concentration_prior.py | 25 | 5631 | """
========================================================================
Concentration Prior Type Analysis of Variation Bayesian Gaussian Mixture
========================================================================
This example plots the ellipsoids obtained from a toy dataset (mixture of three
Gaussians) fitted by the ``BayesianGaussianMixture`` class models with a
Dirichlet distribution prior
(``weight_concentration_prior_type='dirichlet_distribution'``) and a Dirichlet
process prior (``weight_concentration_prior_type='dirichlet_process'``). On
each figure, we plot the results for three different values of the weight
concentration prior.
The ``BayesianGaussianMixture`` class can adapt its number of mixture
components automatically. The parameter ``weight_concentration_prior`` has a
direct link with the resulting number of components with non-zero weights.
Specifying a low value for the concentration prior will make the model put most
of the weight on a few components and set the remaining components' weights very
close to zero. High values of the concentration prior will allow a larger number of
components to be active in the mixture.
The Dirichlet process prior allows defining an infinite number of components
and automatically selects the correct number of components: it activates a
component only if it is necessary.
In contrast, the classical finite mixture model with a Dirichlet
distribution prior will favor more uniformly weighted components and therefore
tends to divide natural clusters into unnecessary sub-components.
"""
# Author: Thierry Guillemot <[email protected]>
# License: BSD 3 clause
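# A minimal sketch of the knob explored below (the parameter values here are
# illustrative only, not taken from the figures): with a Dirichlet process
# prior, a call such as
#     BayesianGaussianMixture(
#         weight_concentration_prior_type="dirichlet_process",
#         n_components=10, weight_concentration_prior=0.01).fit(X)
# concentrates the posterior weight on only a few active components, while
# large values of ``weight_concentration_prior`` keep more components active.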
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.mixture import BayesianGaussianMixture
print(__doc__)
def plot_ellipses(ax, weights, means, covars):
for n in range(means.shape[0]):
eig_vals, eig_vecs = np.linalg.eigh(covars[n])
unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0])
angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0])
# Ellipse needs degrees
angle = 180 * angle / np.pi
# eigenvector normalization
eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals)
ell = mpl.patches.Ellipse(means[n], eig_vals[0], eig_vals[1],
180 + angle)
ell.set_clip_box(ax.bbox)
ell.set_alpha(weights[n])
ell.set_facecolor('#56B4E9')
ax.add_artist(ell)
def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False):
ax1.set_title(title)
ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8)
ax1.set_xlim(-2., 2.)
ax1.set_ylim(-3., 3.)
ax1.set_xticks(())
ax1.set_yticks(())
plot_ellipses(ax1, estimator.weights_, estimator.means_,
estimator.covariances_)
ax2.get_xaxis().set_tick_params(direction='out')
ax2.yaxis.grid(True, alpha=0.7)
for k, w in enumerate(estimator.weights_):
ax2.bar(k - .45, w, width=0.9, color='#56B4E9', zorder=3)
ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.),
horizontalalignment='center')
ax2.set_xlim(-.6, 2 * n_components - .4)
ax2.set_ylim(0., 1.1)
ax2.tick_params(axis='y', which='both', left='off',
right='off', labelleft='off')
ax2.tick_params(axis='x', which='both', top='off')
if plot_title:
ax1.set_ylabel('Estimated Mixtures')
ax2.set_ylabel('Weight of each component')
# Parameters of the dataset
random_state, n_components, n_features = 2, 3, 2
colors = np.array(['#0072B2', '#F0E442', '#D55E00'])
covars = np.array([[[.7, .0], [.0, .1]],
[[.5, .0], [.0, .1]],
[[.5, .0], [.0, .1]]])
samples = np.array([200, 500, 200])
means = np.array([[.0, -.70],
[.0, .0],
[.0, .70]])
# mean_precision_prior= 0.8 to minimize the influence of the prior
estimators = [
("Finite mixture with a Dirichlet distribution\nprior and "
r"$\gamma_0=$", BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_distribution",
n_components=2 * n_components, reg_covar=0, init_params='random',
max_iter=1500, mean_precision_prior=.8,
random_state=random_state), [0.001, 1, 1000]),
("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$",
BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=2 * n_components, reg_covar=0, init_params='random',
max_iter=1500, mean_precision_prior=.8,
random_state=random_state), [1, 1000, 100000])]
# Generate data
rng = np.random.RandomState(random_state)
X = np.vstack([
rng.multivariate_normal(means[j], covars[j], samples[j])
for j in range(n_components)])
y = np.concatenate([j * np.ones(samples[j], dtype=int)
for j in range(n_components)])
# Plot results in two different figures
for (title, estimator, concentrations_prior) in estimators:
plt.figure(figsize=(4.7 * 3, 8))
plt.subplots_adjust(bottom=.04, top=0.90, hspace=.05, wspace=.05,
left=.03, right=.99)
gs = gridspec.GridSpec(3, len(concentrations_prior))
for k, concentration in enumerate(concentrations_prior):
estimator.weight_concentration_prior = concentration
estimator.fit(X)
plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator,
X, y, r"%s$%.1e$" % (title, concentration),
plot_title=k == 0)
plt.show()
| bsd-3-clause |
cauchycui/scikit-learn | sklearn/neighbors/regression.py | 106 | 10572 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
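        For illustration only (this exact callable is not a built-in option),
        an inverse-squared-distance weighting could be supplied as
        ``lambda dist: 1.0 / (dist ** 2 + 1e-10)``.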
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
altairpearl/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 58 | 17158 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
soulmachine/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
CKehl/pylearn2 | pylearn2/testing/skip.py | 49 | 1363 | """
Helper functions for determining which tests to skip.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from nose.plugins.skip import SkipTest
import os
from theano.sandbox import cuda
scipy_works = True
try:
import scipy
except ImportError:
# pyflakes gets mad if you set scipy to None here
scipy_works = False
sklearn_works = True
try:
import sklearn
except ImportError:
sklearn_works = False
h5py_works = True
try:
import h5py
except ImportError:
h5py_works = False
matplotlib_works = True
try:
from matplotlib import pyplot
except ImportError:
matplotlib_works = False
def skip_if_no_data():
if 'PYLEARN2_DATA_PATH' not in os.environ:
raise SkipTest()
def skip_if_no_scipy():
if not scipy_works:
raise SkipTest()
def skip_if_no_sklearn():
if not sklearn_works:
raise SkipTest()
def skip_if_no_gpu():
    if not cuda.cuda_available:
raise SkipTest('Optional package cuda disabled.')
def skip_if_no_h5py():
if not h5py_works:
raise SkipTest()
def skip_if_no_matplotlib():
if not matplotlib_works:
raise SkipTest("matplotlib and pyplot are not available")
| bsd-3-clause |
allisony/aplpy | aplpy/tests/test_rgb.py | 2 | 1477 | import os
import warnings
import matplotlib
matplotlib.use('Agg')
import numpy as np
from astropy.io import fits
from .. import FITSFigure
from ..rgb import make_rgb_image
from .test_images import BaseImageTests
HEADER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/2d_fits', '1904-66_TAN.hdr')
class TestRGB(BaseImageTests):
def test_rgb(self, generate, tmpdir):
# Regression test to check that RGB recenter works properly
r_file = tmpdir.join('r.fits').strpath
g_file = tmpdir.join('g.fits').strpath
b_file = tmpdir.join('b.fits').strpath
rgb_file = tmpdir.join('rgb.png').strpath
np.random.seed(12345)
header = fits.Header.fromtextfile(HEADER)
r = fits.PrimaryHDU(np.random.random((12,12)), header)
r.writeto(r_file)
g = fits.PrimaryHDU(np.random.random((12,12)), header)
g.writeto(g_file)
b = fits.PrimaryHDU(np.random.random((12,12)), header)
b.writeto(b_file)
make_rgb_image([r_file, g_file, b_file], rgb_file, embed_avm_tags=False)
f = FITSFigure(r_file, figsize=(3,3))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f.show_rgb(rgb_file)
f.tick_labels.set_xformat('dd.d')
f.tick_labels.set_yformat('dd.d')
f.recenter(359.3, -72.1, radius=0.05)
self.generate_or_test(generate, f, 'test_rgb.png', tolerance=2)
f.close()
| mit |
manashmndl/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually compares, in the feature space, the results of
two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
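# Added orientation note for the code below: the PCA directions plotted later
# come from ``pca.components_`` (orthogonal in the raw feature space), while
# the ICA directions come from ``ica.mixing_``, which need not be orthogonal
# in the raw space.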
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
mlyundin/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015 | Code/Machine_Learning_Algos/training_t2.py | 1 | 5928 | __author__ = "Can Ozbek Arnav"
import pandas as pd
import numpy as np
import pylab
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import sys
sys.path.append("/Users/ahmetcanozbek/Desktop/EE660/660Project/Code_Final_Used/functions")
import ml_aux_functions as ml_aux
import crop_rock
#PREPROCESSING
#Read the files
df_full = pd.read_pickle("/Users/ahmetcanozbek/Desktop/660Stuff/msd_train_t2.pkl") # 80%
print "DEBUG: file read."
#Get rid of the rows that have missing values (nan) and UNCAT
df_full = df_full[ df_full["Genre"] != "UNCAT" ]
df_full = df_full.dropna()
y_full = df_full["Genre"]
X_full = df_full.drop(["Genre", "Track ID", "Year"], axis=1)
#Split the 80% of data to 70% Training and 30% Validation Data
from sklearn.cross_validation import train_test_split
X_train, X_validation, y_train, y_validation = \
train_test_split(X_full, y_full, train_size=0.7, random_state=42)
print "DEBUG: Data splitted"
df_train_toCrop = pd.concat([y_train, X_train], axis=1, join='inner')
#Crop the dataset
maxval = crop_rock.find_second_max_value(df_train_toCrop)
df_cropped = crop_rock.drop_excess_rows(df_train_toCrop, maxval)
y_cropped = df_cropped["Genre"]
X_cropped = df_cropped.drop(["Genre"], axis=1)
# Start LDA Classification
print "Performing LDA Classification:"
from sklearn.lda import LDA
clf = LDA(solver='svd', shrinkage=None, n_components=None).fit(X_cropped, np.ravel(y_cropped[:]))
#Use X_cropped to get best model
y_train_predicted = clf.predict(X_train)
print "Error rate for LDA on Training: ", ml_aux.get_error_rate(y_train,y_train_predicted)
# ml_aux.plot_confusion_matrix(y_cropped, predicted, "CM on LDA cropped")
# plt.show()
y_validation_predicted = clf.predict(X_validation)
print "Error rate for LDA on Validation: ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# ml_aux.plot_confusion_matrix(y_validation, y_validation_predicted, "CM on LDA validation (t1)")
# plt.show()
# Start Adaboost Classification
from sklearn.ensemble import AdaBoostClassifier
adaboost_model = AdaBoostClassifier(n_estimators=50)
adaboost_model = adaboost_model.fit(X_cropped,y_cropped)
# predicted = adaboost_model.predict(X_cropped)
# print "Error rate for LDA on Cropped: ", ml_aux.get_error_rate(y_cropped,predicted)
# ml_aux.plot_confusion_matrix(y_cropped, predicted, "CM on LDA cropped")
# plt.show()
y_validation_predicted = adaboost_model.predict(X_validation)
print "Error rate for Adaboost on Validation: ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# ml_aux.plot_confusion_matrix(y_validation, y_validation_predicted, "CM on Adaboost validation (t1)")
# plt.show()
# Start QDA Classification
print "Performing QDA Classification:"
from sklearn.qda import QDA
clf = QDA(priors=None, reg_param=0.001).fit(X_cropped, np.ravel(y_cropped[:]))
y_validation_predicted = clf.predict(X_validation)
print "Error rate for QDA (Validation): ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# Start Random Forest Classification
print "Performing Random Classification:"
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=500)
forest = forest.fit(X_cropped, np.ravel(y_cropped[:]))
y_validation_predicted = forest.predict(X_validation)
print "Error rate for Random Forest (Validation): ", ml_aux.get_error_rate(y_validation,y_validation_predicted)
# ml_aux.plot_confusion_matrix(y_validation, y_validation_predicted, "CM Random Forest (t1)")
# plt.show()
# Start k nearest neighbor Classification
print "Performing kNN Classification:"
from sklearn import neighbors
knn_model = neighbors.KNeighborsClassifier(n_neighbors=2, algorithm='auto',leaf_size=15)
knn_model.fit(X_cropped, y_cropped)
# y_train_predicted = knn_model.predict(X_train)
# print "Error Rate for kNN (Cropped): ", ml_aux.get_error_rate(y_train, y_train_predicted)
y_validation_predicted = knn_model.predict(X_validation)
print "Error Rate for kNN on Validation (t1): ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
# Start Naive Bayes Classification
print "Performing Naive Bayes Classification:"
from sklearn.naive_bayes import GaussianNB
naivebayes_model = GaussianNB()
naivebayes_model.fit(X_cropped, y_cropped)
y_validation_predicted = naivebayes_model.predict(X_validation)
print "Naive Bayes Error Rate on Validation (t1): ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
# Start SVM Classification
print "Performing SVM Classification:"
from sklearn.svm import SVC
svm_model = SVC(kernel='rbf' ,probability=True, max_iter=100000)
svm_model.fit(X_cropped, y_cropped)
y_train_predicted = svm_model.predict(X_train)
print "SVM Error rate on training data (t1): ", ml_aux.get_error_rate(y_train, y_train_predicted)
# ml_aux.plot_confusion_matrix(y_train, y_train_predicted, "CM SVM Training (t1)")
# plt.show()
y_validation_predicted = svm_model.predict(X_validation)
print "SVM Error rate on validation (t1): ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
# Start k nearest Centroid Classification
print "Performing kNC Classification:"
from sklearn.neighbors.nearest_centroid import NearestCentroid
knnc_model = NearestCentroid()
knnc_model.fit(X_cropped, y_cropped)
y_validation_predicted = knnc_model.predict(X_validation)
print "Error Rate on kNNC (t1) Validation: ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
# Start Bagging Classification
print "Performing Bagging Classification:"
# Bagging
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
# Bagging
bagging1 = BaggingClassifier(KNeighborsClassifier(n_neighbors=2),max_samples=1.0, max_features=0.1)
bagging1.fit(X_cropped, y_cropped)
y_validation_predicted = bagging1.predict(X_validation)
print "Error Rate kNN with Baggging Validation: ", ml_aux.get_error_rate(y_validation, y_validation_predicted)
| mit |
ngoix/OCRF | sklearn/linear_model/least_angle.py | 11 | 57260 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..model_selection import check_cv
from ..exceptions import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..externals.six import string_types
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
..). Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
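Examples
--------
A minimal usage sketch (illustrative only; ``X`` and ``y`` are assumed to be
pre-built arrays of shape (n_samples, n_features) and (n_samples,))::

    from sklearn.linear_model import lars_path
    alphas, active, coefs = lars_path(X, y, method='lasso')
    # coefs[:, -1] holds the least-regularized solution on the path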
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
# even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif isinstance(Gram, string_types) and Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by
# the test suite. The `equality_tolerance` margin added in 0.16
# to get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
# the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
# correlation between each inactive variable and the
# equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale = self._preprocess_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_offset, y_offset, X_scale)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
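Examples
--------
A minimal usage sketch (illustrative only; ``X`` and ``y`` are assumed to be
pre-loaded training arrays)::

    from sklearn.linear_model import LarsCV
    reg = LarsCV(cv=5).fit(X, y)
    print(reg.alpha_)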
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv.split(X, y))
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
# it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
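Examples
--------
A minimal usage sketch (illustrative only; ``X`` and ``y`` are assumed to be
pre-loaded training arrays)::

    from sklearn.linear_model import LassoLarsCV
    reg = LassoLarsCV(cv=5).fit(X, y)
    print(reg.alpha_)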
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
pascalgutjahr/Praktikum-1 | V602_Roentgen_Em_Absorp/aurum.py | 1 | 1308 | import matplotlib as mpl
from scipy.optimize import curve_fit
mpl.use('pgf')
import matplotlib.pyplot as plt
plt.rcParams['lines.linewidth'] = 1
import numpy as np
mpl.rcParams.update({
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'lualatex',
'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}'
})
Z, T = np.genfromtxt('txt/plot.txt', unpack=True, skip_header = 3)
# T = Winkel Theta, Z = Ordnungszahl
The = 2 *np.pi * T / 360 # Winkel in rad
h = 6.626 *10**(-34)
c = 299792458  # speed of light in m/s
d = 201 *10**(-12)
e = 1.602 *10**(-19)
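# Bragg reflection in first order gives 2*d*sin(theta) = lambda = h*c/E, so
# the edge energy below is E_K = h*c / (2*d*sin(theta)), divided by e to
# convert from joules to electron volts.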
E_K = (h*c) / (2*d*np.sin(The)) / e
W = np.sqrt(E_K)
Z2 = Z**2
x_plot = np.linspace(90, 130)
def f(W, m, b):
return m*W + b
params, covariance = curve_fit(f, W, Z2)
errors = np.sqrt(np.diag(covariance))
print("m=", params[0], "+-", errors[0])
print("b=", params[1], "+-", errors[1])
# m= 22.9616004655 +- 3.5769453201
# b= -1348.56433773 +- 404.895551595
plt.plot(x_plot, f(x_plot, *params), 'r-', label='lineare Regression', linewidth=1)
plt.plot(W, Z2, 'rx', label='Messwerte')
plt.plot()
plt.ylabel(r'$Z^2$')
plt.xlabel(r'$\sqrt{E_K}\,/\,\sqrt{\si{\electronvolt}}$')
plt.xlim(min(W)-2, max(W)+2)
plt.ylim(850, 1650)
plt.grid()
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('bilder/plot.pdf')
| mit |
abimannans/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
tooringanalytics/pyambiguity | ambiguity.py | 1 | 9591 | #!/usr/bin/env python
""" Ambiguity.py : Python equivalent of ambiguity_1.m
Author : Tooring Analytics
"""
import numpy as np
import numpy.matlib as npml
import scipy.sparse
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
# inputs
DEFAULT_SIGNAL = np.ones((1, 51))
def ambiguity(u_basic=DEFAULT_SIGNAL,
fcode=True,
f_basic=None,
F=0,
K=0,
T=0,
N=0,
sr=0,
plot_title="",
plot1_file=None,
plot2_file=None,
plot_format="svg",
plot_mesh=True,
elev=50,
azim=-135):
""" Compute Ambiguity & generate Plots for given input parameters
Params:
-------
u_basic: numpy.ndarray or array-like. Input signal.
fcode: bool True if frequency coding allowed, false otherwise
f_basic: numpy.ndarray or array-like. Frequency coding in
units of 1/tb (row vector of same length)
F: int. Maximal Doppler shift for ambiguity in plot
[in units of 1/Mtb] (e.g. 1)
K: int. Number of Doppler grid points for calculation (e.g. 100)
T: float. Maximal Delay for ambiguity plot [in units of Mtb]
N: int. Number of delay grid points on each side (e.g. 100)
sr: int/float. Over sampling ratio (>=1) (e.g. 10)
plot1_file: str. Name of file where first plot will be stored.
If 'None', pops up an interactive window to display this plot.
plot2_file: str. Name of file where second plot will be stored.
If 'None', pops up an interactive window to display this plot.
plot_format: str. Output format for plot. (e.g. 'svg', 'png', 'pdf'
etc. Check matplotlib docs for supported formats.)
plot_mesh: bool. If True (default), plots a mesh, if False plots a
surface.
elev: float.(default=50) Elevation for 3-D plot viewpoint.
azim: float.(default=-135) Azimuth in degrees for 3-D plot viewpoint.
Returns:
--------
(delay, freq, a): 3-tuple of array_like's, where delay, freq and a
are the time, frequency and amplitude values.
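Example:
--------
Illustrative sketch only -- the Barker-13 input and every parameter value
below are assumptions chosen to show the calling convention, not values
taken from the original MATLAB reference:

    import numpy as np
    barker13 = np.array([[1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1]])
    delay, freq, a = ambiguity(u_basic=barker13, fcode=False,
                               F=6, K=60, T=1.1, N=60, sr=10,
                               plot_title='Barker 13')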
"""
# Initialization
m_basic = np.amax(u_basic.shape)
u = None
# Ambiguity implementation
df = float(F) / float(K) / float(m_basic)
r = np.ceil(sr * (N + 1) / float(T) / float(m_basic))
if r == 1:
dt = 1
m = m_basic
uamp = np.abs(u_basic)
phas = np.multiply(uamp, 0)
phas = np.angle(u_basic)
if fcode:
phas = np.add(phas, np.multiply(2 * np.pi, np.cumsum(f_basic)))
uexp = np.exp(1.0j * phas)
u = np.multiply(uamp, uexp)
else:
dt = 1 / r
ud = np.diagflat(u_basic)
ao = np.ones((r, m_basic))
m = m_basic * r
ao_dot_ud = np.dot(ao, ud)
# MATLAB/Octave uses fortran-like row-major order reshaping
u_basic = np.reshape(ao_dot_ud, (1, m), order='F')
uamp = np.abs(u_basic)
phas = np.angle(u_basic)
u = u_basic
if fcode:
ff = np.diagflat(f_basic)
coef = 2 * np.pi * dt
vecprod = np.dot(ao, ff)
vecprod_reshaped = np.reshape(vecprod, (1, m), order='F')
cumsummed = np.reshape(np.cumsum(vecprod_reshaped),
(1, m),
order='F')
add_term = np.multiply(coef, cumsummed)
phas = add_term + phas
comprod = np.multiply(1.0j, phas)
uexp = np.exp(comprod)
u = np.multiply(uamp, uexp)
t = np.array([np.arange(0, r * m_basic) / r])
tscale1 = np.hstack((
np.hstack((
np.array([[0]]),
np.array([np.arange(0, r * m_basic)])
)),
np.array([[r * m_basic - 1]])
)) / r
diff = np.diff(phas)
nanarray = np.array([[np.nan]])
temp = np.hstack((nanarray, diff))
dphas = np.multiply(temp, float(r) / 2. / np.pi)
fig_1 = plt.figure(1)
plt.clf()
plt.hold(False)
axes1 = plt.subplot(3, 1, 1)
zerovec = np.array([[0]])
abs_uamp = np.abs(uamp)
ar1 = np.hstack((zerovec, abs_uamp))
ar2 = np.hstack((ar1, zerovec))
axes1.plot(tscale1.flatten(),
ar2.flatten(),
c="r",
linewidth=1.5)
axes1.set_ylabel(' $Amplitude$ ')
#axes1.set_xlim(-np.inf, np.inf)
#axes1.set_ylim(-np.inf, np.amax(abs_uamp) + 0.05*np.amax(abs_uamp));
axes2 = plt.subplot(3, 1, 2)
axes2.plot(t.flatten(),
phas.flatten(),
c="r",
linewidth=1.5)
# plt.axis(np.array([-np.inf, np.inf, -np.inf, np.inf]))
axes2.set_ylabel(' $Phase [rad]$ ')
axes3 = plt.subplot(3, 1, 3)
axes3.plot(t.flatten(),
(dphas * np.ceil(np.amax(t))).flatten(),
c="r",
linewidth=1.5)
# plt.axis(np.array([-np.inf, np.inf, -np.inf, np.inf]))
axes3.set_xlabel(' $\\itt/t_b$ ')
axes3.set_ylabel(' $\\itf*Mt_b$ ')
fig_1.suptitle(plot_title + ', 2-D Plot')
if plot1_file is not None:
fig_1.savefig(plot1_file, format=plot_format)
else:
plt.show()
dtau = np.ceil(T * m) * dt / N
tau = np.round(np.dot(np.array([np.arange(0., N+1., 1.)]), dtau / dt)) * dt
f = np.array([np.dot(np.arange(0., K+1, 1.), df)])
f = np.hstack((-1 * np.fliplr(f), f))
Tm = np.ceil(np.dot(T, m)).astype(int)
m_plus_Tm = int(m + Tm)
uT = np.conj(u)
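# uu_pos built below holds, row by row, a delay-shifted copy of u multiplied
# element-wise by conj(u): the cross products u(t - tau_i) * conj(u(t)) that
# the Doppler transform is then applied to.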
mat1 = scipy.sparse.spdiags(uT.flatten(),
0,
m_plus_Tm,
m,
format="csc")
zTm = np.zeros((1, Tm))
u_padded = np.hstack((np.hstack((zTm, u)), zTm))
cidx = np.array([np.arange(0, int(m + Tm))]).astype(int)
ridx = np.round(tau / dt).T.astype(int)
# Use repmat instead of the explicit Tony's Trick in the matlab code
ar1 = npml.repmat(cidx, N + 1, 1)
ar2 = npml.repmat(ridx, 1, m + Tm)
index = np.add(ar1, ar2)
u_padded_rep = np.array([u_padded[0, colindex]
for colindex in index])
mat2 = scipy.sparse.csc_matrix(u_padded_rep)
uu_pos = mat2.dot(mat1)
uu_pos = uu_pos.tocsr()
e = np.exp(np.multiply(-1j * 2. * np.pi, np.dot(f.T, t)))
# By rules of matrix transposition:
# np.dot(a,b).T = np.dot(b.T, a.T)
# Let a = e, and
# let b = uu_pos.conj(),
# Then,
# np.dot(e, uu_pos.conj()).T = uu_pos.conj().transpose(True).dot(e.T)
# hence, np.dot(e, uu_pos.conj()) =
# uu_pos.conj().transpose(True).dot(e.T).transpose(True)
# uu_pos_dash = uu_pos.transpose(True).conj()
# uu_pos_dash_trans = uu_pos_dash.transpose(True)
e_sparse = scipy.sparse.csc_matrix(e)
# e_trans = e_sparse.transpose(True)
# e_dot_uu_pos_dash = (uu_pos_dash_trans.dot(e_trans)).transpose(True)
e_dot_uu_pos_dash = e_sparse.dot(uu_pos.conj().transpose(True))
a_pos = np.abs(e_dot_uu_pos_dash.toarray())
a_pos = a_pos / np.amax(np.amax(a_pos))
a_slice1 = a_pos[0:K+1, :]
conj_a_slice1 = np.conj(a_slice1)
flipud_conj = np.flipud(conj_a_slice1)
a_slice2 = a_pos[K+1:2*K+2, :]
fliplr_a_slice2 = np.fliplr(a_slice2)
a = np.hstack((flipud_conj, fliplr_a_slice2))
fliplr_tau = -1 * np.fliplr(tau)
delay = np.hstack((fliplr_tau, tau))
f_k = f[:, K + 1:2*K+2]
maxT = np.ceil(np.amax(t))
freq = np.multiply(f_k, maxT)
delay_slice1 = delay[0, 0:N]
delay_slice2 = delay[0, N+1:2*N]
delay = np.array([np.hstack((delay_slice1, delay_slice2))])
idx1 = np.arange(0, N)
idx2 = np.arange(N+1, 2*N)
a_cols = np.hstack((idx1, idx2))
a = a[:, a_cols]
(amf, amt) = a.shape
# We use matplotlib's built-in colormaps, so no use for this.
# cm = np.zeros((64, 3))
# cm[:,2] = np.reshape(np.ones((64, 1)), (64,))
fig_2 = plt.figure(2)
plt.clf()
plt.hold(False)
ax3d = plt.subplot(111, projection='3d')
x_coords = delay
y_coords = np.hstack((np.zeros((1, 1)), freq)).T
mesh_z = np.vstack((np.zeros((1, amt)), a))
(mesh_x, mesh_y) = np.meshgrid(x_coords, y_coords)
if plot_mesh:
ax3d.plot_wireframe(mesh_x, mesh_y, mesh_z)
else:
ax3d.plot_surface(mesh_x, mesh_y, mesh_z,
linewidth=0, cmap=cm.coolwarm)
plt.hold(True)
x_coords = delay
y_coords = np.array([[0, 0]]).T
surface_z = np.vstack((np.zeros((1, amt)), a[0]))
(surface_x, surface_y) = np.meshgrid(x_coords, y_coords)
ax3d.plot_surface(surface_x, surface_y, surface_z,
linewidth=0, cmap=cm.Blues)
# Initialize the camera position. Matplotlib has a
# different default orientation than MATLAB, so
# elev & azimuth are adjusted accordingly.
ax3d.view_init(elev=elev, azim=azim)
# ax3d.axis([-1 * np.inf, np.inf, -1 * np.inf, np.inf, 0, 1])
ax3d.set_xlabel(' $ \\tau/\\itt_b$ ',
fontsize=12)
ax3d.set_ylabel(' $\\it\\nu * \\itMt_b$ ',
fontsize=12)
ax3d.set_zlabel(' $|\\it\\chi(\\it\\tau,\\it\\nu)|$ ',
fontsize=12)
plt.hold(False)
fig_2.suptitle(plot_title + ', 3-D Plot')
if plot2_file is not None:
fig_2.savefig(plot2_file, format=plot_format)
else:
plt.show()
return (delay, freq, a)
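# A brute-force, comment-only sketch of roughly the quantity the sparse pipeline
# above evaluates (assumptions: a unit-amplitude pulse and the usual ambiguity
# definition; conventions may differ by a conjugate or sign):
#     a(f, tau) ~ |sum_t u(t) * conj(u(t - tau)) * exp(-2j*pi*f*t)|, peak-normalised.
# import numpy as np
# u = np.ones(8, dtype=complex)
# taus = np.arange(0, 8)
# freqs = np.linspace(-0.5, 0.5, 33)
# amb = np.empty((freqs.size, taus.size))
# for i, fr in enumerate(freqs):
#     for k, tau in enumerate(taus):
#         shifted = np.concatenate((np.zeros(tau, dtype=complex), u[:u.size - tau]))
#         amb[i, k] = np.abs(np.sum(u * np.conj(shifted)
#                                   * np.exp(-2j * np.pi * fr * np.arange(u.size))))
# amb /= amb.max()  # normalise to the peak, as done above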
| mit |
JamiiTech/mplh5canvas | examples/monitor_plot.py | 4 | 1847 | #!/usr/bin/python
"""Plot embedded in HTML wrapper with custom user events..."""
import matplotlib
matplotlib.use('module://mplh5canvas.backend_h5canvas')
from pylab import *
import time
sensor_list = ['enviro.wind_speed','enviro.wind_direction','enviro.ambient_temperature','enviro.humidity']
def user_cmd_ret(*args):
"""Handle any data returned from calls to canvas.send_cmd()"""
print "Got return from user event:",args
def user_event(figure_id, *args):
f = figure(int(figure_id)+1)
# make the specified figure active for the rest of the calls in this method
sensors = args[:-1]
clf()
xlabel('time (s)')
ylabel('value')
count = 1
for sensor in sensors:
t = arange(0, 100, 1)
s = sin(count*pi*t/10) * 10
plot(t,s,linewidth=1.0,label=sensor)
count+=0.5
legend()
f.canvas.draw()
f.canvas.send_cmd("alert('Server says: Plot updated...'); document.documentURI;")
# send a command back to the browser on completion of the event
# output of this command (in this case the documentURI) is returned to the server if user_cmd_ret is set
# show a plot
title('No sensors selected')
f = gcf()
ax = gca()
# some sensors
sensor_select = "".join(['<option value="'+x+'">'+x+'</option>' for x in sensor_list])
f.canvas._user_event = user_event
# register handler for client side javascript calls to user_event
f.canvas._user_cmd_ret = user_cmd_ret
# register handler for any returns from send_cmd
html_wrap_file = open("./examples/monitor_plot.html")
cc = html_wrap_file.read().replace("<!--sensor-list-->",sensor_select)
# read in a custom HTML wrapper and populate dynamic content
f.canvas._custom_content = cc
# specify a custom HTML wrapper to use in place of default (thumbnail view)
html_wrap_file.close()
f.canvas.draw()
show(layout='figure1')
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
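# A minimal usage sketch, assuming the standard estimator API exposed above:
# >>> import numpy as np
# >>> from sklearn.cluster import KMeans
# >>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]])
# >>> KMeans(n_clusters=2, random_state=0).fit(X).labels_.shape
# (6,)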
| bsd-3-clause |
anoopkunchukuttan/theano-rnn | hf_example.py | 9 | 7374 | """
This code uses the recurrent neural net implementation in rnn.py
but trains it using Hessian-Free optimization.
It requires the theano-hf package:
https://github.com/boulanni/theano-hf
@author Graham Taylor
"""
from rnn import MetaRNN
from hf import SequenceDataset, hf_optimizer
import numpy as np
import matplotlib.pyplot as plt
import logging
def test_real(n_updates=100):
""" Test RNN with real-valued outputs. """
n_hidden = 10
n_in = 5
n_out = 3
n_steps = 10
n_seq = 1000
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps, n_out))
targets[:, 1:, 0] = seq[:, :-1, 3] # delayed 1
targets[:, 1:, 1] = seq[:, :-1, 2] # delayed 1
targets[:, 2:, 2] = seq[:, :-2, 0] # delayed 2
targets += 0.01 * np.random.standard_normal(targets.shape)
# SequenceDataset wants a list of sequences
# this allows them to be different lengths, but here they're not
seq = [i for i in seq]
targets = [i for i in targets]
gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=100)
cg_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=20)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
activation='tanh')
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
s=model.rnn.y_pred,
costs=[model.rnn.loss(model.y)], h=model.rnn.h)
opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[0])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.plot(targets[0])
guess = model.predict(seq[0])
guessed_targets = plt.plot(guess, linestyle='--')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_title('solid: true output, dashed: model output')
def test_binary(multiple_out=False, n_updates=250):
""" Test RNN with binary outputs. """
n_hidden = 10
n_in = 5
if multiple_out:
n_out = 2
else:
n_out = 1
n_steps = 10
n_seq = 100
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps, n_out), dtype='int32')
# whether lag 1 (dim 3) is greater than lag 2 (dim 0)
targets[:, 2:, 0] = np.cast[np.int32](seq[:, 1:-1, 3] > seq[:, :-2, 0])
if multiple_out:
# whether product of lag 1 (dim 4) and lag 1 (dim 2)
# is less than lag 2 (dim 0)
targets[:, 2:, 1] = np.cast[np.int32](
(seq[:, 1:-1, 4] * seq[:, 1:-1, 2]) > seq[:, :-2, 0])
# SequenceDataset wants a list of sequences
# this allows them to be different lengths, but here they're not
seq = [i for i in seq]
targets = [i for i in targets]
gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=500)
cg_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=100)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
activation='tanh', output_type='binary')
# optimizes negative log likelihood
# but also reports zero-one error
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
s=model.rnn.y_pred,
costs=[model.rnn.loss(model.y),
model.rnn.errors(model.y)], h=model.rnn.h)
# using the settings of initial_lambda and mu given in Nicolas' RNN example
# seems to do a little worse than the default
opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
guess = model.predict_proba(seq[seq_num])
guessed_targets = plt.step(xrange(n_steps), guess)
plt.setp(guessed_targets, linestyle='--', marker='d')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_ylim((-0.1, 1.1))
ax2.set_title('solid: true output, dashed: model output (prob)')
def test_softmax(n_updates=250):
""" Test RNN with softmax outputs. """
n_hidden = 10
n_in = 5
n_steps = 10
n_seq = 100
n_classes = 3
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps), dtype='int32')
thresh = 0.5
# if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
# class 1
# if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
# class 2
# if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
# class 0
targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
#targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
# SequenceDataset wants a list of sequences
# this allows them to be different lengths, but here they're not
seq = [i for i in seq]
targets = [i for i in targets]
gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=500)
cg_dataset = SequenceDataset([seq, targets], batch_size=None,
number_batches=100)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
activation='tanh', output_type='softmax',
use_symbolic_softmax=True)
# optimizes negative log likelihood
# but also reports zero-one error
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
s=model.rnn.y_pred,
costs=[model.rnn.loss(model.y),
model.rnn.errors(model.y)], h=model.rnn.h)
# using the settings of initial_lambda and mu given in Nicolas' RNN example
# seems to do a little worse than the default
opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
# blue line will represent true classes
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
# show probabilities (in b/w) output by model
guess = model.predict_proba(seq[seq_num])
guessed_probs = plt.imshow(guess.T, interpolation='nearest',
cmap='gray')
ax2.set_title('blue: true class, grayscale: probs assigned by model')
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
#test_real(n_updates=20)
#test_binary(multiple_out=True, n_updates=20)
test_softmax(n_updates=20)
| bsd-3-clause |
lastralab/Statistics | PeaR.py | 1 | 4823 | # -*- coding: utf-8 -*-
#!/usr/bin/python
# Author: Niam Moltta
# UY - 2017
# Pearson's Correlation Coefficient
import numpy as np
from scipy.stats.stats import pearsonr
import matplotlib.pylab as plt
import re
from sklearn import preprocessing
import pandas as pd
import seaborn
print ' '
print ' '
print ' Welcome to PeaR.py'
print ' - by Niam Moltta -'
print ' ~~/\//V\ '
print ' '
print ' '
print ' '
print "Application: PEARSON'S CORRELATION COEFFICIENT.\n\nINSTRUCTIONS:\n\n- Select file, select two numeric columns.\n- Returns Pearson's Coefficient and p-value.\n- Returns graph of correlation relationship.\n\n * Up to +-0.6 may indicate it is a considerable correlation for social sciences, \n but not for data that you got from very sophisticated instruments.\n\n"
fhand = raw_input('Enter file name: ')
print ' '
if fhand == '':
print ' '
print "Avoid becoming a vanellus chilensis!"
print ' '
exit()
filecsv = str(fhand)
data = pd.read_csv(filecsv)
print ' '
frame = pd.DataFrame(data)
colist = frame.columns
columns = np.asarray(colist)
while True:
print ' '
print 'Columns in', re.findall('(.+?).csv', filecsv), 'are:\n'
print columns
print ' '
hand = raw_input('Enter column header for variable x: ')
column1 = str(hand)
print ' '
if (column1 == 'ya') | (column1 == ''):
break
else:
hand2 = raw_input('Enter column header for variable y: ')
column2 = str(hand2)
print ' '
if (column2 == 'ya') | (column2 == ''):
break
else:
print ' --------------------------------------------------------- '
print "Calculating correlation for:\n", column1,"and", column2
print ' --------------------------------------------------------- '
C1 = data[column1]
C2 = data[column2]
x = np.asarray(C1)
y = np.asarray(C2)
# Calculate a Pearson correlation coefficient and the p-value for testing non-correlation
Pear = pearsonr(x, y)
if (Pear[0] == 1)|(Pear[0] == -1):
print "Pearson's Coefficient =", Pear[0]
print ' '
else:
print "Pearson's Coefficient =", Pear[0]
print ' '
print 'p-value =', Pear[1]
print ' '
Coef = Pear[0]
pval = Pear[1]
r2 = str(Coef)
p = str(pval)
pvalue = 'p-value = '+ p
R2 = "Pearson's = "+ r2
xcums = np.cumsum(x)
ycums = np.cumsum(y)
yc = sorted(ycums, reverse=True)
if Coef < 0 :
plt.plot(xcums, 'b', label=column1)
plt.plot(yc, 'r', label=column2)
plt.title(R2)
plt.xlabel(pvalue)
plt.ylabel("Correlation")
print ('To continue, you must save the figure and close it, or just close it. You can also zoom in on it or move the graph to see it better, using the toolbar buttons.\n')
plt.legend()
plt.show()
print ' '
else:
plt.plot(xcums, 'b', label=column1)
plt.plot(ycums, 'r', label=column2)
plt.title(R2)
plt.xlabel(pvalue)
plt.ylabel("Correlation")
print ('To continue, you must save the figure and close it, or just close it. You can also zoom in on it or move the graph to see it better, using the toolbar buttons.\n')
plt.legend()
plt.show()
print ' '
'''The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does
y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.'''
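# A quick sanity check of the call above (pearsonr returns (coefficient, p-value)):
# perfectly linear data gives a coefficient of exactly 1.0, e.g.
# pearsonr([1, 2, 3, 4], [2, 4, 6, 8]) -> (1.0, 0.0)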
print ' '
print 'Hasta la vista, human.'
print ' '
exit()
| mit |
UFABC-AM-2016-1/constrained-k-means | generate_constraints_link.py | 1 | 1649 | import numpy as np
import json
from sklearn.datasets import load_digits, load_iris, load_diabetes
LINK_ARRAY_SIZE = 20
datasets =[
# ("iris", load_iris()),
#("digits", load_digits()),
("diabetes", load_diabetes())
]
def generate(link_array_size):
for name, data_set in datasets:
samples = np.random.choice(len(data_set.data), link_array_size)
must_links = []
cannot_links = []
for sample in samples:
value = data_set.target[sample]
for selected in range(len(data_set.data)):
if value == data_set.target[selected]:
if sample == selected:
continue
must_link = [
np.asarray(data_set.data[sample]),
np.asarray(data_set.data[selected])
]
must_links.append(must_link)
break
else:
continue
samples = np.random.choice(len(data_set.data), link_array_size)
for sample in samples:
value = data_set.target[sample]
for selected in range(len(data_set.data)):
if value != data_set.target[selected]:
cannot_link = [
np.asarray(data_set.data[sample]),
np.asarray(data_set.data[selected])
]
cannot_links.append(cannot_link)
break
else:
continue
links = {'must_link': must_links, 'cannot_link': cannot_links}
np.save(name, links)
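# A minimal usage sketch (comment-only; the module defines generate() but never
# calls it here). np.save stores the constraint dict as a 0-d object array, so
# loading it back needs .item() -- and allow_pickle=True on newer NumPy versions:
# generate(LINK_ARRAY_SIZE)
# links = np.load('diabetes.npy', allow_pickle=True).item()
# print(len(links['must_link']), len(links['cannot_link']))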
| mit |
saiwing-yeung/scikit-learn | sklearn/discriminant_analysis.py | 2 | 28563 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
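# Quick illustration of the float-shrinkage branch above: shrunk_covariance
# interpolates between the empirical covariance (shrinkage=0.0) and
# mu * identity with mu = trace(empirical_covariance(X)) / n_features
# (shrinkage=1.0), so a shrinkage of 1.0 zeroes all off-diagonal entries.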
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
.. versionchanged:: 0.17
Deprecated :class:`lda.LDA` has been moved to *LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
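# In closed form, the rule fitted above is the standard linear discriminant score
#     delta_k(x) = x^T Sigma^{-1} mu_k - 0.5 * mu_k^T Sigma^{-1} mu_k + log(pi_k),
# with coef_[k] = Sigma^{-1} mu_k (obtained via lstsq) and intercept_[k] holding
# the remaining constant terms.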
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self.n_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
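# Summary of the SVD solver above: whiten the within-class scatter via an SVD of
# the scaled, class-centered data, project the (weighted) class centroids into
# that whitened space, and use a second SVD to keep the directions that separate
# the centroids; coef_ and intercept_ then express the linear rule in input space.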
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariance* has been moved to main constructor.
.. versionchanged:: 0.17
Deprecated *tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
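# The in-place operations below evaluate the logistic sigmoid
# 1 / (1 + exp(-decision)) without allocating temporaries.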
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
.. versionchanged:: 0.17
Deprecated :class:`qda.QDA` has been moved to *QuadraticDiscriminantAnalysis*.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariances* has been moved to main constructor.
.. versionchanged:: 0.17
Deprecated *tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
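# The value returned above is the quadratic discriminant (log-)score
#     -0.5 * [ (x - mu_k)^T Sigma_k^{-1} (x - mu_k) + log|Sigma_k| ] + log(pi_k),
# where the Mahalanobis term is norm2 (computed through the stored rotations and
# scalings) and log|Sigma_k| is u, the sum of the log scalings.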
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
joernhees/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 42 | 27323 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a value error is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
def test_cosine_distances():
# Check the pairwise Cosine distances computation
rng = np.random.RandomState(1337)
x = np.abs(rng.rand(910))
XA = np.vstack([x, x])
D = cosine_distances(XA)
assert_array_almost_equal(D, [[0., 0.], [0., 0.]])
# check that all elements are in [0, 2]
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])
XB = np.vstack([x, -x])
D2 = cosine_distances(XB)
# check that all elements are in [0, 2]
assert_true(np.all(D2 >= 0.))
assert_true(np.all(D2 <= 2.))
# check that diagonal elements are equal to 0 and non diagonal to 2
assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])
# check large random matrix
X = np.abs(rng.rand(1000, 5000))
D = cosine_distances(X)
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0])
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when the
        # data has been L2-normalized.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyway
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
drpngx/tensorflow | tensorflow/contrib/timeseries/examples/lstm.py | 24 | 13826 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, exogenous_feature_columns=None,
dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
exogenous_feature_columns: A list of `tf.feature_column`s representing
features which are inputs to the model but are not predicted by
it. These must then be present for training, evaluation, and
prediction.
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics=None):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
with tf.variable_scope("", use_resource=True):
# Use ResourceVariables to avoid race conditions.
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=functools.partial(tf.layers.dense, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The most recently seen exogenous features.
tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, exogenous, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values,
exogenous, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
"""Advance the RNN state using a previous observation or prediction."""
_, previous_observation_or_prediction, exogenous, lstm_state = state
# Update LSTM state based on the most recent exogenous and endogenous
# features.
inputs = tf.concat([previous_observation_or_prediction, exogenous],
axis=-1)
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=inputs, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction,
exogenous, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
"""Advance model state across a gap."""
# Does not do anything special if we're jumping across a gap. More advanced
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Save exogenous regressors in model state for use in _prediction_step."""
state_from_time, prediction, _, lstm_state = state
return (state_from_time, prediction,
current_exogenous_regressors, lstm_state)
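# Hedged sketch (not part of the original example): shows one way the
# constructor documented above might be called. The feature column name is
# hypothetical and this helper is never invoked by the example.
def _lstm_model_construction_sketch():
  exogenous_columns = [
      tf.feature_column.numeric_column("extra_feature", shape=(2,))]
  return _LSTMModel(num_units=32, num_features=3,
                    exogenous_feature_columns=exogenous_columns)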
def train_and_predict(
csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
export_directory=None):
"""Train and predict using a custom time series model."""
# Construct an Estimator from our LSTM model.
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
# Exogenous features are not part of the loss, but can inform
# predictions. In this example the features have no extra information, but
# are included as an API example.
tf.feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
tf.feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(num_features=5, num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=tf.train.AdamOptimizer(0.001), config=estimator_config,
# Set state to be saved across windows.
state_manager=state_management.ChainingStateManager())
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
+ ("2d_exogenous_feature",) * 2
+ ("categorical_exogenous_feature",)),
# Data types other than for `times` need to be specified if they aren't
# float32. In this case one of our exogenous features has string dtype.
column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
predict_exogenous_features = {
"2d_exogenous_feature": numpy.concatenate(
[numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
axis=-1),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 100)[None, :, None]}
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features=predict_exogenous_features)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
# Export the model in SavedModel format. We include a bit of extra boilerplate
# for "cold starting" as if we didn't have any state from the Estimator, which
# is the case when serving from a SavedModel. If Estimator output is
# available, the result of "Estimator.evaluate" can be passed directly to
# `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
# `continue_from` argument.
with tf.Graph().as_default():
filter_feature_tensors, _ = evaluation_input_fn()
with tf.train.MonitoredSession() as session:
# Fetch the series to "warm up" our state, which will allow us to make
# predictions for its future values. This is just a dictionary of times,
# values, and exogenous features mapping to numpy arrays. The use of an
# input_fn is just a convenience for the example; they can also be
# specified manually.
filter_features = session.run(filter_feature_tensors)
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_savedmodel(
export_directory, input_receiver_fn)
# Warm up and predict using the SavedModel
with tf.Graph().as_default():
with tf.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
signatures=signatures, session=session, features=filter_features)
saved_model_output = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=state, signatures=signatures,
session=session, steps=100,
exogenous_features=predict_exogenous_features))
# The exported model gives the same results as the Estimator.predict()
# call above.
numpy.testing.assert_allclose(
predictions["mean"],
numpy.squeeze(saved_model_output["mean"], axis=0))
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
rseubert/scikit-learn | sklearn/utils/tests/test_class_weight.py | 6 | 6587 | import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
def test_compute_class_weight():
"""Test (and demo) compute_class_weight."""
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = compute_class_weight("auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
"""Raise error when y does not contain all class labels"""
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
def test_compute_class_weight_auto_negative():
"""Test compute_class_weight when labels are negative"""
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = compute_class_weight("auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = compute_class_weight("auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
def test_compute_class_weight_auto_unordered():
"""Test compute_class_weight when classes are unordered"""
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = compute_class_weight("auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
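# Illustrative sketch (not part of the original tests): the expected weights
# above (e.g. [1.636, 0.818, 0.545]) appear to follow from weighting each class
# by the inverse of its frequency and rescaling so the weights sum to the number
# of classes. The helper below is hypothetical and only documents that
# arithmetic; compute_class_weight remains the reference implementation.
def _demo_auto_weight_arithmetic():
    classes = np.array([1, 0, 3])
    y = np.asarray([1, 0, 0, 3, 3, 3])
    counts = np.array([np.sum(y == c) for c in classes])    # [1, 2, 3]
    recip_freq = 1. / counts
    weights = recip_freq * len(classes) / recip_freq.sum()
    # weights ~ [1.636, 0.818, 0.545], matching the assertion above
    return weights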
def test_compute_sample_weight():
"""Test (and demo) compute_sample_weight."""
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("auto", y)
expected = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = compute_sample_weight("auto", y)
assert_array_almost_equal(sample_weight, expected ** 2)
def test_compute_sample_weight_with_subsample():
"""Test compute_sample_weight with subsamples specified."""
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("auto", y, [0, 1, 1, 2, 2, 3])
expected = np.asarray([1/3., 1/3., 1/3., 5/3., 5/3., 5/3.])
assert_array_almost_equal(sample_weight, expected)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = compute_sample_weight("auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
"""Test compute_sample_weight raises errors expected."""
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
murrayrm/python-control | control/rlocus.py | 1 | 29512 | # rlocus.py - code for computing a root locus plot
# Code contributed by Ryan Krauss, 2010
#
# Copyright (c) 2010 by Ryan Krauss
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# RMM, 17 June 2010: modified to be a standalone piece of code
# * Added BSD copyright info to file (per Ryan)
# * Added code to convert (num, den) to poly1d's if they aren't already.
# This allows Ryan's code to run on a standard signal.ltisys object
# or a control.TransferFunction object.
# * Added some comments to make sure I understand the code
#
# RMM, 2 April 2011: modified to work with new LTI structure (see ChangeLog)
# * Not tested: should still work on signal.ltisys objects
#
# Sawyer B. Fuller ([email protected]) 21 May 2020:
# * added compatibility with discrete-time systems.
#
# $Id$
# Packages used by this module
from functools import partial
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import array, poly1d, row_stack, zeros_like, real, imag
import scipy.signal # signal processing toolbox
from .lti import isdtime
from .xferfcn import _convert_to_transfer_function
from .exception import ControlMIMONotImplemented
from .sisotool import _SisotoolUpdate
from .grid import sgrid, zgrid
from . import config
__all__ = ['root_locus', 'rlocus']
# Default values for module parameters
_rlocus_defaults = {
'rlocus.grid': True,
'rlocus.plotstr': 'b' if int(mpl.__version__[0]) == 1 else 'C0',
'rlocus.print_gain': True,
'rlocus.plot': True
}
# Main function: compute a root locus diagram
def root_locus(sys, kvect=None, xlim=None, ylim=None,
plotstr=None, plot=True, print_gain=None, grid=None, ax=None,
**kwargs):
"""Root locus plot
    Calculate the root locus by finding the roots of 1 + k*TF(s), where
    TF(s) = num(s)/den(s) is the transfer function of `sys` and each k is
    an element of kvect.
Parameters
----------
sys : LTI object
Linear input/output systems (SISO only, for now).
kvect : list or ndarray, optional
List of gains to use in computing diagram.
xlim : tuple or list, optional
Set limits of x axis, normally with tuple
(see :doc:`matplotlib:api/axes_api`).
ylim : tuple or list, optional
Set limits of y axis, normally with tuple
(see :doc:`matplotlib:api/axes_api`).
plotstr : :func:`matplotlib.pyplot.plot` format string, optional
plotting style specification
plot : boolean, optional
If True (default), plot root locus diagram.
print_gain : bool
        If True (default), report mouse clicks when close to the root locus
        branches and calculate and print the corresponding gain and damping.
grid : bool
        If True, plot omega-damping grid. Default is True.
ax : :class:`matplotlib.axes.Axes`
Axes on which to create root locus plot
Returns
-------
rlist : ndarray
Computed root locations, given as a 2D array
    klist : ndarray or list
        Gains used. Same as kvect keyword argument if provided.
"""
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
import warnings
warnings.warn("'Plot' keyword is deprecated in root_locus; "
"use 'plot'", FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Check to see if legacy 'PrintGain' keyword was used
if 'PrintGain' in kwargs:
import warnings
warnings.warn("'PrintGain' keyword is deprecated in root_locus; "
"use 'print_gain'", FutureWarning)
# Map 'PrintGain' keyword to 'print_gain' keyword
print_gain = kwargs.pop('PrintGain')
# Get parameter values
plotstr = config._get_param('rlocus', 'plotstr', plotstr, _rlocus_defaults)
grid = config._get_param('rlocus', 'grid', grid, _rlocus_defaults)
print_gain = config._get_param(
'rlocus', 'print_gain', print_gain, _rlocus_defaults)
sys_loop = sys if sys.issiso() else sys[0,0]
# Convert numerator and denominator to polynomials if they aren't
(nump, denp) = _systopoly1d(sys_loop)
    # If this is a discrete-time system and xlim/ylim are not given,
    # choose limits that give a view of the unit circle
    if xlim is None and isdtime(sys, strict=True):
        xlim = (-1.2, 1.2)
    if ylim is None and isdtime(sys, strict=True):
        ylim = (-1.3, 1.3)
if kvect is None:
start_mat = _RLFindRoots(nump, denp, [1])
kvect, mymat, xlim, ylim = _default_gains(nump, denp, xlim, ylim)
else:
start_mat = _RLFindRoots(nump, denp, [kvect[0]])
mymat = _RLFindRoots(nump, denp, kvect)
mymat = _RLSortRoots(mymat)
# Check for sisotool mode
sisotool = False if 'sisotool' not in kwargs else True
# Create the Plot
if plot:
if sisotool:
fig = kwargs['fig']
ax = fig.axes[1]
else:
if ax is None:
ax = plt.gca()
fig = ax.figure
ax.set_title('Root Locus')
if print_gain and not sisotool:
fig.canvas.mpl_connect(
'button_release_event',
partial(_RLClickDispatcher, sys=sys, fig=fig,
ax_rlocus=fig.axes[0], plotstr=plotstr))
elif sisotool:
fig.axes[1].plot(
[root.real for root in start_mat],
[root.imag for root in start_mat],
'm.', marker='s', markersize=8, zorder=20, label='gain_point')
s = start_mat[0][0]
if isdtime(sys, strict=True):
zeta = -np.cos(np.angle(np.log(s)))
else:
zeta = -1 * s.real / abs(s)
fig.suptitle(
"Clicked at: %10.4g%+10.4gj gain: %10.4g damp: %10.4g" %
(s.real, s.imag, 1, zeta),
fontsize=12 if int(mpl.__version__[0]) == 1 else 10)
fig.canvas.mpl_connect(
'button_release_event',
partial(_RLClickDispatcher, sys=sys, fig=fig,
ax_rlocus=fig.axes[1], plotstr=plotstr,
sisotool=sisotool,
bode_plot_params=kwargs['bode_plot_params'],
tvect=kwargs['tvect']))
# zoom update on xlim/ylim changed, only then data on new limits
# is available, i.e., cannot combine with _RLClickDispatcher
dpfun = partial(
_RLZoomDispatcher, sys=sys, ax_rlocus=ax, plotstr=plotstr)
        # TODO: the next two lines seem to take a long time to execute
# TODO: is there a way to speed them up? (RMM, 6 Jun 2019)
ax.callbacks.connect('xlim_changed', dpfun)
ax.callbacks.connect('ylim_changed', dpfun)
# plot open loop poles
poles = array(denp.r)
ax.plot(real(poles), imag(poles), 'x')
# plot open loop zeros
zeros = array(nump.r)
if zeros.size > 0:
ax.plot(real(zeros), imag(zeros), 'o')
# Now plot the loci
for index, col in enumerate(mymat.T):
ax.plot(real(col), imag(col), plotstr, label='rootlocus')
# Set up plot axes and labels
ax.set_xlabel('Real')
ax.set_ylabel('Imaginary')
# Set up the limits for the plot
# Note: need to do this before computing grid lines
if xlim:
ax.set_xlim(xlim)
if ylim:
ax.set_ylim(ylim)
# Draw the grid
if grid and sisotool:
if isdtime(sys, strict=True):
zgrid(ax=ax)
else:
                _sgrid_func(fig)
elif grid:
if isdtime(sys, strict=True):
zgrid(ax=ax)
else:
_sgrid_func()
else:
ax.axhline(0., linestyle=':', color='k', linewidth=.75, zorder=-20)
ax.axvline(0., linestyle=':', color='k', linewidth=.75, zorder=-20)
if isdtime(sys, strict=True):
ax.add_patch(plt.Circle(
(0, 0), radius=1.0, linestyle=':', edgecolor='k',
linewidth=0.75, fill=False, zorder=-20))
return mymat, kvect
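# Hedged usage sketch (not part of the library module): shows how root_locus
# might be called for a simple SISO plant with plotting disabled. The import
# assumes this package is installed as `control`; the helper is never called
# from within the module.
def _root_locus_usage_sketch():
    from control import TransferFunction
    sys = TransferFunction([1], [1, 2, 1])      # 1 / (s^2 + 2 s + 1)
    gains = np.linspace(0., 10., 200)
    roots, klist = root_locus(sys, kvect=gains, plot=False)
    # roots has one row per gain in `gains` and one column per closed-loop pole
    return roots, klist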
def _default_gains(num, den, xlim, ylim, zoom_xlim=None, zoom_ylim=None):
"""Unsupervised gains calculation for root locus plot.
References
----------
    Ogata, K. (2002). Modern control engineering (4th ed.).
    Upper Saddle River, NJ: Prentice Hall.
"""
k_break, real_break = _break_points(num, den)
kmax = _k_max(num, den, real_break, k_break)
kvect = np.hstack((np.linspace(0, kmax, 50), np.real(k_break)))
kvect.sort()
mymat = _RLFindRoots(num, den, kvect)
mymat = _RLSortRoots(mymat)
open_loop_poles = den.roots
open_loop_zeros = num.roots
if open_loop_zeros.size != 0 and \
open_loop_zeros.size < open_loop_poles.size:
open_loop_zeros_xl = np.append(
open_loop_zeros,
np.ones(open_loop_poles.size - open_loop_zeros.size)
* open_loop_zeros[-1])
mymat_xl = np.append(mymat, open_loop_zeros_xl)
else:
mymat_xl = mymat
singular_points = np.concatenate((num.roots, den.roots), axis=0)
important_points = np.concatenate((singular_points, real_break), axis=0)
important_points = np.concatenate((important_points, np.zeros(2)), axis=0)
mymat_xl = np.append(mymat_xl, important_points)
false_gain = float(den.coeffs[0]) / float(num.coeffs[0])
if false_gain < 0 and not den.order > num.order:
# TODO: make error message more understandable
raise ValueError("Not implemented support for 0 degrees root locus "
"with equal order of numerator and denominator.")
if xlim is None and false_gain > 0:
x_tolerance = 0.05 * (np.max(np.real(mymat_xl))
- np.min(np.real(mymat_xl)))
xlim = _ax_lim(mymat_xl)
elif xlim is None and false_gain < 0:
axmin = np.min(np.real(important_points)) \
- (np.max(np.real(important_points))
- np.min(np.real(important_points)))
axmin = np.min(np.array([axmin, np.min(np.real(mymat_xl))]))
axmax = np.max(np.real(important_points)) \
+ np.max(np.real(important_points)) \
- np.min(np.real(important_points))
axmax = np.max(np.array([axmax, np.max(np.real(mymat_xl))]))
xlim = [axmin, axmax]
x_tolerance = 0.05 * (axmax - axmin)
else:
x_tolerance = 0.05 * (xlim[1] - xlim[0])
if ylim is None:
y_tolerance = 0.05 * (np.max(np.imag(mymat_xl))
- np.min(np.imag(mymat_xl)))
ylim = _ax_lim(mymat_xl * 1j)
else:
y_tolerance = 0.05 * (ylim[1] - ylim[0])
# Figure out which points are spaced too far apart
if x_tolerance == 0:
# Root locus is on imaginary axis (rare), use just y distance
tolerance = y_tolerance
elif y_tolerance == 0:
        # Root locus is on real axis (common), use just x distance
tolerance = x_tolerance
else:
tolerance = np.min([x_tolerance, y_tolerance])
indexes_too_far = _indexes_filt(mymat, tolerance, zoom_xlim, zoom_ylim)
# Add more points into the root locus for points that are too far apart
while len(indexes_too_far) > 0 and kvect.size < 5000:
for counter, index in enumerate(indexes_too_far):
index = index + counter*3
new_gains = np.linspace(kvect[index], kvect[index + 1], 5)
new_points = _RLFindRoots(num, den, new_gains[1:4])
kvect = np.insert(kvect, index + 1, new_gains[1:4])
mymat = np.insert(mymat, index + 1, new_points, axis=0)
mymat = _RLSortRoots(mymat)
indexes_too_far = _indexes_filt(mymat, tolerance, zoom_xlim, zoom_ylim)
new_gains = kvect[-1] * np.hstack((np.logspace(0, 3, 4)))
new_points = _RLFindRoots(num, den, new_gains[1:4])
kvect = np.append(kvect, new_gains[1:4])
mymat = np.concatenate((mymat, new_points), axis=0)
mymat = _RLSortRoots(mymat)
return kvect, mymat, xlim, ylim
def _indexes_filt(mymat, tolerance, zoom_xlim=None, zoom_ylim=None):
"""Calculate the distance between points and return the indexes.
Filter the indexes so only the resolution of points within the xlim and
ylim is improved when zoom is used.
"""
distance_points = np.abs(np.diff(mymat, axis=0))
indexes_too_far = list(np.unique(np.where(distance_points > tolerance)[0]))
if zoom_xlim is not None and zoom_ylim is not None:
x_tolerance_zoom = 0.05 * (zoom_xlim[1] - zoom_xlim[0])
y_tolerance_zoom = 0.05 * (zoom_ylim[1] - zoom_ylim[0])
tolerance_zoom = np.min([x_tolerance_zoom, y_tolerance_zoom])
indexes_too_far_zoom = list(
np.unique(np.where(distance_points > tolerance_zoom)[0]))
indexes_too_far_filtered = []
for index in indexes_too_far_zoom:
for point in mymat[index]:
if (zoom_xlim[0] <= point.real <= zoom_xlim[1]) and \
(zoom_ylim[0] <= point.imag <= zoom_ylim[1]):
indexes_too_far_filtered.append(index)
break
        # Check if zoom box is not overshot & insert points where necessary
if len(indexes_too_far_filtered) == 0 and len(mymat) < 500:
limits = [zoom_xlim[0], zoom_xlim[1], zoom_ylim[0], zoom_ylim[1]]
for index, limit in enumerate(limits):
if index <= 1:
asign = np.sign(real(mymat)-limit)
else:
asign = np.sign(imag(mymat) - limit)
signchange = ((np.roll(asign, 1, axis=0)
- asign) != 0).astype(int)
signchange[0] = np.zeros((len(mymat[0])))
if len(np.where(signchange == 1)[0]) > 0:
indexes_too_far_filtered.append(
np.where(signchange == 1)[0][0]-1)
if len(indexes_too_far_filtered) > 0:
if indexes_too_far_filtered[0] != 0:
indexes_too_far_filtered.insert(
0, indexes_too_far_filtered[0]-1)
if not indexes_too_far_filtered[-1] + 1 >= len(mymat) - 2:
indexes_too_far_filtered.append(
indexes_too_far_filtered[-1] + 1)
indexes_too_far.extend(indexes_too_far_filtered)
indexes_too_far = list(np.unique(indexes_too_far))
indexes_too_far.sort()
return indexes_too_far
def _break_points(num, den):
"""Extract break points over real axis and gains given these locations"""
# type: (np.poly1d, np.poly1d) -> (np.array, np.array)
dnum = num.deriv(m=1)
dden = den.deriv(m=1)
polynom = den * dnum - num * dden
real_break_pts = polynom.r
# don't care about infinite break points
real_break_pts = real_break_pts[num(real_break_pts) != 0]
k_break = -den(real_break_pts) / num(real_break_pts)
    idx = k_break >= 0   # only positive gains
k_break = k_break[idx]
real_break_pts = real_break_pts[idx]
if len(k_break) == 0:
k_break = [0]
real_break_pts = den.roots
return k_break, real_break_pts
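# Illustrative check (not part of the library): for L(s) = 1 / (s (s + 2)) the
# locus breaks away from the real axis at s = -1 with gain K = 1, which the
# computation above recovers. The helper is hypothetical and never called.
def _break_points_sketch():
    num = np.poly1d([1.])
    den = np.poly1d([1., 2., 0.])               # s^2 + 2 s
    k_break, real_break = _break_points(num, den)
    # expected: real_break ~ [-1.], k_break ~ [1.]
    return k_break, real_break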
def _ax_lim(mymat):
"""Utility to get the axis limits"""
axmin = np.min(np.real(mymat))
axmax = np.max(np.real(mymat))
if axmax != axmin:
deltax = (axmax - axmin) * 0.02
else:
deltax = np.max([1., axmax / 2])
axlim = [axmin - deltax, axmax + deltax]
return axlim
def _k_max(num, den, real_break_points, k_break_points):
""""Calculate the maximum gain for the root locus shown in the figure."""
asymp_number = den.order - num.order
singular_points = np.concatenate((num.roots, den.roots), axis=0)
important_points = np.concatenate(
(singular_points, real_break_points), axis=0)
false_gain = den.coeffs[0] / num.coeffs[0]
if asymp_number > 0:
asymp_center = (np.sum(den.roots) - np.sum(num.roots))/asymp_number
distance_max = 4 * np.max(np.abs(important_points - asymp_center))
asymp_angles = (2 * np.arange(0, asymp_number) - 1) \
* np.pi / asymp_number
if false_gain > 0:
# farthest points over asymptotes
farthest_points = asymp_center \
+ distance_max * np.exp(asymp_angles * 1j)
else:
asymp_angles = asymp_angles + np.pi
# farthest points over asymptotes
farthest_points = asymp_center \
+ distance_max * np.exp(asymp_angles * 1j)
kmax_asymp = np.real(np.abs(den(farthest_points)
/ num(farthest_points)))
else:
kmax_asymp = np.abs([np.abs(den.coeffs[0])
/ np.abs(num.coeffs[0]) * 3])
kmax = np.max(np.concatenate((np.real(kmax_asymp),
np.real(k_break_points)), axis=0))
if np.abs(false_gain) > kmax:
kmax = np.abs(false_gain)
return kmax
def _systopoly1d(sys):
"""Extract numerator and denominator polynomails for a system"""
# Allow inputs from the signal processing toolbox
if (isinstance(sys, scipy.signal.lti)):
nump = sys.num
denp = sys.den
else:
# Convert to a transfer function, if needed
sys = _convert_to_transfer_function(sys)
# Make sure we have a SISO system
if not sys.issiso():
raise ControlMIMONotImplemented()
# Start by extracting the numerator and denominator from system object
nump = sys.num[0][0]
denp = sys.den[0][0]
# Check to see if num, den are already polynomials; otherwise convert
if (not isinstance(nump, poly1d)):
nump = poly1d(nump)
if (not isinstance(denp, poly1d)):
denp = poly1d(denp)
return (nump, denp)
def _RLFindRoots(nump, denp, kvect):
"""Find the roots for the root locus."""
    # Compute the closed-loop poles for each gain in kvect
roots = []
for k in np.array(kvect, ndmin=1):
curpoly = denp + k * nump
curroots = curpoly.r
if len(curroots) < denp.order:
            # if there are fewer closed-loop poles than open-loop poles, it is
            # because one is at infinity
curroots = np.insert(curroots, len(curroots), np.inf)
curroots.sort()
roots.append(curroots)
mymat = row_stack(roots)
return mymat
def _RLSortRoots(mymat):
"""Sort the roots from sys._RLFindRoots, so that the root
locus doesn't show weird pseudo-branches as roots jump from
one branch to another."""
sorted = zeros_like(mymat)
for n, row in enumerate(mymat):
if n == 0:
sorted[n, :] = row
else:
# sort the current row by finding the element with the
# smallest absolute distance to each root in the
# previous row
available = list(range(len(prevrow)))
for elem in row:
evect = elem-prevrow[available]
ind1 = abs(evect).argmin()
ind = available.pop(ind1)
sorted[n, ind] = elem
prevrow = sorted[n, :]
return sorted
def _RLZoomDispatcher(event, sys, ax_rlocus, plotstr):
"""Rootlocus plot zoom dispatcher"""
sys_loop = sys if sys.issiso() else sys[0,0]
nump, denp = _systopoly1d(sys_loop)
xlim, ylim = ax_rlocus.get_xlim(), ax_rlocus.get_ylim()
kvect, mymat, xlim, ylim = _default_gains(
nump, denp, xlim=None, ylim=None, zoom_xlim=xlim, zoom_ylim=ylim)
_removeLine('rootlocus', ax_rlocus)
for i, col in enumerate(mymat.T):
ax_rlocus.plot(real(col), imag(col), plotstr, label='rootlocus',
scalex=False, scaley=False)
def _RLClickDispatcher(event, sys, fig, ax_rlocus, plotstr, sisotool=False,
bode_plot_params=None, tvect=None):
"""Rootlocus plot click dispatcher"""
# Zoom is handled by specialized callback above, only do gain plot
if event.inaxes == ax_rlocus.axes and \
plt.get_current_fig_manager().toolbar.mode not in \
{'zoom rect', 'pan/zoom'}:
# if a point is clicked on the rootlocus plot visually emphasize it
K = _RLFeedbackClicksPoint(event, sys, fig, ax_rlocus, sisotool)
if sisotool and K is not None:
_SisotoolUpdate(sys, fig, K, bode_plot_params, tvect)
# Update the canvas
fig.canvas.draw()
def _RLFeedbackClicksPoint(event, sys, fig, ax_rlocus, sisotool=False):
"""Display root-locus gain feedback point for clicks on root-locus plot"""
sys_loop = sys if sys.issiso() else sys[0,0]
(nump, denp) = _systopoly1d(sys_loop)
xlim = ax_rlocus.get_xlim()
ylim = ax_rlocus.get_ylim()
x_tolerance = 0.1 * abs((xlim[1] - xlim[0]))
y_tolerance = 0.1 * abs((ylim[1] - ylim[0]))
gain_tolerance = np.mean([x_tolerance, y_tolerance])*0.1
# Catch type error when event click is in the figure but not in an axis
try:
s = complex(event.xdata, event.ydata)
K = -1. / sys_loop(s)
K_xlim = -1. / sys_loop(
complex(event.xdata + 0.05 * abs(xlim[1] - xlim[0]), event.ydata))
K_ylim = -1. / sys_loop(
complex(event.xdata, event.ydata + 0.05 * abs(ylim[1] - ylim[0])))
except TypeError:
K = float('inf')
K_xlim = float('inf')
K_ylim = float('inf')
gain_tolerance += 0.1 * max([abs(K_ylim.imag/K_ylim.real),
abs(K_xlim.imag/K_xlim.real)])
if abs(K.real) > 1e-8 and abs(K.imag / K.real) < gain_tolerance and \
event.inaxes == ax_rlocus.axes and K.real > 0.:
if isdtime(sys, strict=True):
zeta = -np.cos(np.angle(np.log(s)))
else:
zeta = -1 * s.real / abs(s)
# Display the parameters in the output window and figure
print("Clicked at %10.4g%+10.4gj gain %10.4g damp %10.4g" %
(s.real, s.imag, K.real, zeta))
fig.suptitle(
"Clicked at: %10.4g%+10.4gj gain: %10.4g damp: %10.4g" %
(s.real, s.imag, K.real, zeta),
fontsize=12 if int(mpl.__version__[0]) == 1 else 10)
# Remove the previous line
_removeLine(label='gain_point', ax=ax_rlocus)
# Visualise clicked point, display all roots for sisotool mode
if sisotool:
mymat = _RLFindRoots(nump, denp, K.real)
ax_rlocus.plot(
[root.real for root in mymat],
[root.imag for root in mymat],
'm.', marker='s', markersize=8, zorder=20, label='gain_point')
else:
ax_rlocus.plot(s.real, s.imag, 'k.', marker='s', markersize=8,
zorder=20, label='gain_point')
return K.real
def _removeLine(label, ax):
"""Remove a line from the ax when a label is specified"""
for line in reversed(ax.lines):
if line.get_label() == label:
line.remove()
del line
def _sgrid_func(fig=None, zeta=None, wn=None):
if fig is None:
fig = plt.gcf()
ax = fig.gca()
else:
ax = fig.axes[1]
# Get locator function for x-axis, y-axis tick marks
xlocator = ax.get_xaxis().get_major_locator()
ylocator = ax.get_yaxis().get_major_locator()
# Decide on the location for the labels (?)
ylim = ax.get_ylim()
ytext_pos_lim = ylim[1] - (ylim[1] - ylim[0]) * 0.03
xlim = ax.get_xlim()
xtext_pos_lim = xlim[0] + (xlim[1] - xlim[0]) * 0.0
# Create a list of damping ratios, if needed
if zeta is None:
zeta = _default_zetas(xlim, ylim)
# Figure out the angles for the different damping ratios
angles = []
for z in zeta:
if (z >= 1e-4) and (z <= 1):
angles.append(np.pi/2 + np.arcsin(z))
else:
zeta.remove(z)
y_over_x = np.tan(angles)
# zeta-constant lines
for index, yp in enumerate(y_over_x):
ax.plot([0, xlocator()[0]], [0, yp * xlocator()[0]], color='gray',
linestyle='dashed', linewidth=0.5)
ax.plot([0, xlocator()[0]], [0, -yp * xlocator()[0]], color='gray',
linestyle='dashed', linewidth=0.5)
an = "%.2f" % zeta[index]
if yp < 0:
xtext_pos = 1/yp * ylim[1]
ytext_pos = yp * xtext_pos_lim
if np.abs(xtext_pos) > np.abs(xtext_pos_lim):
xtext_pos = xtext_pos_lim
else:
ytext_pos = ytext_pos_lim
ax.annotate(an, textcoords='data', xy=[xtext_pos, ytext_pos],
fontsize=8)
ax.plot([0, 0], [ylim[0], ylim[1]],
color='gray', linestyle='dashed', linewidth=0.5)
# omega-constant lines
angles = np.linspace(-90, 90, 20) * np.pi/180
if wn is None:
wn = _default_wn(xlocator(), ylocator())
for om in wn:
if om < 0:
# Generate the lines for natural frequency curves
yp = np.sin(angles) * np.abs(om)
xp = -np.cos(angles) * np.abs(om)
# Plot the natural frequency contours
ax.plot(xp, yp, color='gray', linestyle='dashed', linewidth=0.5)
# Annotate the natural frequencies by listing on x-axis
# Note: need to filter values for proper plotting in Jupyter
if (om > xlim[0]):
an = "%.2f" % -om
ax.annotate(an, textcoords='data', xy=[om, 0], fontsize=8)
def _default_zetas(xlim, ylim):
"""Return default list of damping coefficients
This function computes a list of damping coefficients based on the limits
of the graph. A set of 4 damping coefficients are computed for the x-axis
and a set of three damping coefficients are computed for the y-axis
(corresponding to the normal 4:3 plot aspect ratio in `matplotlib`?).
Parameters
----------
xlim : array_like
List of x-axis limits [min, max]
ylim : array_like
List of y-axis limits [min, max]
Returns
-------
zeta : list
List of default damping coefficients for the plot
"""
# Damping coefficient lines that intersect the x-axis
sep1 = -xlim[0] / 4
ang1 = [np.arctan((sep1*i)/ylim[1]) for i in np.arange(1, 4, 1)]
    # Damping coefficient lines that intersect the y-axis
sep2 = ylim[1] / 3
ang2 = [np.arctan(-xlim[0]/(ylim[1]-sep2*i)) for i in np.arange(1, 3, 1)]
# Put the lines together and add one at -pi/2 (negative real axis)
angles = np.concatenate((ang1, ang2))
angles = np.insert(angles, len(angles), np.pi/2)
# Return the damping coefficients corresponding to these angles
zeta = np.sin(angles)
return zeta.tolist()
def _default_wn(xloc, yloc, max_lines=7):
"""Return default wn for root locus plot
This function computes a list of natural frequencies based on the grid
parameters of the graph.
Parameters
----------
xloc : array_like
List of x-axis tick values
    yloc : array_like
        List of y-axis tick values
max_lines : int, optional
Maximum number of frequencies to generate (default = 7)
Returns
-------
wn : list
List of default natural frequencies for the plot
"""
sep = xloc[1]-xloc[0] # separation between x-ticks
# Decide whether to use the x or y axis for determining wn
if yloc[-1] / sep > max_lines*10:
# y-axis scale >> x-axis scale
wn = yloc # one frequency per y-axis tick mark
else:
wn = xloc # one frequency per x-axis tick mark
# Insert additional frequencies to span the y-axis
while np.abs(wn[0]) < yloc[-1]:
wn = np.insert(wn, 0, wn[0]-sep)
# If there are too many values, cut them in half
while len(wn) > max_lines:
wn = wn[0:-1:2]
return wn
rlocus = root_locus
| bsd-3-clause |
aosagie/spark | python/pyspark/sql/group.py | 24 | 12490 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import *
__all__ = ["GroupedData"]
def dfapi(f):
def _api(self):
name = f.__name__
jdf = getattr(self._jgd, name)()
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
def df_varargs_api(f):
def _api(self, *cols):
name = f.__name__
jdf = getattr(self._jgd, name)(_to_seq(self.sql_ctx._sc, cols))
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
class GroupedData(object):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. note:: Experimental
.. versionadded:: 1.3
"""
def __init__(self, jgd, df):
self._jgd = jgd
self._df = df
self.sql_ctx = df.sql_ctx
@ignore_unicode_prefix
@since(1.3)
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
@dfapi
@since(1.3)
def count(self):
"""Counts the number of records for each group.
>>> sorted(df.groupBy(df.age).count().collect())
[Row(age=2, count=1), Row(age=5, count=1)]
"""
@df_varargs_api
@since(1.3)
def mean(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().mean('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def avg(self, *cols):
"""Computes average values for each numeric columns for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().avg('age').collect()
[Row(avg(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(avg(age)=3.5, avg(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def max(self, *cols):
"""Computes the max value for each numeric columns for each group.
>>> df.groupBy().max('age').collect()
[Row(max(age)=5)]
>>> df3.groupBy().max('age', 'height').collect()
[Row(max(age)=5, max(height)=85)]
"""
@df_varargs_api
@since(1.3)
def min(self, *cols):
"""Computes the min value for each numeric column for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().min('age').collect()
[Row(min(age)=2)]
>>> df3.groupBy().min('age', 'height').collect()
[Row(min(age)=2, min(height)=80)]
"""
@df_varargs_api
@since(1.3)
def sum(self, *cols):
"""Compute the sum for each numeric columns for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().sum('age').collect()
[Row(sum(age)=7)]
>>> df3.groupBy().sum('age', 'height').collect()
[Row(sum(age)=7, sum(height)=165)]
"""
@since(1.6)
def pivot(self, pivot_col, values=None):
"""
        Pivots a column of the current :class:`DataFrame` and performs the specified aggregation.
There are two versions of pivot function: one that requires the caller to specify the list
of distinct values to pivot on, and one that does not. The latter is more concise but less
efficient, because Spark needs to first compute the list of distinct values internally.
:param pivot_col: Name of the column to pivot.
:param values: List of values that will be translated to columns in the output DataFrame.
# Compute the sum of earnings for each year by course with each course as a separate column
>>> df4.groupBy("year").pivot("course", ["dotNET", "Java"]).sum("earnings").collect()
[Row(year=2012, dotNET=15000, Java=20000), Row(year=2013, dotNET=48000, Java=30000)]
# Or without specifying column values (less efficient)
>>> df4.groupBy("year").pivot("course").sum("earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
>>> df5.groupBy("sales.year").pivot("sales.course").sum("sales.earnings").collect()
[Row(year=2012, Java=20000, dotNET=15000), Row(year=2013, Java=30000, dotNET=48000)]
"""
if values is None:
jgd = self._jgd.pivot(pivot_col)
else:
jgd = self._jgd.pivot(pivot_col, values)
return GroupedData(jgd, self._df)
@since(2.3)
def apply(self, udf):
"""
Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The user-defined function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
        to the user-function, and the returned `pandas.DataFrame` objects are combined into a
:class:`DataFrame`.
The returned `pandas.DataFrame` can be of arbitrary length and its schema must match the
returnType of the pandas udf.
        .. note:: This function requires a full shuffle. All the data of a group will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. note:: Experimental
:param udf: a grouped map user-defined function returned by
:func:`pyspark.sql.functions.pandas_udf`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
.. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
"""
        # Columns are special because hasattr always returns True
if isinstance(udf, Column) or not hasattr(udf, 'func') \
or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
"GROUPED_MAP.")
df = self._df
udf_column = udf(*[df[col] for col in df.columns])
jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.group
globs = pyspark.sql.group.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.group tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
Row(name='Bob', age=5, height=85)]).toDF()
globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
Row(course="Java", year=2012, earnings=20000),
Row(course="dotNET", year=2012, earnings=5000),
Row(course="dotNET", year=2013, earnings=48000),
Row(course="Java", year=2013, earnings=30000)]).toDF()
globs['df5'] = sc.parallelize([
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
Row(training="junior", sales=Row(course="Java", year=2012, earnings=20000)),
Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
Row(training="expert", sales=Row(course="Java", year=2013, earnings=30000))]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.group, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
roxyboy/scikit-learn | sklearn/neighbors/unsupervised.py | 106 | 4461 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
| bsd-3-clause |
mit-crpg/openmc | openmc/tallies.py | 6 | 123550 | from collections.abc import Iterable, MutableSequence
import copy
from functools import partial, reduce
from itertools import product
from numbers import Integral, Real
import operator
from pathlib import Path
from xml.etree import ElementTree as ET
import h5py
import numpy as np
import pandas as pd
import scipy.sparse as sps
import openmc
import openmc.checkvalue as cv
from ._xml import clean_indentation, reorder_attributes
from .mixin import IDManagerMixin
# The tally arithmetic product types. The tensor product performs the full
# cross product of the data in two tallies with respect to a specified axis
# (filters, nuclides, or scores). The entrywise product performs the arithmetic
# operation entrywise across the entries in two tallies with respect to a
# specified axis.
_PRODUCT_TYPES = ['tensor', 'entrywise']
# The following indicate acceptable types when setting Tally.scores,
# Tally.nuclides, and Tally.filters
_SCORE_CLASSES = (str, openmc.CrossScore, openmc.AggregateScore)
_NUCLIDE_CLASSES = (str, openmc.CrossNuclide, openmc.AggregateNuclide)
_FILTER_CLASSES = (openmc.Filter, openmc.CrossFilter, openmc.AggregateFilter)
# Valid types of estimators
ESTIMATOR_TYPES = ['tracklength', 'collision', 'analog']
class Tally(IDManagerMixin):
"""A tally defined by a set of scores that are accumulated for a list of
nuclides given a set of filters.
Parameters
----------
tally_id : int, optional
Unique identifier for the tally. If none is specified, an identifier
will automatically be assigned
name : str, optional
Name of the tally. If not specified, the name is the empty string.
Attributes
----------
id : int
Unique identifier for the tally
name : str
Name of the tally
filters : list of openmc.Filter
List of specified filters for the tally
nuclides : list of openmc.Nuclide
List of nuclides to score results for
scores : list of str
List of defined scores, e.g. 'flux', 'fission', etc.
estimator : {'analog', 'tracklength', 'collision'}
Type of estimator for the tally
triggers : list of openmc.Trigger
List of tally triggers
num_scores : int
Total number of scores
num_filter_bins : int
Total number of filter bins accounting for all filters
num_bins : int
Total number of bins for the tally
shape : 3-tuple of int
The shape of the tally data array ordered as the number of filter bins,
nuclide bins and score bins
filter_strides : list of int
Stride in memory for each filter
num_realizations : int
Total number of realizations
with_summary : bool
Whether or not a Summary has been linked
sum : numpy.ndarray
An array containing the sum of each independent realization for each bin
sum_sq : numpy.ndarray
An array containing the sum of each independent realization squared for
each bin
mean : numpy.ndarray
An array containing the sample mean for each bin
std_dev : numpy.ndarray
An array containing the sample standard deviation for each bin
derived : bool
Whether or not the tally is derived from one or more other tallies
sparse : bool
Whether or not the tally uses SciPy's LIL sparse matrix format for
compressed data storage
derivative : openmc.TallyDerivative
A material perturbation derivative to apply to all scores in the tally.
"""
next_id = 1
used_ids = set()
def __init__(self, tally_id=None, name=''):
# Initialize Tally class attributes
self.id = tally_id
self.name = name
self._filters = cv.CheckedList(_FILTER_CLASSES, 'tally filters')
self._nuclides = cv.CheckedList(_NUCLIDE_CLASSES, 'tally nuclides')
self._scores = cv.CheckedList(_SCORE_CLASSES, 'tally scores')
self._estimator = None
self._triggers = cv.CheckedList(openmc.Trigger, 'tally triggers')
self._derivative = None
self._num_realizations = 0
self._with_summary = False
self._sum = None
self._sum_sq = None
self._mean = None
self._std_dev = None
self._with_batch_statistics = False
self._derived = False
self._sparse = False
self._sp_filename = None
self._results_read = False
def __repr__(self):
parts = ['Tally']
parts.append('{: <15}=\t{}'.format('ID', self.id))
parts.append('{: <15}=\t{}'.format('Name', self.name))
if self.derivative is not None:
parts.append('{: <15}=\t{}'.format('Derivative ID', self.derivative.id))
filters = ', '.join(type(f).__name__ for f in self.filters)
parts.append('{: <15}=\t{}'.format('Filters', filters))
nuclides = ' '.join(str(nuclide) for nuclide in self.nuclides)
parts.append('{: <15}=\t{}'.format('Nuclides', nuclides))
parts.append('{: <15}=\t{}'.format('Scores', self.scores))
parts.append('{: <15}=\t{}'.format('Estimator', self.estimator))
return '\n\t'.join(parts)
@property
def name(self):
return self._name
@property
def filters(self):
return self._filters
@property
def nuclides(self):
return self._nuclides
@property
def num_nuclides(self):
return len(self._nuclides)
@property
def scores(self):
return self._scores
@property
def num_scores(self):
return len(self._scores)
@property
def num_filters(self):
return len(self.filters)
@property
def num_filter_bins(self):
return reduce(operator.mul, (f.num_bins for f in self.filters), 1)
@property
def num_bins(self):
return self.num_filter_bins * self.num_nuclides * self.num_scores
@property
def shape(self):
return (self.num_filter_bins, self.num_nuclides, self.num_scores)
@property
def estimator(self):
return self._estimator
@property
def triggers(self):
return self._triggers
@property
def num_realizations(self):
return self._num_realizations
@property
def with_summary(self):
return self._with_summary
def _read_results(self):
if self._results_read:
return
# Open the HDF5 statepoint file
with h5py.File(self._sp_filename, 'r') as f:
# Extract Tally data from the file
data = f['tallies/tally {}/results'.format(self.id)]
sum_ = data[:, :, 0]
sum_sq = data[:, :, 1]
# Reshape the results arrays
sum_ = np.reshape(sum_, self.shape)
sum_sq = np.reshape(sum_sq, self.shape)
# Set the data for this Tally
self._sum = sum_
self._sum_sq = sum_sq
# Convert NumPy arrays to SciPy sparse LIL matrices
if self.sparse:
self._sum = sps.lil_matrix(self._sum.flatten(), self._sum.shape)
self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(), self._sum_sq.shape)
# Indicate that Tally results have been read
self._results_read = True
@property
def sum(self):
if not self._sp_filename or self.derived:
return None
# Make sure results have been read
self._read_results()
if self.sparse:
return np.reshape(self._sum.toarray(), self.shape)
else:
return self._sum
@property
def sum_sq(self):
if not self._sp_filename or self.derived:
return None
# Make sure results have been read
self._read_results()
if self.sparse:
return np.reshape(self._sum_sq.toarray(), self.shape)
else:
return self._sum_sq
@property
def mean(self):
if self._mean is None:
if not self._sp_filename:
return None
self._mean = self.sum / self.num_realizations
# Convert NumPy array to SciPy sparse LIL matrix
if self.sparse:
self._mean = sps.lil_matrix(self._mean.flatten(),
self._mean.shape)
if self.sparse:
return np.reshape(self._mean.toarray(), self.shape)
else:
return self._mean
@property
def std_dev(self):
if self._std_dev is None:
if not self._sp_filename:
return None
n = self.num_realizations
nonzero = np.abs(self.mean) > 0
self._std_dev = np.zeros_like(self.mean)
self._std_dev[nonzero] = np.sqrt((self.sum_sq[nonzero]/n -
self.mean[nonzero]**2)/(n - 1))
# Convert NumPy array to SciPy sparse LIL matrix
if self.sparse:
self._std_dev = sps.lil_matrix(self._std_dev.flatten(),
self._std_dev.shape)
self.with_batch_statistics = True
if self.sparse:
return np.reshape(self._std_dev.toarray(), self.shape)
else:
return self._std_dev
@property
def with_batch_statistics(self):
return self._with_batch_statistics
@property
def derived(self):
return self._derived
@property
def derivative(self):
return self._derivative
@property
def sparse(self):
return self._sparse
@estimator.setter
def estimator(self, estimator):
cv.check_value('estimator', estimator, ESTIMATOR_TYPES)
self._estimator = estimator
@triggers.setter
def triggers(self, triggers):
cv.check_type('tally triggers', triggers, MutableSequence)
self._triggers = cv.CheckedList(openmc.Trigger, 'tally triggers',
triggers)
@name.setter
def name(self, name):
cv.check_type('tally name', name, str, none_ok=True)
self._name = name
@derivative.setter
def derivative(self, deriv):
cv.check_type('tally derivative', deriv, openmc.TallyDerivative,
none_ok=True)
self._derivative = deriv
@filters.setter
def filters(self, filters):
cv.check_type('tally filters', filters, MutableSequence)
# If the filter is already in the Tally, raise an error
visited_filters = set()
for f in filters:
if f in visited_filters:
msg = 'Unable to add a duplicate filter "{}" to Tally ID="{}" ' \
'since duplicate filters are not supported in the OpenMC ' \
'Python API'.format(f, self.id)
raise ValueError(msg)
visited_filters.add(f)
self._filters = cv.CheckedList(_FILTER_CLASSES, 'tally filters', filters)
@nuclides.setter
def nuclides(self, nuclides):
cv.check_type('tally nuclides', nuclides, MutableSequence)
# If the nuclide is already in the Tally, raise an error
visited_nuclides = set()
for nuc in nuclides:
if nuc in visited_nuclides:
msg = 'Unable to add a duplicate nuclide "{}" to Tally ID="{}" ' \
'since duplicate nuclides are not supported in the OpenMC ' \
'Python API'.format(nuc, self.id)
raise ValueError(msg)
visited_nuclides.add(nuc)
self._nuclides = cv.CheckedList(_NUCLIDE_CLASSES, 'tally nuclides',
nuclides)
@scores.setter
def scores(self, scores):
cv.check_type('tally scores', scores, MutableSequence)
visited_scores = set()
for i, score in enumerate(scores):
# If the score is already in the Tally, raise an error
if score in visited_scores:
msg = 'Unable to add a duplicate score "{}" to Tally ID="{}" ' \
'since duplicate scores are not supported in the OpenMC ' \
'Python API'.format(score, self.id)
raise ValueError(msg)
visited_scores.add(score)
# If score is a string, strip whitespace
if isinstance(score, str):
# Check to see if scores are deprecated before storing
for deprecated in ['scatter-', 'nu-scatter-', 'scatter-p',
'nu-scatter-p', 'scatter-y', 'nu-scatter-y',
'flux-y', 'total-y']:
if score.strip().startswith(deprecated):
msg = score.strip() + ' is no longer supported.'
raise ValueError(msg)
scores[i] = score.strip()
self._scores = cv.CheckedList(_SCORE_CLASSES, 'tally scores', scores)
@num_realizations.setter
def num_realizations(self, num_realizations):
cv.check_type('number of realizations', num_realizations, Integral)
cv.check_greater_than('number of realizations', num_realizations, 0, True)
self._num_realizations = num_realizations
@with_summary.setter
def with_summary(self, with_summary):
cv.check_type('with_summary', with_summary, bool)
self._with_summary = with_summary
@with_batch_statistics.setter
def with_batch_statistics(self, with_batch_statistics):
cv.check_type('with_batch_statistics', with_batch_statistics, bool)
self._with_batch_statistics = with_batch_statistics
@sum.setter
def sum(self, sum):
cv.check_type('sum', sum, Iterable)
self._sum = sum
@sum_sq.setter
def sum_sq(self, sum_sq):
cv.check_type('sum_sq', sum_sq, Iterable)
self._sum_sq = sum_sq
@sparse.setter
def sparse(self, sparse):
"""Convert tally data from NumPy arrays to SciPy list of lists (LIL)
sparse matrices, and vice versa.
This property may be used to reduce the amount of data in memory during
tally data processing. The tally data will be stored as SciPy LIL
matrices internally within the Tally object. All tally data access
properties and methods will return data as a dense NumPy array.
"""
cv.check_type('sparse', sparse, bool)
# Convert NumPy arrays to SciPy sparse LIL matrices
if sparse and not self.sparse:
if self._sum is not None:
self._sum = sps.lil_matrix(self._sum.flatten(), self._sum.shape)
if self._sum_sq is not None:
self._sum_sq = sps.lil_matrix(self._sum_sq.flatten(),
self._sum_sq.shape)
if self._mean is not None:
self._mean = sps.lil_matrix(self._mean.flatten(),
self._mean.shape)
if self._std_dev is not None:
self._std_dev = sps.lil_matrix(self._std_dev.flatten(),
self._std_dev.shape)
self._sparse = True
# Convert SciPy sparse LIL matrices to NumPy arrays
elif not sparse and self.sparse:
if self._sum is not None:
self._sum = np.reshape(self._sum.toarray(), self.shape)
if self._sum_sq is not None:
self._sum_sq = np.reshape(self._sum_sq.toarray(), self.shape)
if self._mean is not None:
self._mean = np.reshape(self._mean.toarray(), self.shape)
if self._std_dev is not None:
self._std_dev = np.reshape(self._std_dev.toarray(), self.shape)
self._sparse = False
def remove_score(self, score):
"""Remove a score from the tally
Parameters
----------
score : str
Score to remove
"""
if score not in self.scores:
msg = 'Unable to remove score "{}" from Tally ID="{}" since ' \
'the Tally does not contain this score'.format(score, self.id)
raise ValueError(msg)
self._scores.remove(score)
def remove_filter(self, old_filter):
"""Remove a filter from the tally
Parameters
----------
old_filter : openmc.Filter
Filter to remove
"""
if old_filter not in self.filters:
msg = 'Unable to remove filter "{}" from Tally ID="{}" since the ' \
'Tally does not contain this filter'.format(old_filter, self.id)
raise ValueError(msg)
self._filters.remove(old_filter)
def remove_nuclide(self, nuclide):
"""Remove a nuclide from the tally
Parameters
----------
nuclide : openmc.Nuclide
Nuclide to remove
"""
if nuclide not in self.nuclides:
msg = 'Unable to remove nuclide "{}" from Tally ID="{}" since the ' \
'Tally does not contain this nuclide'.format(nuclide, self.id)
raise ValueError(msg)
self._nuclides.remove(nuclide)
def _can_merge_filters(self, other):
"""Determine if another tally's filters can be merged with this one's
The types of filters between the two tallies must match identically.
The bins in all of the filters must match identically, or be mergeable
in only one filter. This is a helper method for the can_merge(...)
and merge(...) methods.
Parameters
----------
other : openmc.Tally
Tally to check for mergeable filters
"""
# Two tallies must have the same number of filters
if len(self.filters) != len(other.filters):
return False
# Return False if only one tally has a delayed group filter
tally1_dg = self.contains_filter(openmc.DelayedGroupFilter)
tally2_dg = other.contains_filter(openmc.DelayedGroupFilter)
if tally1_dg != tally2_dg:
return False
# Look to see if all filters are the same, or one or more can be merged
for filter1 in self.filters:
mergeable = False
for filter2 in other.filters:
if filter1 == filter2 or filter1.can_merge(filter2):
mergeable = True
break
# If no mergeable filter was found, the tallies are not mergeable
if not mergeable:
return False
# Tally filters are mergeable if all conditional checks passed
return True
def _can_merge_nuclides(self, other):
"""Determine if another tally's nuclides can be merged with this one's
The nuclides between the two tallies must be mutually exclusive or
identically matching. This is a helper method for the can_merge(...)
and merge(...) methods.
Parameters
----------
other : openmc.Tally
Tally to check for mergeable nuclides
"""
no_nuclides_match = True
all_nuclides_match = True
# Search for each of this tally's nuclides in the other tally
for nuclide in self.nuclides:
if nuclide not in other.nuclides:
all_nuclides_match = False
else:
no_nuclides_match = False
# Search for each of the other tally's nuclides in this tally
for nuclide in other.nuclides:
if nuclide not in self.nuclides:
all_nuclides_match = False
else:
no_nuclides_match = False
# Either all nuclides should match, or none should
return no_nuclides_match or all_nuclides_match
def _can_merge_scores(self, other):
"""Determine if another tally's scores can be merged with this one's
The scores between the two tallies must be mutually exclusive or
identically matching. This is a helper method for the can_merge(...)
and merge(...) methods.
Parameters
----------
other : openmc.Tally
Tally to check for mergeable scores
"""
no_scores_match = True
all_scores_match = True
# Search for each of this tally's scores in the other tally
for score in self.scores:
if score in other.scores:
no_scores_match = False
# Search for each of the other tally's scores in this tally
for score in other.scores:
if score not in self.scores:
all_scores_match = False
else:
no_scores_match = False
if score == 'current' and score not in self.scores:
return False
# Nuclides cannot be specified on 'flux' scores
if 'flux' in self.scores or 'flux' in other.scores:
if self.nuclides != other.nuclides:
return False
# Either all scores should match, or none should
return no_scores_match or all_scores_match
def can_merge(self, other):
"""Determine if another tally can be merged with this one
If results have been loaded from a statepoint, then tallies are only
mergeable along one and only one of filter bins, nuclides or scores.
Parameters
----------
other : openmc.Tally
Tally to check for merging
"""
if not isinstance(other, Tally):
return False
# Must have same estimator
if self.estimator != other.estimator:
return False
equal_filters = sorted(self.filters) == sorted(other.filters)
equal_nuclides = sorted(self.nuclides) == sorted(other.nuclides)
equal_scores = sorted(self.scores) == sorted(other.scores)
equality = [equal_filters, equal_nuclides, equal_scores]
# If all filters, nuclides and scores match then tallies are mergeable
if all(equality):
return True
# Variables to indicate filter bins, nuclides, and scores that can be merged
can_merge_filters = self._can_merge_filters(other)
can_merge_nuclides = self._can_merge_nuclides(other)
can_merge_scores = self._can_merge_scores(other)
mergeability = [can_merge_filters, can_merge_nuclides, can_merge_scores]
if not all(mergeability):
return False
# If the tally results have been read from the statepoint, at least two
# of filters, nuclides and scores must match
else:
return not self._results_read or sum(equality) >= 2
def merge(self, other):
"""Merge another tally with this one
If results have been loaded from a statepoint, then tallies are only
mergeable along one and only one of filter bins, nuclides or scores.
Parameters
----------
other : openmc.Tally
Tally to merge with this one
Returns
-------
merged_tally : openmc.Tally
Merged tallies
"""
if not self.can_merge(other):
msg = 'Unable to merge tally ID="{}" with "{}"'.format(
other.id, self.id)
raise ValueError(msg)
# Create deep copy of tally to return as merged tally
merged_tally = copy.deepcopy(self)
# Differentiate Tally with a new auto-generated Tally ID
merged_tally.id = None
# Create deep copy of other tally to use for array concatenation
other_copy = copy.deepcopy(other)
# Identify if filters, nuclides and scores are mergeable and/or equal
merge_filters = self._can_merge_filters(other)
merge_nuclides = self._can_merge_nuclides(other)
merge_scores = self._can_merge_scores(other)
equal_filters = sorted(self.filters) == sorted(other.filters)
equal_nuclides = sorted(self.nuclides) == sorted(other.nuclides)
equal_scores = sorted(self.scores) == sorted(other.scores)
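        # The merge_axis values set below index into the reshaped data
        # returned by get_reshaped_data(): axes 0 .. num_filters - 1 are the
        # filter axes, axis num_filters is the nuclide axis, and axis
        # num_filters + 1 is the score axis.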
# If two tallies can be merged along a filter's bins
if merge_filters and not equal_filters:
# Search for mergeable filters
for i, filter1 in enumerate(self.filters):
for filter2 in other.filters:
if filter1 != filter2 and filter1.can_merge(filter2):
other_copy._swap_filters(other_copy.filters[i], filter2)
merged_tally.filters[i] = filter1.merge(filter2)
join_right = filter1 < filter2
merge_axis = i
break
# If two tallies can be merged along nuclide bins
if merge_nuclides and not equal_nuclides:
merge_axis = self.num_filters
join_right = True
# Add unique nuclides from other tally to merged tally
for nuclide in other.nuclides:
if nuclide not in merged_tally.nuclides:
merged_tally.nuclides.append(nuclide)
# If two tallies can be merged along score bins
if merge_scores and not equal_scores:
merge_axis = self.num_filters + 1
join_right = True
# Add unique scores from other tally to merged tally
for score in other.scores:
if score not in merged_tally.scores:
merged_tally.scores.append(score)
# Add triggers from other tally to merged tally
for trigger in other.triggers:
merged_tally.triggers.append(trigger)
# If results have not been read, then return tally for input generation
if self._results_read is None:
return merged_tally
# Otherwise, this is a derived tally which needs merged results arrays
else:
self._derived = True
# Concatenate sum arrays if present in both tallies
if self.sum is not None and other_copy.sum is not None:
self_sum = self.get_reshaped_data(value='sum')
other_sum = other_copy.get_reshaped_data(value='sum')
if join_right:
merged_sum = np.concatenate((self_sum, other_sum),
axis=merge_axis)
else:
merged_sum = np.concatenate((other_sum, self_sum),
axis=merge_axis)
merged_tally._sum = np.reshape(merged_sum, merged_tally.shape)
# Concatenate sum_sq arrays if present in both tallies
if self.sum_sq is not None and other.sum_sq is not None:
self_sum_sq = self.get_reshaped_data(value='sum_sq')
other_sum_sq = other_copy.get_reshaped_data(value='sum_sq')
if join_right:
merged_sum_sq = np.concatenate((self_sum_sq, other_sum_sq),
axis=merge_axis)
else:
merged_sum_sq = np.concatenate((other_sum_sq, self_sum_sq),
axis=merge_axis)
merged_tally._sum_sq = np.reshape(merged_sum_sq, merged_tally.shape)
# Concatenate mean arrays if present in both tallies
if self.mean is not None and other.mean is not None:
self_mean = self.get_reshaped_data(value='mean')
other_mean = other_copy.get_reshaped_data(value='mean')
if join_right:
merged_mean = np.concatenate((self_mean, other_mean),
axis=merge_axis)
else:
merged_mean = np.concatenate((other_mean, self_mean),
axis=merge_axis)
merged_tally._mean = np.reshape(merged_mean, merged_tally.shape)
# Concatenate std. dev. arrays if present in both tallies
if self.std_dev is not None and other.std_dev is not None:
self_std_dev = self.get_reshaped_data(value='std_dev')
other_std_dev = other_copy.get_reshaped_data(value='std_dev')
if join_right:
merged_std_dev = np.concatenate((self_std_dev, other_std_dev),
axis=merge_axis)
else:
merged_std_dev = np.concatenate((other_std_dev, self_std_dev),
axis=merge_axis)
merged_tally._std_dev = np.reshape(merged_std_dev, merged_tally.shape)
# Sparsify merged tally if both tallies are sparse
merged_tally.sparse = self.sparse and other.sparse
return merged_tally
def to_xml_element(self):
"""Return XML representation of the tally
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing tally data
"""
element = ET.Element("tally")
# Tally ID
element.set("id", str(self.id))
# Optional Tally name
if self.name != '':
element.set("name", self.name)
# Optional Tally filters
if len(self.filters) > 0:
subelement = ET.SubElement(element, "filters")
subelement.text = ' '.join(str(f.id) for f in self.filters)
# Optional Nuclides
if self.nuclides:
subelement = ET.SubElement(element, "nuclides")
subelement.text = ' '.join(str(n) for n in self.nuclides)
# Scores
if len(self.scores) == 0:
msg = 'Unable to get XML for Tally ID="{}" since it does not ' \
'contain any scores'.format(self.id)
raise ValueError(msg)
else:
scores = ''
for score in self.scores:
scores += '{} '.format(score)
subelement = ET.SubElement(element, "scores")
subelement.text = scores.rstrip(' ')
# Tally estimator type
if self.estimator is not None:
subelement = ET.SubElement(element, "estimator")
subelement.text = self.estimator
# Optional Triggers
for trigger in self.triggers:
trigger.get_trigger_xml(element)
# Optional derivatives
if self.derivative is not None:
subelement = ET.SubElement(element, "derivative")
subelement.text = str(self.derivative.id)
return element
def contains_filter(self, filter_type):
"""Looks for a filter in the tally that matches a specified type
Parameters
----------
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
Returns
-------
filter_found : bool
True if the tally contains a filter of the requested type;
otherwise false
"""
for test_filter in self.filters:
if type(test_filter) is filter_type:
return True
return False
def find_filter(self, filter_type):
"""Return a filter in the tally that matches a specified type
Parameters
----------
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
Returns
-------
filter_found : openmc.Filter
            Filter from this tally with matching type
Raises
------
ValueError
If no matching Filter is found
"""
# Look through all of this Tally's Filters for the type requested
for test_filter in self.filters:
if type(test_filter) is filter_type:
return test_filter
# Also check to see if the desired filter is wrapped up in an
# aggregate
elif isinstance(test_filter, openmc.AggregateFilter):
if isinstance(test_filter.aggregate_filter, filter_type):
return test_filter
# If we did not find the Filter, throw an Exception
msg = 'Unable to find filter type "{}" in Tally ID="{}"'.format(
filter_type, self.id)
raise ValueError(msg)
def get_nuclide_index(self, nuclide):
"""Returns the index in the Tally's results array for a Nuclide bin
Parameters
----------
nuclide : str
The name of the Nuclide (e.g., 'H1', 'U238')
Returns
-------
nuclide_index : int
The index in the Tally data array for this nuclide.
Raises
------
KeyError
When the argument passed to the 'nuclide' parameter cannot be found
in the Tally.
"""
# Look for the user-requested nuclide in all of the Tally's Nuclides
for i, test_nuclide in enumerate(self.nuclides):
# If the Summary was linked, then values are Nuclide objects
if isinstance(test_nuclide, openmc.Nuclide):
if test_nuclide.name == nuclide:
return i
# If the Summary has not been linked, then values are ZAIDs
else:
if test_nuclide == nuclide:
return i
msg = ('Unable to get the nuclide index for Tally since "{}" '
'is not one of the nuclides'.format(nuclide))
raise KeyError(msg)
def get_score_index(self, score):
"""Returns the index in the Tally's results array for a score bin
Parameters
----------
score : str
The score string (e.g., 'absorption', 'nu-fission')
Returns
-------
score_index : int
The index in the Tally data array for this score.
Raises
------
ValueError
When the argument passed to the 'score' parameter cannot be found in
the Tally.
"""
try:
score_index = self.scores.index(score)
except ValueError:
msg = 'Unable to get the score index for Tally since "{}" ' \
'is not one of the scores'.format(score)
raise ValueError(msg)
return score_index
def get_filter_indices(self, filters=[], filter_bins=[]):
"""Get indices into the filter axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the filter
axis of the tally's data array (axis=0) for particular combinations
of filters and their corresponding bins.
Parameters
----------
filters : Iterable of openmc.FilterMeta
An iterable of filter types
(e.g., [MeshFilter, EnergyFilter]; default is [])
filter_bins : Iterable of tuple
A list of tuples of filter bins corresponding to the filter_types
parameter (e.g., [(1,), ((0., 0.625e-6),)]; default is []). Each
tuple contains bins for the corresponding filter type in the filters
parameter. Each bin is an integer ID for Material-, Surface-,
Cell-, Cellborn-, and Universe- Filters. Each bin is an integer
for the cell instance ID for DistribcellFilters. Each bin is a
2-tuple of floats for Energy- and Energyout- Filters corresponding
to the energy boundaries of the bin of interest. The bin is an
(x,y,z) 3-tuple for MeshFilters corresponding to the mesh cell
of interest. The order of the bins in the list must correspond to
the filter_types parameter.
Returns
-------
numpy.ndarray
A NumPy array of the filter indices
"""
cv.check_type('filters', filters, Iterable, openmc.FilterMeta)
cv.check_type('filter_bins', filter_bins, Iterable, tuple)
# If user did not specify any specific Filters, use them all
if not filters:
return np.arange(self.num_filter_bins)
# Initialize empty list of indices for each bin in each Filter
filter_indices = []
# Loop over all of the Tally's Filters
for i, self_filter in enumerate(self.filters):
# If a user-requested Filter, get the user-requested bins
for j, test_filter in enumerate(filters):
if type(self_filter) is test_filter:
bins = filter_bins[j]
break
else:
# If not a user-requested Filter, get all bins
if isinstance(self_filter, openmc.DistribcellFilter):
# Create list of cell instance IDs for distribcell Filters
bins = list(range(self_filter.num_bins))
elif isinstance(self_filter, openmc.EnergyFunctionFilter):
# EnergyFunctionFilters don't have bins so just add a None
bins = [None]
else:
# Create list of IDs for bins for all other filter types
bins = self_filter.bins
# Add indices for each bin in this Filter to the list
indices = np.array([self_filter.get_bin_index(b) for b in bins])
filter_indices.append(indices)
# Account for stride in each of the previous filters
for indices in filter_indices[:i]:
indices *= self_filter.num_bins
# Apply outer product sum between all filter bin indices
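        # Illustrative example: for two filters with 2 and 3 bins (in that
        # order), the second filter contributes indices [0, 1, 2] while the
        # first filter's indices are scaled by its stride of 3 to [0, 3];
        # summing every pairing yields the flat indices [0, 1, 2, 3, 4, 5].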
return list(map(sum, product(*filter_indices)))
def get_nuclide_indices(self, nuclides):
"""Get indices into the nuclide axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the nuclide
axis of the tally's data array (axis=1) for one or more nuclides.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
Returns
-------
numpy.ndarray
A NumPy array of the nuclide indices
"""
cv.check_iterable_type('nuclides', nuclides, str)
# If user did not specify any specific Nuclides, use them all
if not nuclides:
return np.arange(self.num_nuclides)
# Determine the score indices from any of the requested scores
nuclide_indices = np.zeros_like(nuclides, dtype=int)
for i, nuclide in enumerate(nuclides):
nuclide_indices[i] = self.get_nuclide_index(nuclide)
return nuclide_indices
def get_score_indices(self, scores):
"""Get indices into the score axis of this tally's data arrays.
This is a helper method for the Tally.get_values(...) method to
extract tally data. This method returns the indices into the score
axis of the tally's data array (axis=2) for one or more scores.
Parameters
----------
scores : list of str or openmc.CrossScore
A list of one or more score strings
(e.g., ['absorption', 'nu-fission']; default is [])
Returns
-------
numpy.ndarray
A NumPy array of the score indices
"""
for score in scores:
if not isinstance(score, (str, openmc.CrossScore)):
msg = 'Unable to get score indices for score "{}" in Tally ' \
'ID="{}" since it is not a string or CrossScore'\
.format(score, self.id)
raise ValueError(msg)
# Determine the score indices from any of the requested scores
if scores:
score_indices = np.zeros(len(scores), dtype=int)
for i, score in enumerate(scores):
score_indices[i] = self.get_score_index(score)
# If user did not specify any specific scores, use them all
else:
score_indices = np.arange(self.num_scores)
return score_indices
def get_values(self, scores=[], filters=[], filter_bins=[],
nuclides=[], value='mean'):
"""Returns one or more tallied values given a list of scores, filters,
filter bins and nuclides.
This method constructs a 3D NumPy array for the requested Tally data
indexed by filter bin, nuclide bin, and score index. The method will
order the data in the array as specified in the parameter lists.
Parameters
----------
scores : list of str
A list of one or more score strings
(e.g., ['absorption', 'nu-fission']; default is [])
filters : Iterable of openmc.FilterMeta
An iterable of filter types
(e.g., [MeshFilter, EnergyFilter]; default is [])
filter_bins : list of Iterables
A list of tuples of filter bins corresponding to the filter_types
parameter (e.g., [(1,), ((0., 0.625e-6),)]; default is []). Each
tuple contains bins for the corresponding filter type in the filters
            parameter. Each bin is the integer ID for 'material', 'surface',
'cell', 'cellborn', and 'universe' Filters. Each bin is an integer
for the cell instance ID for 'distribcell' Filters. Each bin is a
2-tuple of floats for 'energy' and 'energyout' filters corresponding
to the energy boundaries of the bin of interest. The bin is an
(x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell
of interest. The order of the bins in the list must correspond to
the filter_types parameter.
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
value : str
A string for the type of value to return - 'mean' (default),
'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted
Returns
-------
float or numpy.ndarray
A scalar or NumPy array of the Tally data indexed in the order
each filter, nuclide and score is listed in the parameters.
Raises
------
ValueError
When this method is called before the Tally is populated with data,
or the input parameters do not correspond to the Tally's attributes,
e.g., if the score(s) do not match those in the Tally.
"""
# Ensure that the tally has data
if (value == 'mean' and self.mean is None) or \
(value == 'std_dev' and self.std_dev is None) or \
(value == 'rel_err' and self.mean is None) or \
(value == 'sum' and self.sum is None) or \
(value == 'sum_sq' and self.sum_sq is None):
msg = 'The Tally ID="{}" has no data to return'.format(self.id)
raise ValueError(msg)
# Get filter, nuclide and score indices
filter_indices = self.get_filter_indices(filters, filter_bins)
nuclide_indices = self.get_nuclide_indices(nuclides)
score_indices = self.get_score_indices(scores)
# Construct outer product of all three index types with each other
indices = np.ix_(filter_indices, nuclide_indices, score_indices)
# Return the desired result from Tally
if value == 'mean':
data = self.mean[indices]
elif value == 'std_dev':
data = self.std_dev[indices]
elif value == 'rel_err':
data = self.std_dev[indices] / self.mean[indices]
elif value == 'sum':
data = self.sum[indices]
elif value == 'sum_sq':
data = self.sum_sq[indices]
else:
            msg = 'Unable to return results from Tally ID="{}" since the ' \
                  'requested value "{}" is not \'mean\', \'std_dev\', ' \
                  '\'rel_err\', \'sum\', or \'sum_sq\''.format(self.id, value)
raise LookupError(msg)
return data
def get_pandas_dataframe(self, filters=True, nuclides=True, scores=True,
derivative=True, paths=True, float_format='{:.2e}'):
"""Build a Pandas DataFrame for the Tally data.
This method constructs a Pandas DataFrame object for the Tally data
with columns annotated by filter, nuclide and score bin information.
        This capability has been tested for Pandas >=0.13.1. However, it is
        recommended to use Pandas v0.16 or newer since this method uses the
        Pandas MultiIndex feature.
Parameters
----------
filters : bool
Include columns with filter bin information (default is True).
nuclides : bool
Include columns with nuclide bin information (default is True).
scores : bool
Include columns with score bin information (default is True).
derivative : bool
Include columns with differential tally info (default is True).
paths : bool, optional
Construct columns for distribcell tally filters (default is True).
The geometric information in the Summary object is embedded into a
Multi-index column with a geometric "path" to each distribcell
instance.
float_format : str
All floats in the DataFrame will be formatted using the given
format string before printing.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with each column annotated by filter, nuclide and
score bin information (if these parameters are True), and the mean
and standard deviation of the Tally's data.
Raises
------
KeyError
When this method is called before the Tally is populated with data
"""
# Ensure that the tally has data
if self.mean is None or self.std_dev is None:
msg = 'The Tally ID="{}" has no data to return'.format(self.id)
raise KeyError(msg)
# Initialize a pandas dataframe for the tally data
df = pd.DataFrame()
# Find the total length of the tally data array
data_size = self.mean.size
# Build DataFrame columns for filters if user requested them
if filters:
# Append each Filter's DataFrame to the overall DataFrame
for f, stride in zip(self.filters, self.filter_strides):
filter_df = f.get_pandas_dataframe(
data_size, stride, paths=paths)
df = pd.concat([df, filter_df], axis=1)
# Include DataFrame column for nuclides if user requested it
if nuclides:
nuclides = []
column_name = 'nuclide'
for nuclide in self.nuclides:
if isinstance(nuclide, openmc.Nuclide):
nuclides.append(nuclide.name)
elif isinstance(nuclide, openmc.AggregateNuclide):
nuclides.append(nuclide.name)
column_name = '{}(nuclide)'.format(nuclide.aggregate_op)
else:
nuclides.append(nuclide)
# Tile the nuclide bins into a DataFrame column
nuclides = np.repeat(nuclides, len(self.scores))
tile_factor = data_size / len(nuclides)
df[column_name] = np.tile(nuclides, int(tile_factor))
# Include column for scores if user requested it
if scores:
scores = []
column_name = 'score'
for score in self.scores:
if isinstance(score, (str, openmc.CrossScore)):
scores.append(str(score))
elif isinstance(score, openmc.AggregateScore):
scores.append(score.name)
column_name = '{}(score)'.format(score.aggregate_op)
tile_factor = data_size / len(self.scores)
df[column_name] = np.tile(scores, int(tile_factor))
# Include columns for derivatives if user requested it
if derivative and (self.derivative is not None):
df['d_variable'] = self.derivative.variable
if self.derivative.material is not None:
df['d_material'] = self.derivative.material
if self.derivative.nuclide is not None:
df['d_nuclide'] = self.derivative.nuclide
# Append columns with mean, std. dev. for each tally bin
df['mean'] = self.mean.ravel()
df['std. dev.'] = self.std_dev.ravel()
df = df.dropna(axis=1)
# Expand the columns into Pandas MultiIndices for readability
if pd.__version__ >= '0.16':
columns = copy.deepcopy(df.columns.values)
# Convert all elements in columns list to tuples
for i, column in enumerate(columns):
if not isinstance(column, tuple):
columns[i] = (column,)
# Make each tuple the same length
max_len_column = len(max(columns, key=len))
for i, column in enumerate(columns):
delta_len = max_len_column - len(column)
if delta_len > 0:
new_column = list(column)
new_column.extend(['']*delta_len)
columns[i] = tuple(new_column)
# Create and set a MultiIndex for the DataFrame's columns, but only
# if any column actually is multi-level (e.g., a mesh filter)
if any(len(c) > 1 for c in columns):
df.columns = pd.MultiIndex.from_tuples(columns)
# Modify the df.to_string method so that it prints formatted strings.
# Credit to http://stackoverflow.com/users/3657742/chrisb for this trick
df.to_string = partial(df.to_string, float_format=float_format.format)
return df
def get_reshaped_data(self, value='mean'):
"""Returns an array of tally data with one dimension per filter.
The tally data in OpenMC is stored as a 3D array with the dimensions
corresponding to filters, nuclides and scores. As a result, tally data
can be opaque for a user to directly index (i.e., without use of
:meth:`openmc.Tally.get_values`) since one must know how to properly use
the number of bins and strides for each filter to index into the first
(filter) dimension.
This builds and returns a reshaped version of the tally data array with
unique dimensions corresponding to each tally filter. For example,
suppose this tally has arrays of data with shape (8,5,5) corresponding
to two filters (2 and 4 bins, respectively), five nuclides and five
        scores. This method will return a version of the data array with a
        new shape of (2,4,5,5) such that the first two dimensions
correspond directly to the two filters with two and four bins.
Parameters
----------
value : str
A string for the type of value to return - 'mean' (default),
'std_dev', 'rel_err', 'sum', or 'sum_sq' are accepted
Returns
-------
numpy.ndarray
The tally data array indexed by filters, nuclides and scores.
"""
# Get the 3D array of data in filters, nuclides and scores
data = self.get_values(value=value)
# Build a new array shape with one dimension per filter
new_shape = tuple(f.num_bins for f in self.filters)
new_shape += (self.num_nuclides, self.num_scores)
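        # e.g., filters with 2 and 4 bins, 5 nuclides and 5 scores give a new
        # shape of (2, 4, 5, 5)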
# Reshape the data with one dimension for each filter
data = np.reshape(data, new_shape)
return data
def hybrid_product(self, other, binary_op, filter_product=None,
nuclide_product=None, score_product=None):
"""Combines filters, scores and nuclides with another tally.
This is a helper method for the tally arithmetic operator overloaded
methods. It is called a "hybrid product" because it performs a
combination of tensor (or Kronecker) and entrywise (or Hadamard)
products. The filters from both tallies are combined using an entrywise
(or Hadamard) product on matching filters. By default, if all nuclides
are identical in the two tallies, the entrywise product is performed
across nuclides; else the tensor product is performed. By default, if
all scores are identical in the two tallies, the entrywise product is
performed across scores; else the tensor product is performed. Users
can also call the method explicitly and specify the desired product.
Parameters
----------
other : openmc.Tally
The tally on the right hand side of the hybrid product
binary_op : {'+', '-', '*', '/', '^'}
The binary operation in the hybrid product
filter_product : {'tensor', 'entrywise' or None}
The type of product (tensor or entrywise) to be performed between
filter data. The default is the entrywise product. Currently only
the entrywise product is supported since a tally cannot contain
two of the same filter.
nuclide_product : {'tensor', 'entrywise' or None}
The type of product (tensor or entrywise) to be performed between
nuclide data. The default is the entrywise product if all nuclides
between the two tallies are the same; otherwise the default is
the tensor product.
score_product : {'tensor', 'entrywise' or None}
The type of product (tensor or entrywise) to be performed between
score data. The default is the entrywise product if all scores
between the two tallies are the same; otherwise the default is
the tensor product.
Returns
-------
openmc.Tally
A new Tally that is the hybrid product with this one.
Raises
------
ValueError
When this method is called before the other tally is populated
with data.
"""
# Set default value for filter product if it was not set
if filter_product is None:
filter_product = 'entrywise'
elif filter_product == 'tensor':
            msg = 'Unable to perform Tally arithmetic with a tensor product ' \
                  'for the filter data as this is not currently supported.'
raise ValueError(msg)
# Set default value for nuclide product if it was not set
if nuclide_product is None:
if self.nuclides == other.nuclides:
nuclide_product = 'entrywise'
else:
nuclide_product = 'tensor'
# Set default value for score product if it was not set
if score_product is None:
if self.scores == other.scores:
score_product = 'entrywise'
else:
score_product = 'tensor'
# Check product types
cv.check_value('filter product', filter_product, _PRODUCT_TYPES)
cv.check_value('nuclide product', nuclide_product, _PRODUCT_TYPES)
cv.check_value('score product', score_product, _PRODUCT_TYPES)
# Check that results have been read
if not other.derived and other.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(other.id)
raise ValueError(msg)
new_tally = Tally()
new_tally._derived = True
new_tally.with_batch_statistics = True
new_tally._num_realizations = self.num_realizations
new_tally._estimator = self.estimator
new_tally._with_summary = self.with_summary
new_tally._sp_filename = self._sp_filename
# Construct a combined derived name from the two tally operands
if self.name != '' and other.name != '':
new_name = '({} {} {})'.format(self.name, binary_op, other.name)
new_tally.name = new_name
# Query the mean and std dev so the tally data is read in from file
# if it has not already been read in.
self.mean, self.std_dev, other.mean, other.std_dev
# Create copies of self and other tallies to rearrange for tally
# arithmetic
self_copy = copy.deepcopy(self)
other_copy = copy.deepcopy(other)
self_copy.sparse = False
other_copy.sparse = False
# Align the tally data based on desired hybrid product
data = self_copy._align_tally_data(other_copy, filter_product,
nuclide_product, score_product)
# Perform tally arithmetic operation
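        # The uncertainties below follow standard error propagation, treating
        # the two operands as independent:
        #   '+', '-' : absolute std. devs. add in quadrature
        #   '*', '/' : relative std. devs. add in quadrature
        #   '^'      : for a**b, std. dev. = |a**b| *
        #              sqrt((b/a * sig_a)**2 + (ln(a) * sig_b)**2)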
if binary_op == '+':
new_tally._mean = data['self']['mean'] + data['other']['mean']
new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 +
data['other']['std. dev.']**2)
elif binary_op == '-':
new_tally._mean = data['self']['mean'] - data['other']['mean']
new_tally._std_dev = np.sqrt(data['self']['std. dev.']**2 +
data['other']['std. dev.']**2)
elif binary_op == '*':
with np.errstate(divide='ignore', invalid='ignore'):
self_rel_err = data['self']['std. dev.'] / data['self']['mean']
other_rel_err = data['other']['std. dev.'] / data['other']['mean']
new_tally._mean = data['self']['mean'] * data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(self_rel_err**2 + other_rel_err**2)
elif binary_op == '/':
with np.errstate(divide='ignore', invalid='ignore'):
self_rel_err = data['self']['std. dev.'] / data['self']['mean']
other_rel_err = data['other']['std. dev.'] / data['other']['mean']
new_tally._mean = data['self']['mean'] / data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(self_rel_err**2 + other_rel_err**2)
elif binary_op == '^':
with np.errstate(divide='ignore', invalid='ignore'):
mean_ratio = data['other']['mean'] / data['self']['mean']
first_term = mean_ratio * data['self']['std. dev.']
second_term = \
np.log(data['self']['mean']) * data['other']['std. dev.']
new_tally._mean = data['self']['mean'] ** data['other']['mean']
new_tally._std_dev = np.abs(new_tally.mean) * \
np.sqrt(first_term**2 + second_term**2)
# Convert any infs and nans to zero
new_tally._mean[np.isinf(new_tally._mean)] = 0
new_tally._mean = np.nan_to_num(new_tally._mean)
new_tally._std_dev[np.isinf(new_tally._std_dev)] = 0
new_tally._std_dev = np.nan_to_num(new_tally._std_dev)
# Set tally attributes
if self_copy.estimator == other_copy.estimator:
new_tally.estimator = self_copy.estimator
if self_copy.with_summary and other_copy.with_summary:
new_tally.with_summary = self_copy.with_summary
if self_copy.num_realizations == other_copy.num_realizations:
new_tally.num_realizations = self_copy.num_realizations
# Add filters to the new tally
if filter_product == 'entrywise':
for self_filter in self_copy.filters:
new_tally.filters.append(self_filter)
else:
all_filters = [self_copy.filters, other_copy.filters]
for self_filter, other_filter in product(*all_filters):
new_filter = openmc.CrossFilter(self_filter, other_filter,
binary_op)
new_tally.filters.append(new_filter)
# Add nuclides to the new tally
if nuclide_product == 'entrywise':
for self_nuclide in self_copy.nuclides:
new_tally.nuclides.append(self_nuclide)
else:
all_nuclides = [self_copy.nuclides, other_copy.nuclides]
for self_nuclide, other_nuclide in product(*all_nuclides):
new_nuclide = openmc.CrossNuclide(self_nuclide, other_nuclide,
binary_op)
new_tally.nuclides.append(new_nuclide)
# Define helper function that handles score units appropriately
# depending on the binary operator
        def cross_score(score1, score2, binary_op):
            if binary_op in ('+', '-') and score1 == score2:
                return score1
            return openmc.CrossScore(score1, score2, binary_op)
# Add scores to the new tally
if score_product == 'entrywise':
for self_score in self_copy.scores:
new_score = cross_score(self_score, self_score, binary_op)
new_tally.scores.append(new_score)
else:
all_scores = [self_copy.scores, other_copy.scores]
for self_score, other_score in product(*all_scores):
new_score = cross_score(self_score, other_score, binary_op)
new_tally.scores.append(new_score)
return new_tally
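    # Hedged usage sketch (not from the original source; the tally names are
    # hypothetical and assume two compatible tallies loaded from a statepoint).
    # For the '*' operator above, relative errors add in quadrature, e.g.
    # 2.0 +/- 0.1 times 3.0 +/- 0.3 gives mean 6.0 and
    # std. dev. = |6.0| * sqrt((0.1/2.0)**2 + (0.3/3.0)**2) ~= 0.67.
    #
    #   rate = flux_tally.hybrid_product(xs_tally, binary_op='*',
    #                                    filter_product='entrywise',
    #                                    nuclide_product='tensor',
    #                                    score_product='tensor')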
@property
def filter_strides(self):
all_strides = []
stride = self.num_nuclides * self.num_scores
for self_filter in reversed(self.filters):
all_strides.append(stride)
stride *= self_filter.num_bins
return all_strides[::-1]
def _align_tally_data(self, other, filter_product, nuclide_product,
score_product):
"""Aligns data from two tallies for tally arithmetic.
This is a helper method to construct a dict of dicts of the "aligned"
data arrays from each tally for tally arithmetic. The method analyzes
the filters, scores and nuclides in both tallies and determines how to
appropriately align the data for vectorized arithmetic. For example,
if the two tallies have different filters, this method will use NumPy
        'tile' and 'repeat' operations to build new data arrays such that all
        possible combinations of the data in each tally's bins are formed
        when the arithmetic operation is applied to the arrays.
Parameters
----------
other : openmc.Tally
The tally to outer product with this tally
filter_product : {'entrywise'}
The type of product to be performed between filter data. Currently,
only the entrywise product is supported for the filter product.
nuclide_product : {'tensor', 'entrywise'}
The type of product (tensor or entrywise) to be performed between
nuclide data.
score_product : {'tensor', 'entrywise'}
The type of product (tensor or entrywise) to be performed between
score data.
Returns
-------
dict
            A dictionary of dictionaries of "aligned" 'mean' and 'std. dev.'
NumPy arrays for each tally's data.
"""
# Get the set of filters that each tally is missing
other_missing_filters = set(self.filters) - set(other.filters)
self_missing_filters = set(other.filters) - set(self.filters)
# Add filters present in self but not in other to other
for other_filter in other_missing_filters:
filter_copy = copy.deepcopy(other_filter)
other._mean = np.repeat(other.mean, filter_copy.num_bins, axis=0)
other._std_dev = np.repeat(other.std_dev, filter_copy.num_bins, axis=0)
other.filters.append(filter_copy)
# Add filters present in other but not in self to self
for self_filter in self_missing_filters:
filter_copy = copy.deepcopy(self_filter)
self._mean = np.repeat(self.mean, filter_copy.num_bins, axis=0)
self._std_dev = np.repeat(self.std_dev, filter_copy.num_bins, axis=0)
self.filters.append(filter_copy)
# Align other filters with self filters
for i, self_filter in enumerate(self.filters):
other_index = other.filters.index(self_filter)
# If necessary, swap other filter
if other_index != i:
other._swap_filters(self_filter, other.filters[i])
# Repeat and tile the data by nuclide in preparation for performing
# the tensor product across nuclides.
if nuclide_product == 'tensor':
self._mean = np.repeat(self.mean, other.num_nuclides, axis=1)
self._std_dev = np.repeat(self.std_dev, other.num_nuclides, axis=1)
other._mean = np.tile(other.mean, (1, self.num_nuclides, 1))
other._std_dev = np.tile(other.std_dev, (1, self.num_nuclides, 1))
# Add nuclides to each tally such that each tally contains the complete
# set of nuclides necessary to perform an entrywise product. New
# nuclides added to a tally will have all their scores set to zero.
else:
# Get the set of nuclides that each tally is missing
other_missing_nuclides = set(self.nuclides) - set(other.nuclides)
self_missing_nuclides = set(other.nuclides) - set(self.nuclides)
# Add nuclides present in self but not in other to other
for nuclide in other_missing_nuclides:
other._mean = np.insert(other.mean, other.num_nuclides, 0, axis=1)
other._std_dev = np.insert(other.std_dev, other.num_nuclides, 0,
axis=1)
other.nuclides.append(nuclide)
# Add nuclides present in other but not in self to self
for nuclide in self_missing_nuclides:
self._mean = np.insert(self.mean, self.num_nuclides, 0, axis=1)
self._std_dev = np.insert(self.std_dev, self.num_nuclides, 0,
axis=1)
self.nuclides.append(nuclide)
# Align other nuclides with self nuclides
for i, nuclide in enumerate(self.nuclides):
other_index = other.get_nuclide_index(nuclide)
# If necessary, swap other nuclide
if other_index != i:
other._swap_nuclides(nuclide, other.nuclides[i])
# Repeat and tile the data by score in preparation for performing
# the tensor product across scores.
if score_product == 'tensor':
self._mean = np.repeat(self.mean, other.num_scores, axis=2)
self._std_dev = np.repeat(self.std_dev, other.num_scores, axis=2)
other._mean = np.tile(other.mean, (1, 1, self.num_scores))
other._std_dev = np.tile(other.std_dev, (1, 1, self.num_scores))
# Add scores to each tally such that each tally contains the complete set
# of scores necessary to perform an entrywise product. New scores added
# to a tally will be set to zero.
else:
# Get the set of scores that each tally is missing
other_missing_scores = set(self.scores) - set(other.scores)
self_missing_scores = set(other.scores) - set(self.scores)
# Add scores present in self but not in other to other
for score in other_missing_scores:
other._mean = np.insert(other.mean, other.num_scores, 0, axis=2)
other._std_dev = np.insert(other.std_dev, other.num_scores, 0, axis=2)
other.scores.append(score)
# Add scores present in other but not in self to self
for score in self_missing_scores:
self._mean = np.insert(self.mean, self.num_scores, 0, axis=2)
self._std_dev = np.insert(self.std_dev, self.num_scores, 0, axis=2)
self.scores.append(score)
# Align other scores with self scores
for i, score in enumerate(self.scores):
other_index = other.scores.index(score)
# If necessary, swap other score
if other_index != i:
other._swap_scores(score, other.scores[i])
data = {}
data['self'] = {}
data['other'] = {}
data['self']['mean'] = self.mean
data['other']['mean'] = other.mean
data['self']['std. dev.'] = self.std_dev
data['other']['std. dev.'] = other.std_dev
return data
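    # Shape bookkeeping sketch for the 'tensor' nuclide product above, using
    # hypothetical sizes: self has 2 nuclides, other has 3, and both have F
    # filter bins and S scores.
    #
    #   a = np.repeat(self_mean, 3, axis=1)    # (F, 2, S) -> (F, 6, S)
    #   b = np.tile(other_mean, (1, 2, 1))     # (F, 3, S) -> (F, 6, S)
    #
    # The element-wise operation on the aligned arrays then covers all 2 x 3
    # nuclide pairings, while the 'entrywise' branch instead zero-pads missing
    # nuclides so that both arrays keep matching bins.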
def _swap_filters(self, filter1, filter2):
"""Reverse the ordering of two filters in this tally
This is a helper method for tally arithmetic which helps align the data
in two tallies with shared filters. This method reverses the order of
the two filters in place.
Parameters
----------
filter1 : Filter
The filter to swap with filter2
filter2 : Filter
The filter to swap with filter1
Raises
------
ValueError
If this is a derived tally or this method is called before the tally
is populated with data.
"""
cv.check_type('filter1', filter1, _FILTER_CLASSES)
cv.check_type('filter2', filter2, _FILTER_CLASSES)
# Check that the filters exist in the tally and are not the same
if filter1 == filter2:
return
elif filter1 not in self.filters:
msg = 'Unable to swap "{}" filter1 in Tally ID="{}" since it ' \
'does not contain such a filter'.format(filter1.type, self.id)
raise ValueError(msg)
elif filter2 not in self.filters:
msg = 'Unable to swap "{}" filter2 in Tally ID="{}" since it ' \
'does not contain such a filter'.format(filter2.type, self.id)
raise ValueError(msg)
# Construct lists of tuples for the bins in each of the two filters
filters = [type(filter1), type(filter2)]
if isinstance(filter1, openmc.DistribcellFilter):
filter1_bins = [b for b in range(filter1.num_bins)]
elif isinstance(filter1, openmc.EnergyFunctionFilter):
filter1_bins = [None]
else:
filter1_bins = filter1.bins
if isinstance(filter2, openmc.DistribcellFilter):
filter2_bins = [b for b in range(filter2.num_bins)]
elif isinstance(filter2, openmc.EnergyFunctionFilter):
filter2_bins = [None]
else:
filter2_bins = filter2.bins
# Create variables to store views of data in the misaligned structure
mean = {}
std_dev = {}
# Store the data from the misaligned structure
for i, (bin1, bin2) in enumerate(product(filter1_bins, filter2_bins)):
filter_bins = [(bin1,), (bin2,)]
if self.mean is not None:
mean[i] = self.get_values(
filters=filters, filter_bins=filter_bins, value='mean')
if self.std_dev is not None:
std_dev[i] = self.get_values(
filters=filters, filter_bins=filter_bins, value='std_dev')
# Swap the filters in the copied version of this Tally
filter1_index = self.filters.index(filter1)
filter2_index = self.filters.index(filter2)
self.filters[filter1_index] = filter2
self.filters[filter2_index] = filter1
# Realign the data
for i, (bin1, bin2) in enumerate(product(filter1_bins, filter2_bins)):
filter_bins = [(bin1,), (bin2,)]
indices = self.get_filter_indices(filters, filter_bins)
if self.mean is not None:
self.mean[indices, :, :] = mean[i]
if self.std_dev is not None:
self.std_dev[indices, :, :] = std_dev[i]
def _swap_nuclides(self, nuclide1, nuclide2):
"""Reverse the ordering of two nuclides in this tally
This is a helper method for tally arithmetic which helps align the data
in two tallies with shared nuclides. This method reverses the order of
the two nuclides in place.
Parameters
----------
nuclide1 : Nuclide
The nuclide to swap with nuclide2
nuclide2 : Nuclide
The nuclide to swap with nuclide1
Raises
------
ValueError
If this is a derived tally or this method is called before the tally
is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
cv.check_type('nuclide1', nuclide1, _NUCLIDE_CLASSES)
cv.check_type('nuclide2', nuclide2, _NUCLIDE_CLASSES)
# Check that the nuclides exist in the tally and are not the same
if nuclide1 == nuclide2:
msg = 'Unable to swap a nuclide with itself'
raise ValueError(msg)
elif nuclide1 not in self.nuclides:
msg = 'Unable to swap nuclide1 "{}" in Tally ID="{}" since it ' \
'does not contain such a nuclide'\
.format(nuclide1.name, self.id)
raise ValueError(msg)
elif nuclide2 not in self.nuclides:
msg = 'Unable to swap "{}" nuclide2 in Tally ID="{}" since it ' \
'does not contain such a nuclide'\
.format(nuclide2.name, self.id)
raise ValueError(msg)
# Swap the nuclides in the Tally
nuclide1_index = self.get_nuclide_index(nuclide1)
nuclide2_index = self.get_nuclide_index(nuclide2)
self.nuclides[nuclide1_index] = nuclide2
self.nuclides[nuclide2_index] = nuclide1
        # Adjust the mean data array to reflect the new nuclide order
if self.mean is not None:
nuclide1_mean = self.mean[:, nuclide1_index, :].copy()
nuclide2_mean = self.mean[:, nuclide2_index, :].copy()
self.mean[:, nuclide2_index, :] = nuclide1_mean
self.mean[:, nuclide1_index, :] = nuclide2_mean
        # Adjust the std_dev data array to reflect the new nuclide order
if self.std_dev is not None:
nuclide1_std_dev = self.std_dev[:, nuclide1_index, :].copy()
nuclide2_std_dev = self.std_dev[:, nuclide2_index, :].copy()
self.std_dev[:, nuclide2_index, :] = nuclide1_std_dev
self.std_dev[:, nuclide1_index, :] = nuclide2_std_dev
def _swap_scores(self, score1, score2):
"""Reverse the ordering of two scores in this tally
This is a helper method for tally arithmetic which helps align the data
in two tallies with shared scores. This method reverses the order
of the two scores in place.
Parameters
----------
score1 : str or CrossScore
The score to swap with score2
score2 : str or CrossScore
The score to swap with score1
Raises
------
ValueError
If this is a derived tally or this method is called before the tally
is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
# Check that the scores are valid
if not isinstance(score1, (str, openmc.CrossScore)):
msg = 'Unable to swap score1 "{}" in Tally ID="{}" since it is ' \
'not a string or CrossScore'.format(score1, self.id)
raise ValueError(msg)
elif not isinstance(score2, (str, openmc.CrossScore)):
msg = 'Unable to swap score2 "{}" in Tally ID="{}" since it is ' \
'not a string or CrossScore'.format(score2, self.id)
raise ValueError(msg)
# Check that the scores exist in the tally and are not the same
if score1 == score2:
msg = 'Unable to swap a score with itself'
raise ValueError(msg)
elif score1 not in self.scores:
msg = 'Unable to swap score1 "{}" in Tally ID="{}" since it ' \
'does not contain such a score'.format(score1, self.id)
raise ValueError(msg)
elif score2 not in self.scores:
msg = 'Unable to swap score2 "{}" in Tally ID="{}" since it ' \
'does not contain such a score'.format(score2, self.id)
raise ValueError(msg)
# Swap the scores in the Tally
score1_index = self.get_score_index(score1)
score2_index = self.get_score_index(score2)
self.scores[score1_index] = score2
self.scores[score2_index] = score1
        # Adjust the mean data array to reflect the new score order
if self.mean is not None:
score1_mean = self.mean[:, :, score1_index].copy()
score2_mean = self.mean[:, :, score2_index].copy()
self.mean[:, :, score2_index] = score1_mean
self.mean[:, :, score1_index] = score2_mean
        # Adjust the std_dev data array to reflect the new score order
if self.std_dev is not None:
score1_std_dev = self.std_dev[:, :, score1_index].copy()
score2_std_dev = self.std_dev[:, :, score2_index].copy()
self.std_dev[:, :, score2_index] = score1_std_dev
self.std_dev[:, :, score1_index] = score2_std_dev
def __add__(self, other):
"""Adds this tally to another tally or scalar value.
This method builds a new tally with data that is the sum of this
tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to add to this tally
Returns
-------
openmc.Tally
A new derived tally which is the sum of this tally and the other
tally or scalar value in the addition.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='+')
# If both tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.with_batch_statistics = True
new_tally.name = self.name
new_tally._mean = self.mean + other
new_tally._std_dev = self.std_dev
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to add "{}" to Tally ID="{}"'.format(other, self.id)
raise ValueError(msg)
return new_tally
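    # Minimal usage sketch (hypothetical tallies read from a statepoint; the
    # scalar offset is purely illustrative):
    #
    #   total = fast_flux_tally + thermal_flux_tally   # derived Tally
    #   shifted = fast_flux_tally + 1.0e-3             # scalar addition
    #   total.mean, total.std_dev                      # propagated statistics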
def __sub__(self, other):
"""Subtracts another tally or scalar value from this tally.
This method builds a new tally with data that is the difference of
this tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to subtract from this tally
Returns
-------
openmc.Tally
A new derived tally which is the difference of this tally and the
other tally or scalar value in the subtraction.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='-')
# If both tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean - other
new_tally._std_dev = self.std_dev
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to subtract "{}" from Tally ID="{}"'.format(other, self.id)
raise ValueError(msg)
return new_tally
def __mul__(self, other):
"""Multiplies this tally with another tally or scalar value.
This method builds a new tally with data that is the product of
this tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to multiply with this tally
Returns
-------
openmc.Tally
A new derived tally which is the product of this tally and the
other tally or scalar value in the multiplication.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='*')
# If original tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean * other
new_tally._std_dev = self.std_dev * np.abs(other)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to multiply Tally ID="{}" by "{}"'.format(self.id, other)
raise ValueError(msg)
return new_tally
def __truediv__(self, other):
"""Divides this tally by another tally or scalar value.
        This method builds a new tally with data that is the quotient of
this tally's data and that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : openmc.Tally or float
The tally or scalar value to divide this tally by
Returns
-------
openmc.Tally
            A new derived tally which is the quotient of this tally and the
other tally or scalar value in the division.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self.hybrid_product(other, binary_op='/')
# If original tally operands were sparse, sparsify the new tally
if self.sparse and other.sparse:
new_tally.sparse = True
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean / other
new_tally._std_dev = self.std_dev * np.abs(1. / other)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If this tally operand is sparse, sparsify the new tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to divide Tally ID="{}" by "{}"'.format(self.id, other)
raise ValueError(msg)
return new_tally
def __div__(self, other):
return self.__truediv__(other)
def __pow__(self, power):
"""Raises this tally to another tally or scalar value power.
        This method builds a new tally with data that is this tally's data
        raised to the power of the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
power : openmc.Tally or float
The tally or scalar value exponent
Returns
-------
openmc.Tally
A new derived tally which is this tally raised to the power of the
other tally or scalar value in the exponentiation.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(power, Tally):
new_tally = self.hybrid_product(power, binary_op='^')
# If original tally operand was sparse, sparsify the new tally
if self.sparse:
new_tally.sparse = True
elif isinstance(power, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self._mean ** power
self_rel_err = self.std_dev / self.mean
new_tally._std_dev = np.abs(new_tally._mean * power * self_rel_err)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realizations = self.num_realizations
new_tally.filters = copy.deepcopy(self.filters)
new_tally.nuclides = copy.deepcopy(self.nuclides)
new_tally.scores = copy.deepcopy(self.scores)
# If original tally was sparse, sparsify the exponentiated tally
new_tally.sparse = self.sparse
else:
msg = 'Unable to raise Tally ID="{}" to power "{}"'.format(self.id, power)
raise ValueError(msg)
return new_tally
def __radd__(self, other):
"""Right addition with a scalar value.
This reverses the operands and calls the __add__ method.
Parameters
----------
other : float
The scalar value to add to this tally
Returns
-------
openmc.Tally
A new derived tally of this tally added with the scalar value.
"""
return self + other
def __rsub__(self, other):
"""Right subtraction from a scalar value.
This reverses the operands and calls the __sub__ method.
Parameters
----------
other : float
The scalar value to subtract this tally from
Returns
-------
openmc.Tally
A new derived tally of this tally subtracted from the scalar value.
"""
return -1. * self + other
def __rmul__(self, other):
"""Right multiplication with a scalar value.
This reverses the operands and calls the __mul__ method.
Parameters
----------
other : float
The scalar value to multiply with this tally
Returns
-------
openmc.Tally
A new derived tally of this tally multiplied by the scalar value.
"""
return self * other
def __rdiv__(self, other):
"""Right division with a scalar value.
This reverses the operands and calls the __div__ method.
Parameters
----------
other : float
The scalar value to divide by this tally
Returns
-------
openmc.Tally
A new derived tally of the scalar value divided by this tally.
"""
return other * self**-1
def __abs__(self):
"""The absolute value of this tally.
Returns
-------
openmc.Tally
A new derived tally which is the absolute value of this tally.
"""
new_tally = copy.deepcopy(self)
new_tally._mean = np.abs(new_tally.mean)
return new_tally
def __neg__(self):
"""The negated value of this tally.
Returns
-------
openmc.Tally
A new derived tally which is the negated value of this tally.
"""
new_tally = self * -1
return new_tally
def get_slice(self, scores=[], filters=[], filter_bins=[], nuclides=[],
squeeze=False):
"""Build a sliced tally for the specified filters, scores and nuclides.
This method constructs a new tally to encapsulate a subset of the data
represented by this tally. The subset of data to include in the tally
slice is determined by the scores, filters and nuclides specified in
the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings (e.g., ['absorption',
            'nu-fission'])
filters : Iterable of openmc.FilterMeta
An iterable of filter types (e.g., [MeshFilter, EnergyFilter])
filter_bins : list of Iterables
A list of iterables of filter bins corresponding to the specified
filter types (e.g., [(1,), ((0., 0.625e-6),)]). Each iterable
contains bins to slice for the corresponding filter type in the
filters parameter. Each bin is the integer ID for 'material',
'surface', 'cell', 'cellborn', and 'universe' Filters. Each bin is
an integer for the cell instance ID for 'distribcell' Filters. Each
bin is a 2-tuple of floats for 'energy' and 'energyout' filters
corresponding to the energy boundaries of the bin of interest. The
bin is an (x,y,z) 3-tuple for 'mesh' filters corresponding to the
mesh cell of interest. The order of the bins in the list must
correspond to the `filters` argument.
nuclides : list of str
A list of nuclide name strings (e.g., ['U235', 'U238'])
squeeze : bool
Whether to remove filters with only a single bin in the sliced tally
Returns
-------
openmc.Tally
A new tally which encapsulates the subset of data requested in the
order each filter, nuclide and score is listed in the parameters.
Raises
------
ValueError
When this method is called before the Tally is populated with data.
"""
# Ensure that the tally has data
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
# Create deep copy of tally to return as sliced tally
new_tally = copy.deepcopy(self)
new_tally._derived = True
# Differentiate Tally with a new auto-generated Tally ID
new_tally.id = None
new_tally.sparse = False
if not self.derived and self.sum is not None:
new_sum = self.get_values(scores, filters, filter_bins,
nuclides, 'sum')
new_tally.sum = new_sum
if not self.derived and self.sum_sq is not None:
new_sum_sq = self.get_values(scores, filters, filter_bins,
nuclides, 'sum_sq')
new_tally.sum_sq = new_sum_sq
if self.mean is not None:
new_mean = self.get_values(scores, filters, filter_bins,
nuclides, 'mean')
new_tally._mean = new_mean
if self.std_dev is not None:
new_std_dev = self.get_values(scores, filters, filter_bins,
nuclides, 'std_dev')
new_tally._std_dev = new_std_dev
# SCORES
if scores:
score_indices = []
# Determine the score indices from any of the requested scores
for score in self.scores:
if score not in scores:
score_index = self.get_score_index(score)
score_indices.append(score_index)
# Loop over indices in reverse to remove excluded scores
for score_index in reversed(score_indices):
new_tally.remove_score(self.scores[score_index])
# NUCLIDES
if nuclides:
nuclide_indices = []
# Determine the nuclide indices from any of the requested nuclides
for nuclide in self.nuclides:
if nuclide.name not in nuclides:
nuclide_index = self.get_nuclide_index(nuclide.name)
nuclide_indices.append(nuclide_index)
# Loop over indices in reverse to remove excluded Nuclides
for nuclide_index in reversed(nuclide_indices):
new_tally.remove_nuclide(self.nuclides[nuclide_index])
# FILTERS
if filters:
# Determine the filter indices from any of the requested filters
for i, filter_type in enumerate(filters):
f = new_tally.find_filter(filter_type)
# Remove filters with only a single bin if requested
if squeeze:
if len(filter_bins[i]) == 1:
new_tally.filters.remove(f)
continue
else:
raise RuntimeError('Cannot remove sliced filter with '
'more than one bin.')
# Remove and/or reorder filter bins to user specifications
bin_indices = [f.get_bin_index(b)
for b in filter_bins[i]]
bin_indices = np.unique(bin_indices)
# Set bins for sliced filter
new_filter = copy.copy(f)
new_filter.bins = [f.bins[i] for i in bin_indices]
# Set number of bins manually for mesh/distribcell filters
if filter_type is openmc.DistribcellFilter:
new_filter._num_bins = f._num_bins
# Replace existing filter with new one
for j, test_filter in enumerate(new_tally.filters):
if isinstance(test_filter, filter_type):
new_tally.filters[j] = new_filter
# If original tally was sparse, sparsify the sliced tally
new_tally.sparse = self.sparse
return new_tally
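    # Usage sketch (hypothetical tally; the bins follow the conventions in the
    # docstring above): slice out one score and nuclide, keeping a single
    # incident-energy bin and squeezing out the now-trivial filter.
    #
    #   sliced = tally.get_slice(scores=['fission'], nuclides=['U235'],
    #                            filters=[openmc.EnergyFilter],
    #                            filter_bins=[((0., 0.625e-6),)],
    #                            squeeze=True)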
def summation(self, scores=[], filter_type=None,
filter_bins=[], nuclides=[], remove_filter=False):
"""Vectorized sum of tally data across scores, filter bins and/or
nuclides using tally aggregation.
        This method constructs a new tally that encapsulates the sum of the
        data in this tally. The tally data
sum is determined by the scores, filter bins and nuclides specified
in the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings to sum across
(e.g., ['absorption', 'nu-fission']; default is [])
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
filter_bins : Iterable of int or tuple
A list of the filter bins corresponding to the filter_type parameter
Each bin in the list is the integer ID for 'material', 'surface',
'cell', 'cellborn', and 'universe' Filters. Each bin is an integer
for the cell instance ID for 'distribcell' Filters. Each bin is a
2-tuple of floats for 'energy' and 'energyout' filters corresponding
to the energy boundaries of the bin of interest. Each bin is an
(x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of
interest.
nuclides : list of str
A list of nuclide name strings to sum across
(e.g., ['U235', 'U238']; default is [])
remove_filter : bool
If a filter is being summed over, this bool indicates whether to
remove that filter in the returned tally. Default is False.
Returns
-------
openmc.Tally
A new tally which encapsulates the sum of data requested.
"""
# Create new derived Tally for summation
tally_sum = Tally()
tally_sum._derived = True
tally_sum._estimator = self.estimator
tally_sum._num_realizations = self.num_realizations
tally_sum._with_batch_statistics = self.with_batch_statistics
tally_sum._with_summary = self.with_summary
tally_sum._sp_filename = self._sp_filename
tally_sum._results_read = self._results_read
# Get tally data arrays reshaped with one dimension per filter
mean = self.get_reshaped_data(value='mean')
std_dev = self.get_reshaped_data(value='std_dev')
# Sum across any filter bins specified by the user
if isinstance(filter_type, openmc.FilterMeta):
find_filter = self.find_filter(filter_type)
# If user did not specify filter bins, sum across all bins
if len(filter_bins) == 0:
bin_indices = np.arange(find_filter.num_bins)
if isinstance(find_filter, openmc.DistribcellFilter):
filter_bins = np.arange(find_filter.num_bins)
elif isinstance(find_filter, openmc.EnergyFunctionFilter):
filter_bins = [None]
else:
filter_bins = find_filter.bins
# Only sum across bins specified by the user
else:
bin_indices = \
[find_filter.get_bin_index(bin) for bin in filter_bins]
# Sum across the bins in the user-specified filter
for i, self_filter in enumerate(self.filters):
if type(self_filter) == filter_type:
shape = mean.shape
mean = np.take(mean, indices=bin_indices, axis=i)
std_dev = np.take(std_dev, indices=bin_indices, axis=i)
# NumPy take introduces a new dimension in output array
# for some special cases that must be removed
if len(mean.shape) > len(shape):
mean = np.squeeze(mean, axis=i)
std_dev = np.squeeze(std_dev, axis=i)
mean = np.sum(mean, axis=i, keepdims=True)
std_dev = np.sum(std_dev**2, axis=i, keepdims=True)
std_dev = np.sqrt(std_dev)
# Add AggregateFilter to the tally sum
if not remove_filter:
filter_sum = openmc.AggregateFilter(self_filter,
[tuple(filter_bins)], 'sum')
tally_sum.filters.append(filter_sum)
# Add a copy of each filter not summed across to the tally sum
else:
tally_sum.filters.append(copy.deepcopy(self_filter))
# Add a copy of this tally's filters to the tally sum
else:
tally_sum._filters = copy.deepcopy(self.filters)
# Sum across any nuclides specified by the user
if len(nuclides) != 0:
nuclide_bins = [self.get_nuclide_index(nuclide) for nuclide in nuclides]
axis_index = self.num_filters
mean = np.take(mean, indices=nuclide_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=nuclide_bins, axis=axis_index)
mean = np.sum(mean, axis=axis_index, keepdims=True)
std_dev = np.sum(std_dev**2, axis=axis_index, keepdims=True)
std_dev = np.sqrt(std_dev)
# Add AggregateNuclide to the tally sum
nuclide_sum = openmc.AggregateNuclide(nuclides, 'sum')
tally_sum.nuclides.append(nuclide_sum)
# Add a copy of this tally's nuclides to the tally sum
else:
tally_sum._nuclides = copy.deepcopy(self.nuclides)
# Sum across any scores specified by the user
if len(scores) != 0:
score_bins = [self.get_score_index(score) for score in scores]
axis_index = self.num_filters + 1
mean = np.take(mean, indices=score_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=score_bins, axis=axis_index)
mean = np.sum(mean, axis=axis_index, keepdims=True)
std_dev = np.sum(std_dev**2, axis=axis_index, keepdims=True)
std_dev = np.sqrt(std_dev)
# Add AggregateScore to the tally sum
score_sum = openmc.AggregateScore(scores, 'sum')
tally_sum.scores.append(score_sum)
# Add a copy of this tally's scores to the tally sum
else:
tally_sum._scores = copy.deepcopy(self.scores)
# Reshape condensed data arrays with one dimension for all filters
mean = np.reshape(mean, tally_sum.shape)
std_dev = np.reshape(std_dev, tally_sum.shape)
# Assign tally sum's data with the new arrays
tally_sum._mean = mean
tally_sum._std_dev = std_dev
# If original tally was sparse, sparsify the tally summation
tally_sum.sparse = self.sparse
return tally_sum
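    # Usage sketch (hypothetical tally): collapse the energy filter entirely
    # and, separately, aggregate two scores into a single 'sum' bin.
    #
    #   collapsed = tally.summation(filter_type=openmc.EnergyFilter,
    #                               remove_filter=True)
    #   abs_plus_fis = tally.summation(scores=['absorption', 'fission'])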
def average(self, scores=[], filter_type=None,
filter_bins=[], nuclides=[], remove_filter=False):
"""Vectorized average of tally data across scores, filter bins and/or
nuclides using tally aggregation.
        This method constructs a new tally that encapsulates the average of
        the data in this tally. The tally
data average is determined by the scores, filter bins and nuclides
specified in the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings to average across
(e.g., ['absorption', 'nu-fission']; default is [])
filter_type : openmc.FilterMeta
Type of the filter, e.g. MeshFilter
filter_bins : Iterable of int or tuple
A list of the filter bins corresponding to the filter_type parameter
Each bin in the list is the integer ID for 'material', 'surface',
'cell', 'cellborn', and 'universe' Filters. Each bin is an integer
for the cell instance ID for 'distribcell' Filters. Each bin is a
2-tuple of floats for 'energy' and 'energyout' filters corresponding
to the energy boundaries of the bin of interest. Each bin is an
(x,y,z) 3-tuple for 'mesh' filters corresponding to the mesh cell of
interest.
nuclides : list of str
A list of nuclide name strings to average across
(e.g., ['U235', 'U238']; default is [])
remove_filter : bool
If a filter is being averaged over, this bool indicates whether to
remove that filter in the returned tally. Default is False.
Returns
-------
openmc.Tally
A new tally which encapsulates the average of data requested.
"""
# Create new derived Tally for average
tally_avg = Tally()
tally_avg._derived = True
tally_avg._estimator = self.estimator
tally_avg._num_realizations = self.num_realizations
tally_avg._with_batch_statistics = self.with_batch_statistics
tally_avg._with_summary = self.with_summary
tally_avg._sp_filename = self._sp_filename
tally_avg._results_read = self._results_read
# Get tally data arrays reshaped with one dimension per filter
mean = self.get_reshaped_data(value='mean')
std_dev = self.get_reshaped_data(value='std_dev')
# Average across any filter bins specified by the user
if isinstance(filter_type, openmc.FilterMeta):
find_filter = self.find_filter(filter_type)
# If user did not specify filter bins, average across all bins
if len(filter_bins) == 0:
bin_indices = np.arange(find_filter.num_bins)
if isinstance(find_filter, openmc.DistribcellFilter):
filter_bins = np.arange(find_filter.num_bins)
elif isinstance(find_filter, openmc.EnergyFunctionFilter):
filter_bins = [None]
else:
filter_bins = find_filter.bins
# Only average across bins specified by the user
else:
bin_indices = \
[find_filter.get_bin_index(bin) for bin in filter_bins]
# Average across the bins in the user-specified filter
for i, self_filter in enumerate(self.filters):
if isinstance(self_filter, filter_type):
shape = mean.shape
mean = np.take(mean, indices=bin_indices, axis=i)
std_dev = np.take(std_dev, indices=bin_indices, axis=i)
# NumPy take introduces a new dimension in output array
# for some special cases that must be removed
if len(mean.shape) > len(shape):
mean = np.squeeze(mean, axis=i)
std_dev = np.squeeze(std_dev, axis=i)
mean = np.nanmean(mean, axis=i, keepdims=True)
std_dev = np.nanmean(std_dev**2, axis=i, keepdims=True)
std_dev /= len(bin_indices)
std_dev = np.sqrt(std_dev)
# Add AggregateFilter to the tally avg
if not remove_filter:
filter_sum = openmc.AggregateFilter(self_filter,
[tuple(filter_bins)], 'avg')
tally_avg.filters.append(filter_sum)
# Add a copy of each filter not averaged across to the tally avg
else:
tally_avg.filters.append(copy.deepcopy(self_filter))
# Add a copy of this tally's filters to the tally avg
else:
tally_avg._filters = copy.deepcopy(self.filters)
        # Average across any nuclides specified by the user
if len(nuclides) != 0:
nuclide_bins = [self.get_nuclide_index(nuclide) for nuclide in nuclides]
axis_index = self.num_filters
mean = np.take(mean, indices=nuclide_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=nuclide_bins, axis=axis_index)
mean = np.nanmean(mean, axis=axis_index, keepdims=True)
std_dev = np.nanmean(std_dev**2, axis=axis_index, keepdims=True)
std_dev /= len(nuclide_bins)
std_dev = np.sqrt(std_dev)
# Add AggregateNuclide to the tally avg
nuclide_avg = openmc.AggregateNuclide(nuclides, 'avg')
tally_avg.nuclides.append(nuclide_avg)
# Add a copy of this tally's nuclides to the tally avg
else:
tally_avg._nuclides = copy.deepcopy(self.nuclides)
        # Average across any scores specified by the user
if len(scores) != 0:
score_bins = [self.get_score_index(score) for score in scores]
axis_index = self.num_filters + 1
mean = np.take(mean, indices=score_bins, axis=axis_index)
std_dev = np.take(std_dev, indices=score_bins, axis=axis_index)
mean = np.nanmean(mean, axis=axis_index, keepdims=True)
std_dev = np.nanmean(std_dev**2, axis=axis_index, keepdims=True)
std_dev /= len(score_bins)
std_dev = np.sqrt(std_dev)
# Add AggregateScore to the tally avg
score_sum = openmc.AggregateScore(scores, 'avg')
tally_avg.scores.append(score_sum)
# Add a copy of this tally's scores to the tally avg
else:
tally_avg._scores = copy.deepcopy(self.scores)
# Reshape condensed data arrays with one dimension for all filters
mean = np.reshape(mean, tally_avg.shape)
std_dev = np.reshape(std_dev, tally_avg.shape)
# Assign tally avg's data with the new arrays
tally_avg._mean = mean
tally_avg._std_dev = std_dev
# If original tally was sparse, sparsify the tally average
tally_avg.sparse = self.sparse
return tally_avg
def diagonalize_filter(self, new_filter, filter_position=-1):
"""Diagonalize the tally data array along a new axis of filter bins.
This is a helper method for the tally arithmetic methods. This method
        adds the new filter to a derived tally constructed as a copy of this one.
The data in the derived tally arrays is "diagonalized" along the bins in
        the new filter. This functionality is used by the openmc.mgxs module to
transport-correct scattering matrices by subtracting a 'scatter-P1'
reaction rate tally with an energy filter from a 'scatter' reaction
rate tally with both energy and energyout filters.
Parameters
----------
new_filter : Filter
The filter along which to diagonalize the data in the new
filter_position : int
Where to place the new filter in the Tally.filters list. Defaults
to last position.
Returns
-------
openmc.Tally
            A new derived Tally with data diagonalized along the new filter.
"""
cv.check_type('new_filter', new_filter, _FILTER_CLASSES)
cv.check_type('filter_position', filter_position, Integral)
if new_filter in self.filters:
msg = 'Unable to diagonalize Tally ID="{}" which already ' \
'contains a "{}" filter'.format(self.id, type(new_filter))
raise ValueError(msg)
# Add the new filter to a copy of this Tally
new_tally = copy.deepcopy(self)
new_tally.filters.insert(filter_position, new_filter)
# Determine "base" indices along the new "diagonal", and the factor
# by which the "base" indices should be repeated to account for all
# other filter bins in the diagonalized tally
indices = np.arange(0, new_filter.num_bins**2, new_filter.num_bins+1)
diag_factor = self.num_filter_bins // new_filter.num_bins
diag_indices = np.zeros(self.num_filter_bins, dtype=int)
# Determine the filter indices along the new "diagonal"
for i in range(diag_factor):
start = i * new_filter.num_bins
end = (i+1) * new_filter.num_bins
diag_indices[start:end] = indices + (i * new_filter.num_bins**2)
# Inject this Tally's data along the diagonal of the diagonalized Tally
if not self.derived and self.sum is not None:
new_tally._sum = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._sum[diag_indices, :, :] = self.sum
if not self.derived and self.sum_sq is not None:
new_tally._sum_sq = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._sum_sq[diag_indices, :, :] = self.sum_sq
if self.mean is not None:
new_tally._mean = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._mean[diag_indices, :, :] = self.mean
if self.std_dev is not None:
new_tally._std_dev = np.zeros(new_tally.shape, dtype=np.float64)
new_tally._std_dev[diag_indices, :, :] = self.std_dev
# If original tally was sparse, sparsify the diagonalized tally
new_tally.sparse = self.sparse
return new_tally
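    # Hedged sketch of the mgxs-style correction mentioned in the docstring.
    # The tally names are hypothetical, and the choice of which energy filter
    # is broadcast is an assumption: take a filter present in the matrix tally
    # but absent from the P1 tally, diagonalize along it, then subtract.
    #
    #   missing = scatter_matrix_tally.find_filter(openmc.EnergyoutFilter)
    #   p1_diag = scatter_p1_tally.diagonalize_filter(missing)
    #   corrected = scatter_matrix_tally - p1_diag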
class Tallies(cv.CheckedList):
"""Collection of Tallies used for an OpenMC simulation.
This class corresponds directly to the tallies.xml input file. It can be
thought of as a normal Python list where each member is a :class:`Tally`. It
behaves like a list as the following example demonstrates:
>>> t1 = openmc.Tally()
>>> t2 = openmc.Tally()
>>> t3 = openmc.Tally()
>>> tallies = openmc.Tallies([t1])
>>> tallies.append(t2)
>>> tallies += [t3]
Parameters
----------
tallies : Iterable of openmc.Tally
Tallies to add to the collection
"""
def __init__(self, tallies=None):
super().__init__(Tally, 'tallies collection')
if tallies is not None:
self += tallies
def append(self, tally, merge=False):
"""Append tally to collection
Parameters
----------
tally : openmc.Tally
Tally to append
merge : bool
Indicate whether the tally should be merged with an existing tally,
if possible. Defaults to False.
"""
if not isinstance(tally, Tally):
msg = 'Unable to add a non-Tally "{}" to the ' \
'Tallies instance'.format(tally)
raise TypeError(msg)
if merge:
merged = False
# Look for a tally to merge with this one
for i, tally2 in enumerate(self):
# If a mergeable tally is found
if tally2.can_merge(tally):
# Replace tally2 with the merged tally
merged_tally = tally2.merge(tally)
self[i] = merged_tally
merged = True
break
# If no mergeable tally was found, simply add this tally
if not merged:
super().append(tally)
else:
super().append(tally)
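    # Usage sketch (hypothetical tallies): with merge=True a compatible tally
    # is folded into an existing entry instead of being stored twice.
    #
    #   tallies = openmc.Tallies([flux_tally])
    #   tallies.append(more_flux_tally, merge=True)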
def insert(self, index, item):
"""Insert tally before index
Parameters
----------
index : int
Index in list
item : openmc.Tally
Tally to insert
"""
super().insert(index, item)
def merge_tallies(self):
"""Merge any mergeable tallies together. Note that n-way merges are
possible.
"""
for i, tally1 in enumerate(self):
for j, tally2 in enumerate(self):
# Do not merge the same tally with itself
if i == j:
continue
# If the two tallies are mergeable
if tally1.can_merge(tally2):
# Replace tally 1 with the merged tally
merged_tally = tally1.merge(tally2)
self[i] = merged_tally
# Remove tally 2 since it is no longer needed
self.pop(j)
# Continue iterating from the first loop
break
def _create_tally_subelements(self, root_element):
for tally in self:
root_element.append(tally.to_xml_element())
def _create_mesh_subelements(self, root_element):
already_written = set()
for tally in self:
for f in tally.filters:
if isinstance(f, openmc.MeshFilter):
if f.mesh.id not in already_written:
if len(f.mesh.name) > 0:
root_element.append(ET.Comment(f.mesh.name))
root_element.append(f.mesh.to_xml_element())
already_written.add(f.mesh.id)
def _create_filter_subelements(self, root_element):
already_written = dict()
for tally in self:
for f in tally.filters:
if f not in already_written:
root_element.append(f.to_xml_element())
already_written[f] = f.id
elif f.id != already_written[f]:
# Set the IDs of identical filters with different
# user-defined IDs to the same value
f.id = already_written[f]
def _create_derivative_subelements(self, root_element):
# Get a list of all derivatives referenced in a tally.
derivs = []
for tally in self:
deriv = tally.derivative
if deriv is not None and deriv not in derivs:
derivs.append(deriv)
# Add the derivatives to the XML tree.
for d in derivs:
root_element.append(d.to_xml_element())
def export_to_xml(self, path='tallies.xml'):
"""Create a tallies.xml file that can be used for a simulation.
Parameters
----------
path : str
Path to file to write. Defaults to 'tallies.xml'.
"""
root_element = ET.Element("tallies")
self._create_mesh_subelements(root_element)
self._create_filter_subelements(root_element)
self._create_tally_subelements(root_element)
self._create_derivative_subelements(root_element)
# Clean the indentation in the file to be user-readable
clean_indentation(root_element)
# Check if path is a directory
p = Path(path)
if p.is_dir():
p /= 'tallies.xml'
# Write the XML Tree to the tallies.xml file
reorder_attributes(root_element) # TODO: Remove when support is Python 3.8+
tree = ET.ElementTree(root_element)
tree.write(str(p), xml_declaration=True, encoding='utf-8')
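    # Usage sketch: the path argument may be a file name or a directory; in
    # the latter case a 'tallies.xml' file is created inside it (the directory
    # name below is hypothetical).
    #
    #   tallies.export_to_xml()              # ./tallies.xml
    #   tallies.export_to_xml('run/case01')  # run/case01/tallies.xml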
| mit |
macks22/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/backend_tools.py | 8 | 27963 | """
Abstract base classes define the primitives for Tools.
These tools are used by `matplotlib.backend_managers.ToolManager`
:class:`ToolBase`
Simple stateless tool
:class:`ToolToggleBase`
Tool that has two states, only one Toggle tool can be
active at any given time for the same
`matplotlib.backend_managers.ToolManager`
"""
from matplotlib import rcParams
from matplotlib._pylab_helpers import Gcf
import matplotlib.cbook as cbook
from weakref import WeakKeyDictionary
import numpy as np
from matplotlib.externals import six
import warnings
class Cursors(object):
"""Simple namespace for cursor reference"""
HAND, POINTER, SELECT_REGION, MOVE = list(range(4))
cursors = Cursors()
# Views positions tool
_views_positions = 'viewpos'
class ToolBase(object):
"""
Base tool class
    A base tool that only implements the `trigger` method, or no method at all.
The tool is instantiated by `matplotlib.backend_managers.ToolManager`
Attributes
----------
toolmanager: `matplotlib.backend_managers.ToolManager`
ToolManager that controls this Tool
figure: `FigureCanvas`
Figure instance that is affected by this Tool
name: String
Used as **Id** of the tool, has to be unique among tools of the same
ToolManager
"""
default_keymap = None
"""
Keymap to associate with this tool
    **String**: List of comma-separated keys that will be used to call this
    tool when the keypress event of *self.figure.canvas* is emitted
"""
description = None
"""
Description of the Tool
    **String**: If the Tool is included in the Toolbar, this text is used
as a Tooltip
"""
image = None
"""
Filename of the image
**String**: Filename of the image to use in the toolbar. If None, the
`name` is used as a label in the toolbar button
"""
def __init__(self, toolmanager, name):
warnings.warn('Treat the new Tool classes introduced in v1.5 as ' +
'experimental for now, the API will likely change in ' +
'version 2.1, and some tools might change name')
self._name = name
self._figure = None
self.toolmanager = toolmanager
self.figure = toolmanager.canvas.figure
@property
def figure(self):
return self._figure
def trigger(self, sender, event, data=None):
"""
Called when this tool gets used
This method is called by
`matplotlib.backend_managers.ToolManager.trigger_tool`
Parameters
----------
event: `Event`
The Canvas event that caused this tool to be called
sender: object
Object that requested the tool to be triggered
data: object
Extra data
"""
pass
@figure.setter
def figure(self, figure):
"""
Set the figure
Set the figure to be affected by this tool
Parameters
----------
figure: `Figure`
"""
self._figure = figure
@property
def name(self):
"""Tool Id"""
return self._name
def destroy(self):
"""
Destroy the tool
This method is called when the tool is removed by
`matplotlib.backend_managers.ToolManager.remove_tool`
"""
pass
class ToolToggleBase(ToolBase):
"""
Toggleable tool
Every time it is triggered, it switches between enable and disable
"""
radio_group = None
"""Attribute to group 'radio' like tools (mutually exclusive)
**String** that identifies the group or **None** if not belonging to a
group
"""
cursor = None
"""Cursor to use when the tool is active"""
def __init__(self, *args, **kwargs):
ToolBase.__init__(self, *args, **kwargs)
self._toggled = False
def trigger(self, sender, event, data=None):
"""Calls `enable` or `disable` based on `toggled` value"""
if self._toggled:
self.disable(event)
else:
self.enable(event)
self._toggled = not self._toggled
def enable(self, event=None):
"""
Enable the toggle tool
`trigger` calls this method when `toggled` is False
"""
pass
def disable(self, event=None):
"""
Disable the toggle tool
`trigger` call this method when `toggled` is True.
This can happen in different circumstances
* Click on the toolbar tool button
* Call to `matplotlib.backend_managers.ToolManager.trigger_tool`
* Another `ToolToggleBase` derived tool is triggered
(from the same `ToolManager`)
"""
pass
@property
def toggled(self):
"""State of the toggled tool"""
return self._toggled
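# A minimal sketch of a custom toggle tool (not part of matplotlib; the class
# name, keymap and grid behaviour are illustrative only). It would be
# registered on a ToolManager-based figure with `add_tool`.
#
#   class ToolToggleGrid(ToolToggleBase):
#       """Toggle the axes grid on and off."""
#       description = 'Toggle the grid'
#       default_keymap = 'G'
#
#       def enable(self, event=None):
#           for ax in self.figure.get_axes():
#               ax.grid(True)
#           self.figure.canvas.draw_idle()
#
#       def disable(self, event=None):
#           for ax in self.figure.get_axes():
#               ax.grid(False)
#           self.figure.canvas.draw_idle()
#
#   fig.canvas.manager.toolmanager.add_tool('ToggleGrid', ToolToggleGrid)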
class SetCursorBase(ToolBase):
"""
    Change to the current cursor while the pointer is inside an axes
    This tool keeps track of all `ToolToggleBase` derived tools, and calls
set_cursor when a tool gets triggered
"""
def __init__(self, *args, **kwargs):
ToolBase.__init__(self, *args, **kwargs)
self._idDrag = self.figure.canvas.mpl_connect(
'motion_notify_event', self._set_cursor_cbk)
self._cursor = None
self._default_cursor = cursors.POINTER
self._last_cursor = self._default_cursor
self.toolmanager.toolmanager_connect('tool_added_event',
self._add_tool_cbk)
# process current tools
for tool in self.toolmanager.tools.values():
self._add_tool(tool)
def _tool_trigger_cbk(self, event):
if event.tool.toggled:
self._cursor = event.tool.cursor
else:
self._cursor = None
self._set_cursor_cbk(event.canvasevent)
def _add_tool(self, tool):
"""set the cursor when the tool is triggered"""
if getattr(tool, 'cursor', None) is not None:
self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
self._tool_trigger_cbk)
def _add_tool_cbk(self, event):
"""Process every newly added tool"""
if event.tool is self:
return
self._add_tool(event.tool)
def _set_cursor_cbk(self, event):
if not event:
return
if not getattr(event, 'inaxes', False) or not self._cursor:
if self._last_cursor != self._default_cursor:
self.set_cursor(self._default_cursor)
self._last_cursor = self._default_cursor
elif self._cursor:
cursor = self._cursor
if cursor and self._last_cursor != cursor:
self.set_cursor(cursor)
self._last_cursor = cursor
def set_cursor(self, cursor):
"""
Set the cursor
This method has to be implemented per backend
"""
raise NotImplementedError
class ToolCursorPosition(ToolBase):
"""
Send message with the current pointer position
This tool runs in the background reporting the position of the cursor
"""
def __init__(self, *args, **kwargs):
ToolBase.__init__(self, *args, **kwargs)
self._idDrag = self.figure.canvas.mpl_connect(
'motion_notify_event', self.send_message)
def send_message(self, event):
"""Call `matplotlib.backend_managers.ToolManager.message_event`"""
if self.toolmanager.messagelock.locked():
return
message = ' '
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
message = s
self.toolmanager.message_event(message, self)
class RubberbandBase(ToolBase):
"""Draw and remove rubberband"""
def trigger(self, sender, event, data):
"""Call `draw_rubberband` or `remove_rubberband` based on data"""
if not self.figure.canvas.widgetlock.available(sender):
return
if data is not None:
self.draw_rubberband(*data)
else:
self.remove_rubberband()
def draw_rubberband(self, *data):
"""
Draw rubberband
        This method must be implemented per backend
"""
raise NotImplementedError
def remove_rubberband(self):
"""
Remove rubberband
        This method should be implemented per backend
"""
pass
class ToolQuit(ToolBase):
"""Tool to call the figure manager destroy method"""
description = 'Quit the figure'
default_keymap = rcParams['keymap.quit']
def trigger(self, sender, event, data=None):
Gcf.destroy_fig(self.figure)
class ToolEnableAllNavigation(ToolBase):
"""Tool to enable all axes for toolmanager interaction"""
description = 'Enables all axes toolmanager'
default_keymap = rcParams['keymap.all_axes']
def trigger(self, sender, event, data=None):
if event.inaxes is None:
return
for a in self.figure.get_axes():
if (event.x is not None and event.y is not None
and a.in_axes(event)):
a.set_navigate(True)
class ToolEnableNavigation(ToolBase):
"""Tool to enable a specific axes for toolmanager interaction"""
description = 'Enables one axes toolmanager'
default_keymap = (1, 2, 3, 4, 5, 6, 7, 8, 9)
def trigger(self, sender, event, data=None):
if event.inaxes is None:
return
n = int(event.key) - 1
for i, a in enumerate(self.figure.get_axes()):
if (event.x is not None and event.y is not None
and a.in_axes(event)):
a.set_navigate(i == n)
class ToolGrid(ToolToggleBase):
"""Tool to toggle the grid of the figure"""
    description = 'Toggle Grid'
default_keymap = rcParams['keymap.grid']
def trigger(self, sender, event, data=None):
if event.inaxes is None:
return
ToolToggleBase.trigger(self, sender, event, data)
def enable(self, event):
event.inaxes.grid(True)
self.figure.canvas.draw_idle()
def disable(self, event):
event.inaxes.grid(False)
self.figure.canvas.draw_idle()
class ToolFullScreen(ToolToggleBase):
"""Tool to toggle full screen"""
    description = 'Toggle Fullscreen mode'
default_keymap = rcParams['keymap.fullscreen']
def enable(self, event):
self.figure.canvas.manager.full_screen_toggle()
def disable(self, event):
self.figure.canvas.manager.full_screen_toggle()
class AxisScaleBase(ToolToggleBase):
"""Base Tool to toggle between linear and logarithmic"""
def trigger(self, sender, event, data=None):
if event.inaxes is None:
return
ToolToggleBase.trigger(self, sender, event, data)
def enable(self, event):
self.set_scale(event.inaxes, 'log')
self.figure.canvas.draw_idle()
def disable(self, event):
self.set_scale(event.inaxes, 'linear')
self.figure.canvas.draw_idle()
class ToolYScale(AxisScaleBase):
"""Tool to toggle between linear and logarithmic scales on the Y axis"""
    description = 'Toggle Scale Y axis'
default_keymap = rcParams['keymap.yscale']
def set_scale(self, ax, scale):
ax.set_yscale(scale)
class ToolXScale(AxisScaleBase):
"""Tool to toggle between linear and logarithmic scales on the X axis"""
    description = 'Toggle Scale X axis'
default_keymap = rcParams['keymap.xscale']
def set_scale(self, ax, scale):
ax.set_xscale(scale)
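# --- Illustrative example (not part of the original module) -----------------
# AxisScaleBase only asks subclasses for a `set_scale(ax, scale)` method, so a
# tool that toggles both axes at once needs almost no code.  The class name
# and the idea of a combined toggle are assumptions for demonstration.
class _ExampleXYScaleTool(AxisScaleBase):
    """Toggle between linear and logarithmic scales on both axes"""
    description = 'Toggle Scale on both axes'
    def set_scale(self, ax, scale):
        # apply the same scale to the x and y axis of the given Axes
        ax.set_xscale(scale)
        ax.set_yscale(scale)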
class ToolViewsPositions(ToolBase):
"""
Auxiliary Tool to handle changes in views and positions
    Runs in the background and should be used by all the tools that
need to access the figure's history of views and positions, e.g.
* `ToolZoom`
* `ToolPan`
* `ToolHome`
* `ToolBack`
* `ToolForward`
"""
def __init__(self, *args, **kwargs):
self.views = WeakKeyDictionary()
self.positions = WeakKeyDictionary()
ToolBase.__init__(self, *args, **kwargs)
def add_figure(self):
"""Add the current figure to the stack of views and positions"""
if self.figure not in self.views:
self.views[self.figure] = cbook.Stack()
self.positions[self.figure] = cbook.Stack()
# Define Home
self.push_current()
        # Adding the clear method as an axobserver removes this burden from
# the backend
self.figure.add_axobserver(self.clear)
def clear(self, figure):
"""Reset the axes stack"""
if figure in self.views:
self.views[figure].clear()
self.positions[figure].clear()
def update_view(self):
"""
Update the viewlim and position from the view and
position stack for each axes
"""
views = self.views[self.figure]()
if views is None:
return
pos = self.positions[self.figure]()
if pos is None:
return
for i, a in enumerate(self.figure.get_axes()):
a._set_view(views[i])
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.figure.canvas.draw_idle()
def push_current(self):
"""push the current view limits and position onto the stack"""
views = []
pos = []
for a in self.figure.get_axes():
views.append(a._get_view())
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self.views[self.figure].push(views)
self.positions[self.figure].push(pos)
def refresh_locators(self):
"""Redraw the canvases, update the locators"""
for a in self.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
zaxis = getattr(a, 'zaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
if zaxis is not None:
locators.append(zaxis.get_major_locator())
locators.append(zaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.figure.canvas.draw_idle()
def home(self):
"""Recall the first view and position from the stack"""
self.views[self.figure].home()
self.positions[self.figure].home()
def back(self):
"""Back one step in the stack of views and positions"""
self.views[self.figure].back()
self.positions[self.figure].back()
def forward(self):
"""Forward one step in the stack of views and positions"""
self.views[self.figure].forward()
self.positions[self.figure].forward()
class ViewsPositionsBase(ToolBase):
"""Base class for `ToolHome`, `ToolBack` and `ToolForward`"""
_on_trigger = None
def trigger(self, sender, event, data=None):
self.toolmanager.get_tool(_views_positions).add_figure()
getattr(self.toolmanager.get_tool(_views_positions),
self._on_trigger)()
self.toolmanager.get_tool(_views_positions).update_view()
class ToolHome(ViewsPositionsBase):
"""Restore the original view lim"""
description = 'Reset original view'
image = 'home.png'
default_keymap = rcParams['keymap.home']
_on_trigger = 'home'
class ToolBack(ViewsPositionsBase):
"""Move back up the view lim stack"""
description = 'Back to previous view'
image = 'back.png'
default_keymap = rcParams['keymap.back']
_on_trigger = 'back'
class ToolForward(ViewsPositionsBase):
"""Move forward in the view lim stack"""
description = 'Forward to next view'
image = 'forward.png'
default_keymap = rcParams['keymap.forward']
_on_trigger = 'forward'
class ConfigureSubplotsBase(ToolBase):
"""Base tool for the configuration of subplots"""
description = 'Configure subplots'
image = 'subplots.png'
class SaveFigureBase(ToolBase):
"""Base tool for figure saving"""
description = 'Save the figure'
image = 'filesave.png'
default_keymap = rcParams['keymap.save']
class ZoomPanBase(ToolToggleBase):
"""Base class for `ToolZoom` and `ToolPan`"""
def __init__(self, *args):
ToolToggleBase.__init__(self, *args)
self._button_pressed = None
self._xypress = None
self._idPress = None
self._idRelease = None
self._idScroll = None
self.base_scale = 2.
def enable(self, event):
"""Connect press/release events and lock the canvas"""
self.figure.canvas.widgetlock(self)
self._idPress = self.figure.canvas.mpl_connect(
'button_press_event', self._press)
self._idRelease = self.figure.canvas.mpl_connect(
'button_release_event', self._release)
self._idScroll = self.figure.canvas.mpl_connect(
'scroll_event', self.scroll_zoom)
def disable(self, event):
"""Release the canvas and disconnect press/release events"""
self._cancel_action()
self.figure.canvas.widgetlock.release(self)
self.figure.canvas.mpl_disconnect(self._idPress)
self.figure.canvas.mpl_disconnect(self._idRelease)
self.figure.canvas.mpl_disconnect(self._idScroll)
def trigger(self, sender, event, data=None):
self.toolmanager.get_tool(_views_positions).add_figure()
ToolToggleBase.trigger(self, sender, event, data)
def scroll_zoom(self, event):
# https://gist.github.com/tacaswell/3144287
if event.inaxes is None:
return
ax = event.inaxes
cur_xlim = ax.get_xlim()
cur_ylim = ax.get_ylim()
# set the range
cur_xrange = (cur_xlim[1] - cur_xlim[0])*.5
cur_yrange = (cur_ylim[1] - cur_ylim[0])*.5
xdata = event.xdata # get event x location
ydata = event.ydata # get event y location
if event.button == 'up':
# deal with zoom in
scale_factor = 1 / self.base_scale
elif event.button == 'down':
# deal with zoom out
scale_factor = self.base_scale
else:
# deal with something that should never happen
scale_factor = 1
# set new limits
ax.set_xlim([xdata - cur_xrange*scale_factor,
xdata + cur_xrange*scale_factor])
ax.set_ylim([ydata - cur_yrange*scale_factor,
ydata + cur_yrange*scale_factor])
self.figure.canvas.draw_idle() # force re-draw
class ToolZoom(ZoomPanBase):
"""Zoom to rectangle"""
description = 'Zoom to rectangle'
image = 'zoom_to_rect.png'
default_keymap = rcParams['keymap.zoom']
cursor = cursors.SELECT_REGION
radio_group = 'default'
def __init__(self, *args):
ZoomPanBase.__init__(self, *args)
self._ids_zoom = []
def _cancel_action(self):
for zoom_id in self._ids_zoom:
self.figure.canvas.mpl_disconnect(zoom_id)
self.toolmanager.trigger_tool('rubberband', self)
self.toolmanager.get_tool(_views_positions).refresh_locators()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
def _press(self, event):
"""the _press mouse button in zoom to rect mode callback"""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
self._cancel_action()
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._cancel_action()
return
x, y = event.x, event.y
self._xypress = []
for i, a in enumerate(self.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a._get_view()))
id1 = self.figure.canvas.mpl_connect(
'motion_notify_event', self._mouse_move)
id2 = self.figure.canvas.mpl_connect(
'key_press_event', self._switch_on_zoom_mode)
id3 = self.figure.canvas.mpl_connect(
'key_release_event', self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self._mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self._mouse_move(event)
def _mouse_move(self, event):
"""the drag callback in zoom mode"""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, _ind, _view = self._xypress[0]
            # clip x, lastx and y, lasty to the axes bounding box
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.toolmanager.trigger_tool('rubberband',
self,
data=(x, y, lastx, lasty))
def _release(self, event):
"""the release mouse button callback in zoom to rect mode"""
for zoom_id in self._ids_zoom:
self.figure.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
if not self._xypress:
self._cancel_action()
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, _ind, view = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x - lastx) < 5 or abs(y - lasty) < 5:
self._cancel_action()
return
            # detect twinx/twiny axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if self._button_pressed == 1:
direction = 'in'
elif self._button_pressed == 3:
direction = 'out'
else:
continue
a._set_view_from_bbox((lastx, lasty, x, y), direction,
self._zoom_mode, twinx, twiny)
self._zoom_mode = None
self.toolmanager.get_tool(_views_positions).push_current()
self._cancel_action()
class ToolPan(ZoomPanBase):
"""Pan axes with left mouse, zoom with right"""
default_keymap = rcParams['keymap.pan']
description = 'Pan axes with left mouse, zoom with right'
image = 'move.png'
cursor = cursors.MOVE
radio_group = 'default'
def __init__(self, *args):
ZoomPanBase.__init__(self, *args)
self._idDrag = None
def _cancel_action(self):
self._button_pressed = None
self._xypress = []
self.figure.canvas.mpl_disconnect(self._idDrag)
self.toolmanager.messagelock.release(self)
self.toolmanager.get_tool(_views_positions).refresh_locators()
def _press(self, event):
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._cancel_action()
return
x, y = event.x, event.y
self._xypress = []
for i, a in enumerate(self.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.toolmanager.messagelock(self)
self._idDrag = self.figure.canvas.mpl_connect(
'motion_notify_event', self._mouse_move)
def _release(self, event):
if self._button_pressed is None:
self._cancel_action()
return
self.figure.canvas.mpl_disconnect(self._idDrag)
self.toolmanager.messagelock.release(self)
for a, _ind in self._xypress:
a.end_pan()
if not self._xypress:
self._cancel_action()
return
self.toolmanager.get_tool(_views_positions).push_current()
self._cancel_action()
def _mouse_move(self, event):
for a, _ind in self._xypress:
            # safer to use the button recorded at _press time than the
            # current button: multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.toolmanager.canvas.draw_idle()
default_tools = {'home': ToolHome, 'back': ToolBack, 'forward': ToolForward,
'zoom': ToolZoom, 'pan': ToolPan,
'subplots': 'ToolConfigureSubplots',
'save': 'ToolSaveFigure',
'grid': ToolGrid,
'fullscreen': ToolFullScreen,
'quit': ToolQuit,
'allnav': ToolEnableAllNavigation,
'nav': ToolEnableNavigation,
'xscale': ToolXScale,
'yscale': ToolYScale,
'position': ToolCursorPosition,
_views_positions: ToolViewsPositions,
'cursor': 'ToolSetCursor',
'rubberband': 'ToolRubberband',
}
"""Default tools"""
default_toolbar_tools = [['navigation', ['home', 'back', 'forward']],
['zoompan', ['pan', 'zoom']],
['layout', ['subplots']],
['io', ['save']]]
"""Default tools in the toolbar"""
def add_tools_to_manager(toolmanager, tools=default_tools):
"""
Add multiple tools to `ToolManager`
Parameters
----------
toolmanager: ToolManager
`backend_managers.ToolManager` object that will get the tools added
tools : {str: class_like}, optional
The tools to add in a {name: tool} dict, see `add_tool` for more
info.
"""
for name, tool in six.iteritems(tools):
toolmanager.add_tool(name, tool)
def add_tools_to_container(container, tools=default_toolbar_tools):
"""
Add multiple tools to the container.
Parameters
----------
container: Container
`backend_bases.ToolContainerBase` object that will get the tools added
tools : list, optional
List in the form
[[group1, [tool1, tool2 ...]], [group2, [...]]]
Where the tools given by tool1, and tool2 will display in group1.
See `add_tool` for details.
"""
for group, grouptools in tools:
for position, tool in enumerate(grouptools):
container.add_tool(tool, group, position)
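# --- Illustrative usage sketch (not part of the original module) ------------
# How a backend might wire everything together.  The `manager` object with
# `toolmanager` and `toolbar` attributes is an assumption for demonstration;
# the exact attribute names depend on the backend in use.
def _example_setup_tools(manager):
    # register the full default tool set with the ToolManager ...
    add_tools_to_manager(manager.toolmanager)
    # ... and place the toolbar subset into the visible tool container
    if manager.toolbar is not None:
        add_tools_to_container(manager.toolbar)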
| mit |
GuessWhoSamFoo/pandas | pandas/tests/indexing/multiindex/test_loc.py | 2 | 13320 | import itertools
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.util import testing as tm
@pytest.fixture
def single_level_multiindex():
"""single level MultiIndex"""
return MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
@pytest.fixture
def frame_random_data_integer_multi_index():
levels = [[0, 1], [0, 1, 2]]
codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, codes=codes)
return DataFrame(np.random.randn(6, 2), index=index)
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
class TestMultiIndexLoc(object):
def test_loc_getitem_series(self):
# GH14730
# passing a series as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = Series([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
result = x.loc[[1, 3]]
tm.assert_series_equal(result, expected)
# GH15424
y1 = Series([1, 3], index=[1, 2])
result = x.loc[y1]
tm.assert_series_equal(result, expected)
empty = Series(data=[], dtype=np.float64)
expected = Series([], index=MultiIndex(
levels=index.levels, codes=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
def test_loc_getitem_array(self):
# GH15434
# passing an array as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = np.array([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
# empty array:
empty = np.array([])
expected = Series([], index=MultiIndex(
levels=index.levels, codes=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
# 0-dim array (scalar):
scalar = np.int64(1)
expected = Series(
data=[0, 1, 2],
index=['A', 'B', 'C'],
dtype=np.float64)
result = x.loc[scalar]
tm.assert_series_equal(result, expected)
def test_loc_multiindex(self):
mi_labels = DataFrame(np.random.randn(3, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
# the first row
rs = mi_labels.loc['i']
with catch_warnings(record=True):
xp = mi_labels.ix['i']
tm.assert_frame_equal(rs, xp)
# 2nd (last) columns
rs = mi_labels.loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# corner column
rs = mi_labels.loc['j'].loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix['j'].ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# with a tuple
rs = mi_labels.loc[('i', 'X')]
with catch_warnings(record=True):
xp = mi_labels.ix[('i', 'X')]
tm.assert_frame_equal(rs, xp)
rs = mi_int.loc[4]
with catch_warnings(record=True):
xp = mi_int.ix[4]
tm.assert_frame_equal(rs, xp)
# missing label
with pytest.raises(KeyError, match=r"^2L?$"):
mi_int.loc[2]
with catch_warnings(record=True):
# GH 21593
with pytest.raises(KeyError, match=r"^2L?$"):
mi_int.ix[2]
def test_loc_multiindex_indexer_none(self):
# GH6788
# multi-index indexer is None (meaning take all)
attributes = ['Attribute' + str(i) for i in range(1)]
attribute_values = ['Value' + str(i) for i in range(5)]
index = MultiIndex.from_product([attributes, attribute_values])
df = 0.1 * np.random.randn(10, 1 * 5) + 0.5
df = DataFrame(df, columns=index)
result = df[attributes]
tm.assert_frame_equal(result, df)
# GH 7349
# loc with a multi-index seems to be doing fallback
df = DataFrame(np.arange(12).reshape(-1, 1),
index=MultiIndex.from_product([[1, 2, 3, 4],
[1, 2, 3]]))
expected = df.loc[([1, 2], ), :]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_incomplete(self):
# GH 7399
# incomplete indexers
s = Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.loc[:, 'a':'c']
result = s.loc[0:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[0:, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
# GH 7400
        # multiindex getitem with a list of indexers skips the wrong element
s = Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.iloc[[6, 7, 8, 12, 13, 14]]
result = s.loc[2:4:2, 'a':'c']
tm.assert_series_equal(result, expected)
def test_get_loc_single_level(self, single_level_multiindex):
single_level = single_level_multiindex
s = Series(np.random.randn(len(single_level)),
index=single_level)
for k in single_level.values:
s[k]
def test_loc_getitem_int_slice(self):
# GH 3053
# loc should treat integer slices like label slices
index = MultiIndex.from_tuples([t for t in itertools.product(
[6, 7, 8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8, :]
expected = df
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([t
for t in itertools.product(
[10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30, :]
expected = df.iloc[2:]
tm.assert_frame_equal(result, expected)
# doc examples
result = df.loc[10, :]
expected = df.iloc[0:2]
expected.index = ['a', 'b']
tm.assert_frame_equal(result, expected)
result = df.loc[:, 10]
# expected = df.ix[:,10] (this fails)
expected = df[10]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'indexer_type_1',
(list, tuple, set, slice, np.ndarray, Series, Index))
@pytest.mark.parametrize(
'indexer_type_2',
(list, tuple, set, slice, np.ndarray, Series, Index))
def test_loc_getitem_nested_indexer(self, indexer_type_1, indexer_type_2):
# GH #19686
# .loc should work with nested indexers which can be
# any list-like objects (see `pandas.api.types.is_list_like`) or slices
def convert_nested_indexer(indexer_type, keys):
if indexer_type == np.ndarray:
return np.array(keys)
if indexer_type == slice:
return slice(*keys)
return indexer_type(keys)
a = [10, 20, 30]
b = [1, 2, 3]
index = MultiIndex.from_product([a, b])
df = DataFrame(
np.arange(len(index), dtype='int64'),
index=index, columns=['Data'])
keys = ([10, 20], [2, 3])
types = (indexer_type_1, indexer_type_2)
# check indexers with all the combinations of nested objects
# of all the valid types
indexer = tuple(
convert_nested_indexer(indexer_type, k)
for indexer_type, k in zip(types, keys))
result = df.loc[indexer, 'Data']
expected = Series(
[1, 2, 4, 5], name='Data',
index=MultiIndex.from_product(keys))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer, is_level1, expected_error', [
([], False, None), # empty ok
(['A'], False, None),
(['A', 'D'], False, None),
(['D'], False, r"\['D'\] not in index"), # not any values found
(pd.IndexSlice[:, ['foo']], True, None),
(pd.IndexSlice[:, ['foo', 'bah']], True, None)
])
def test_loc_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1,
expected_error):
# GH 7866
# multi-index slicing with missing indexers
idx = MultiIndex.from_product([['A', 'B', 'C'],
['foo', 'bar', 'baz']],
names=['one', 'two'])
s = Series(np.arange(9, dtype='int64'), index=idx).sort_index()
if indexer == []:
expected = s.iloc[[]]
elif is_level1:
expected = Series([0, 3, 6], index=MultiIndex.from_product(
[['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index()
else:
exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']],
names=['one', 'two'])
expected = Series(np.arange(3, dtype='int64'),
index=exp_idx).sort_index()
if expected_error is not None:
with pytest.raises(KeyError, match=expected_error):
s.loc[indexer]
else:
result = s.loc[indexer]
tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
@pytest.mark.parametrize('indexer', [
lambda s: s.loc[[(2000, 3, 10), (2000, 3, 13)]],
lambda s: s.ix[[(2000, 3, 10), (2000, 3, 13)]]
])
def test_series_loc_getitem_fancy(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.reindex(s.index[49:51])
result = indexer(s)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns_indexer', [
([], slice(None)),
(['foo'], [])
])
def test_loc_getitem_duplicates_multiindex_empty_indexer(columns_indexer):
# GH 8737
# empty indexer
multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'],
['alpha', 'beta']))
df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sort_index(level=0, axis=1)
expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0])
result = df.loc[:, columns_indexer]
tm.assert_frame_equal(result, expected)
def test_loc_getitem_duplicates_multiindex_non_scalar_type_object():
# regression from < 0.14.0
# GH 7914
df = DataFrame([[np.mean, np.median], ['mean', 'median']],
columns=MultiIndex.from_tuples([('functs', 'mean'),
('functs', 'median')]),
index=['function', 'name'])
result = df.loc['function', ('functs', 'mean')]
expected = np.mean
assert result == expected
def test_loc_getitem_tuple_plus_slice():
# GH 671
df = DataFrame({'a': np.arange(10),
'b': np.arange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)}
).set_index(['a', 'b'])
expected = df.loc[0, 0]
result = df.loc[(0, 0), :]
tm.assert_series_equal(result, expected)
def test_loc_getitem_int(frame_random_data_integer_multi_index):
df = frame_random_data_integer_multi_index
result = df.loc[1]
expected = df[-3:]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
def test_loc_getitem_int_raises_exception(
frame_random_data_integer_multi_index):
df = frame_random_data_integer_multi_index
with pytest.raises(KeyError, match=r"^3L?$"):
df.loc[3]
def test_loc_getitem_lowerdim_corner(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
# test setup - check key not in dataframe
with pytest.raises(KeyError, match=r"^11L?$"):
df.loc[('bar', 'three'), 'B']
# in theory should be inserting in a sorted space????
df.loc[('bar', 'three'), 'B'] = 0
expected = 0
result = df.sort_index().loc[('bar', 'three'), 'B']
assert result == expected
| bsd-3-clause |