repo_name | path | copies | size | content | license
---|---|---|---|---|---|
xubenben/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 150 | 3651 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_,ms2.cluster_centers_)
assert_array_equal(ms1.labels_,ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be no fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
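# --- Illustrative sketch (not part of the original test module) ---
# A minimal end-to-end MeanShift run on the module-level blobs X defined
# above, showing the fit -> labels_ / cluster_centers_ workflow that the
# tests exercise. The helper name is hypothetical.
def _demo_mean_shift_usage():
    ms = MeanShift(bandwidth=1.2)
    ms.fit(X)
    # one row per detected cluster center, one label per sample
    return ms.cluster_centers_, ms.labels_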
| bsd-3-clause |
nguyentu1602/statsmodels | examples/run_all.py | 34 | 1740 | """run all examples to make sure we don't get an exception
Note:
If an example contains plt.show(), then all plot windows have to be closed
manually, at least in my setup.
Uncomment plt.show() to show all plot windows.
"""
from __future__ import print_function
from statsmodels.compat import input
stop_on_error = True
filelist = ['example_glsar.py', 'example_wls.py', 'example_gls.py',
'example_glm.py', 'example_ols_tftest.py', # 'example_rpy.py',
'example_ols.py', 'example_rlm.py',
'example_discrete.py', 'example_predict.py',
'example_ols_table.py',
# time series
'tsa/ex_arma2.py', 'tsa/ex_dates.py']
if __name__ == '__main__':
#temporarily disable show
import matplotlib.pyplot as plt
plt_show = plt.show
def noop(*args):
pass
plt.show = noop
msg = """Are you sure you want to run all of the examples?
This is done mainly to check that they are up to date.
(y/n) >>> """
cont = input(msg)
if 'y' in cont.lower():
for run_all_f in filelist:
try:
print('\n\nExecuting example file', run_all_f)
print('-----------------------' + '-' * len(run_all_f))
exec(open(run_all_f).read())
except:
# f might be overwritten in the executed file
print('**********************' + '*' * len(run_all_f))
print('ERROR in example file', run_all_f)
print('**********************' + '*' * len(run_all_f))
if stop_on_error:
raise
# reenable show after closing windows
plt.close('all')
plt.show = plt_show
plt.show()
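# --- Illustrative sketch (not part of the original script) ---
# An alternative to exec()-ing each example into the current namespace:
# run it in a fresh namespace with the standard-library runpy module, which
# also avoids the "f might be overwritten" issue noted above. The helper
# name is hypothetical; a sketch only.
def _run_example_isolated(fname):
    import runpy
    # executes the file as if it were __main__ and returns its globals
    return runpy.run_path(fname, run_name='__main__')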
| bsd-3-clause |
reichelu/copasul | src/copasul_plot.py | 1 | 30100 |
# author: Uwe Reichel, Budapest, 2016
import mylib as myl
import matplotlib as mpl
import matplotlib.pyplot as plt
import re
import sys
import numpy as np
import math
import copy as cp
# browsing through copa
# calling plot_main for selected feature sets
# do clst first (single plot)
def plot_browse(copa):
c = copa['data']
opt = copa['config']
o = opt['plot']['browse']
# not for online usage
if o['time'] != 'final': return
### clustering ###########
if o['type']['clst'] and o['type']['clst']['contours']:
plot_main({'call':'browse','state':'final','type':'clst','set':'contours','fit':copa},opt)
### stylization ##########
## domains
for x in sorted(o['type'].keys()):
if x=='clst': continue
## featsets
for y in o['type'][x]:
if not o['type'][x][y]: continue
## files
for ii in myl.numkeys(c):
## channels
for i in myl.numkeys(c[ii]):
# check grouping constraints ("and"-connected)
if "grp" in o:
do_plot = True
for g in o["grp"]:
if ((g not in c[ii][i]["grp"]) or
(c[ii][i]["grp"][g] != o["grp"][g])):
do_plot = False
break
if not do_plot:
continue
plot_browse_channel(copa,x,y,ii,i)
# channelwise processing of plot_browse()
# IN:
# copa
# typ type 'glob'|'loc'|'rhy_f0'|'complex'|...
# s set 'decl'|'acc'|'rhy'|'superpos'|'gestalt'|'bnd'...
# ii fileIdx
# i channelIdx
def plot_browse_channel(copa,typ,s,ii,i):
c = copa['data']
po = copa['config']['plot']['browse']
# time, f0, f0-residual
t = c[ii][i]['f0']['t']
y = c[ii][i]['f0']['y']
if 'r' in c[ii][i]['f0']:
r = c[ii][i]['f0']['r']
else:
r = c[ii][i]['f0']['y']
# for all complex plots range over global segments
if typ == 'complex':
if re.search('^bnd',s):
dom = 'bnd'
else:
dom = 'glob'
else:
dom = typ
# file stem to be displayed
myStm = c[ii][i]['fsys']['f0']['stm']
## segments
for j in myl.numkeys(c[ii][i][dom]):
# verbose to find plot again
if myl.ext_true(po,'verbose'):
print("file_i={}, channel_i={}, segment_i={}".format(ii,i,j))
# skip all but certain segment is reached
if ('single_plot' in po) and myl.ext_true(po['single_plot'],'active'):
if (ii != po['single_plot']['file_i'] or
i != po['single_plot']['channel_i'] or
j != po['single_plot']['segment_i']):
continue
if typ != 'complex':
if re.search('^rhy_',typ):
## tiers
for k in myl.numkeys(c[ii][i][dom][j]):
myFit = c[ii][i][dom][j][k][s]
myInfx = "{}-{}-{}-{}".format(ii,i,c[ii][i][dom][j][k]['tier'],k)
myTim = c[ii][i][dom][j][k]['t']
myTier = c[ii][i][dom][j][k]['tier']
myLoc = "{}:{}:[{} {}]".format(myStm,myTier,myTim[0],myTim[1])
obj = {'call':'browse','state':'final',
'fit':myFit,'type':typ,'set':s,
'infx':myInfx,'local':myLoc}
## sgc on
#sgc_rhy_wrp(obj,c[ii][i]['rhy_en'][j][k][s],
# c[ii][i]['rhy_f0'][j][k][s],copa['config'])
#continue
## sgc off
plot_main(obj,copa['config'])
else:
myFit = cp.deepcopy(c[ii][i][dom][j][s])
myLoc = plot_loc(c,ii,i,dom,j,myStm)
if dom == "glob" and s == "decl" and "eou" in c[ii][i][dom][j]:
myFit["eou"] = c[ii][i][dom][j]["eou"]
if s=='acc':
ys = myl.copa_yseg(copa,dom,ii,i,j,t,r)
else:
ys = myl.copa_yseg(copa,dom,ii,i,j,t,y)
obj = {'call':'browse','state':'final',
'fit':myFit,'type':typ,'set':s,'y':ys,
'infx':"{}-{}-{}".format(ii,i,j),'local':myLoc}
plot_main(obj,copa['config'])
else:
if re.search('^bnd',s):
# get key depending on s
if s=='bnd':
z='std'
elif s=='bnd_win':
z='win'
else:
z='trend'
## tiers
for k in myl.numkeys(c[ii][i][dom][j]):
if z not in c[ii][i][dom][j][k] or 'plot' not in c[ii][i][dom][j][k][z]:
continue
myObj = c[ii][i][dom][j][k][z]['plot']
if 'lab' in c[ii][i][dom][j][k][z]:
myLab = c[ii][i][dom][j][k][z]['lab']
else:
myLab = ''
myInfx = "{}-{}-{}-{}".format(ii,i,c[ii][i][dom][j][k]['tier'],k)
myTim = c[ii][i][dom][j][k]['t']
myTier = c[ii][i][dom][j][k]['tier']
myLoc = "{}:{}:[{} {}]".format(myStm,myTier,myTim[0],myTim[1])
obj = {'call': 'browse', 'state': 'final',
'type': 'complex', 'set': s,
'fit': myObj['fit'],
'y': myObj['y'],
't': myObj['t'],
'infx': myInfx,
'local': myLoc}
plot_main(obj,copa['config'])
else:
myLoc = plot_loc(c,ii,i,dom,j,myStm)
if 'lab' in c[ii][i][dom][j]:
myLab = c[ii][i][dom][j]['lab']
else:
myLab = ''
obj = {'call':'browse','state':'final','fit':copa,'type':'complex',
'set':s,'i':[ii,i,j],'infx':"{}-{}-{}".format(ii,i,j),'local':myLoc,
'lab': myLab, 't_glob': c[ii][i]['glob'][j]['to'], 'stm': myStm}
plot_main(obj,copa['config'])
return
def plot_loc(c,ii,i,dom,j,myStm):
myTim = c[ii][i][dom][j]['t']
if 'tier' in c[ii][i][dom][j]:
myTier = c[ii][i][dom][j]['tier']
myLoc = "{}:{}:[{} {}]".format(myStm,myTier,myTim[0],myTim[1])
else:
myLoc = "{}:[{} {}]".format(myStm,myTim[0],myTim[1])
return myLoc
# wrapper around plotting
# IN:
# obj dict depending on caller function
# 'call' - 'browse'|'grp'
# 'browse': iterate through copa file x channel x segment. 1 Plot per iter step
# + 1 plot for clustering
# 'grp': mean contours etc. according to groupings, 1 plot per group
# 'state' - 'online'|'final'|'group' for unique file names
# 'type' - 'glob'|'loc'|'clst'|'rhy_f0'|'rhy_en'|'complex'
# 'set' - 'decl'|'acc'|'rhy'|'superpos'|'gestalt'
# ... restrictions in combining 'type' and 'set'
# ... for several types ('clst', 'rhy_*') only one set is available,
# thus set spec just serves the purpose of uniform processing
# but is not (yet) used in plot_* below
# 'fit' - fit object depending on styl domain (can be entire copa dict)
# ... depending on calling function, e.g. to generate unique file names
# opt copa['config']
def plot_main(obj,opt):
po = opt['plot']
if plot_doNothing(obj,opt): return
if obj['call']=='browse':
# display location
if 'local' in obj:
print(obj['local'])
elif 'infx' in obj:
print(obj['infx'])
if obj['type']=='clst':
fig = plot_clst(obj,opt)
elif re.search('^(glob|loc)$',obj['type']):
fig = plot_styl_cont(obj,opt['fsys']['pic'])
elif re.search('^rhy',obj['type']):
fig = plot_styl_rhy(obj,opt)
elif obj['type']=='complex':
if re.search('(superpos|gestalt)',obj['set']):
fig = plot_styl_complex(obj,opt)
elif re.search('^bnd',obj['set']):
fig = plot_styl_bnd(obj,opt)
# save plot
if (po['browse']['save'] and fig):
# output file name
fs = opt['fsys']['pic']
fb = "{}/{}".format(fs['dir'],fs['stm'])
if 'infx' in obj:
fo = "{}_{}_{}_{}_{}.png".format(fb,obj['state'],obj['type'],obj['set'],obj['infx'])
else:
fo = "{}_{}_{}_{}.png".format(fb,obj['state'],obj['type'],obj['set'])
fig.savefig(fo)
elif obj['call']=='grp':
plot_grp(obj,opt)
return
# checks type-set compliance
# IN:
# obj - object passed to plot_main() by caller
# opt - copa[config]
# OUT:
# True if type/set not compliant, else False
def plot_doNothing(obj,opt):
if not opt['navigate']['do_plot']:
return True
po = opt['plot']
# final vs online
if ((obj['call'] != 'grp') and (po[obj['call']]['time'] != obj['state'])):
return True
# type not specified
if not po[obj['call']]['type'][obj['type']]:
return True
# type/set not compliant
if obj['set'] not in po[obj['call']]['type'][obj['type']]:
return True
# type-set set to 0 in config
if not po[obj['call']]['type'][obj['type']][obj['set']]:
return True
#### customized func to skip specified data portions ##########
# ! comment if not needed !
#return plot_doNothing_custom_senta_coop(obj,opt)
###############################################################
return False
def plot_doNothing_custom_senta_coop(obj,opt):
stm, t = '16-02-116-216-Coop', 33.4
if not obj['stm'] == stm:
return True
if obj['t_glob'][0] < t:
return True
return False
def plot_doNothing_custom_senta_comp(obj,opt):
if not obj['stm'] == '10-04-110-210-Comp':
return True
if obj['t_glob'][0] < 98.3:
return True
return False
def plot_doNothing_custom2(obj,opt):
cond = 'Coop'
if not re.search(cond,obj['local']):
return True
if not re.search('RW',obj['lab']):
return True
return False
def plot_doNothing_custom(obj,opt):
if not re.search('hun003',obj['local']):
return True
return False
# to be customized by user: criteria to skip certain segments
def plot_doNothing_custom1(obj,opt):
if (('local' in obj) and (not re.search('fra',obj['local']))):
return True
return False
# plot 3 declination objects underlying boundary features
def plot_styl_bnd(obj,opt):
if 'fit' not in obj:
return
# new figure
fig = plot_newfig()
# segment a.b
bid = plot_styl_cont({'fit':obj['fit']['ab'],'type':'glob','set':'decl','y':obj['y']['ab'],
't':obj['t']['ab'],'tnrm':False,'show':False,'newfig':False},opt)
# segment a
bid = plot_styl_cont({'fit':obj['fit']['a'],'type':'glob','set':'decl',
't':obj['t']['a'],'tnrm':False,'show':False,'newfig':False},opt)
# segment b
bid = plot_styl_cont({'fit':obj['fit']['b'],'type':'glob','set':'decl',
't':obj['t']['b'],'tnrm':False,'show':False,'newfig':False},opt)
plt.show()
return fig
# plotting mean contours by grouping
# only supported for glob|loc
# not supported in principle for a varying number of variables, since
# mean values cannot be derived then (e.g. gestalt: n AGs per IP)
def plot_grp(obj,opt):
dom = obj['type']
mySet = obj['set']
if not re.search('^(glob|loc)$',dom):
sys.exit("plot by grouping is supported only for glob and loc feature set")
c = obj['fit']['data']
h = plot_harvest(c,dom,mySet,opt['plot']['grp']['grouping'])
# output file stem
fb = "{}/{}".format(opt['fsys']['pic']['dir'],opt['fsys']['pic']['stm'])
# normalized time
t = np.linspace(opt['styl'][dom]['nrm']['rng'][0],opt['styl'][dom]['nrm']['rng'][1],100)
## over groupings
# pickle file storing the underlying data
# as dict:
# 'x': normalized time
# 'ylim': [minF0, maxF0] over all groupings for unifying the plots ylims
# 'y':
# myGroup:
# if mySet=="acc":
# F0 array
# if mySet=="decl":
# 'bl'|'ml'|'tl':
# F0 array
# getting ylim over all groups for unified yrange
fo_data = "{}_{}_{}_{}_grpPlotData.pickle".format(fb,obj['call'],obj['type'],obj['set'])
plot_data = {'x': t, 'ylim': [], 'y': {}}
all_y = []
for x in h:
if mySet=='acc':
y = np.polyval(np.mean(h[x],axis=0),t)
plot_data["y"][x] = y
all_y.extend(y)
elif mySet=='decl':
plot_data["y"][x] = {}
for reg in ['bl','ml','tl']:
y = np.polyval(np.mean(h[x][reg],axis=0),t)
plot_data["y"][x][reg] = y
all_y.extend(y)
plot_data['ylim'] = [np.min(all_y)-0.2, np.max(all_y)+0.2]
# again over all groups, this time plotting
for x in h:
fo = "{}_{}_{}_{}_{}.png".format(fb,obj['call'],obj['type'],obj['set'],x)
if mySet=='acc':
fig = plot_styl_cont({'fit':{'tn':t, 'y':plot_data["y"][x]},
'type':dom,'set':mySet,'show':False,
'ylim':plot_data['ylim']},opt)
fig.savefig(fo)
plt.close()
elif mySet=='decl':
o = {'fit':{'tn':t},'type':dom,'set':mySet,'show':False,'ylim':plot_data['ylim']}
for reg in ['bl','ml','tl']:
o['fit'][reg]={}
o['fit'][reg]['y'] = plot_data["y"][x][reg]
fig = plot_styl_cont(o,opt)
fig.savefig(fo)
plt.close()
myl.output_wrapper(plot_data,fo_data,'pickle')
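# --- Illustrative sketch (not part of the original module) ---
# How the grouping plot data written above could be read back for custom
# re-plotting, assuming myl.output_wrapper(...,'pickle') writes a standard
# pickle file. The helper name is hypothetical.
def _load_grp_plot_data(fo_data):
    import pickle
    with open(fo_data, 'rb') as h:
        d = pickle.load(h)
    # d['x']: normalized time, d['ylim']: shared y-range,
    # d['y'][myGroup]: mean contour(s) per grouping key
    return d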
# returns dict with feature matrices of glob|loc etc for each grouping key
# IN:
# c copa['data']
# dom 'glob'|'loc'|...
# mySet 'acc'|'decl'|...
# grp list of grouping fields in opt['plot']['grp']['grouping']
# OUT:
# h[myGrpKey] -> myFeatMatrix or mySubdict
# ... mySet=='decl' (for dom: glob and loc)
# -> ['bl'|'ml'|'tl'] -> [coefMatrix]
# mySet=='acc'
# -> [coefMatrix]
def plot_harvest(c,dom,mySet,grp):
h={}
# files
for ii in myl.numkeys(c):
# channels
for i in myl.numkeys(c[ii]):
# segments
for j in myl.numkeys(c[ii][i][dom]):
# grouping key
gk = copa_grp_key(grp,c,dom,ii,i,j)
# decl set (same for dom='glob'|'loc')
if mySet == 'decl':
# new key
if gk not in h:
h[gk]={}
for x in ['bl','ml','tl']:
h[gk][x]=np.asarray([])
for x in ['bl','ml','tl']:
h[gk][x] = myl.push(h[gk][x],c[ii][i][dom][j][mySet][x]['c'])
elif mySet == 'acc':
# new key
if gk not in h:
h[gk]=np.asarray([])
h[gk] = myl.push(h[gk],c[ii][i][dom][j][mySet]['c'])
return h
# grouping key string
# IN:
# grp list of grouping fields
# c copa['data']
# dom 'glob'|'loc'...
# ii fileIdx
# i channelIdx
# j segmentIdx
# OUT:
# string of groupingValues separated by '_'
# variable values can be taken from:
# 1. 'file': ii
# 2. 'channel': i
# 3. 'lab'|'lab_(pnt|int)': c[ii][i][dom][j]['lab']
# 4. 'lab_next': c[ii][i][dom][j+1]['lab'] if available, else '#'
# 5. myVar: c[ii][i]['grp'][myVar]
# includes no grouping (if grp==[])
def copa_grp_key(grp,c,dom,ii,i,j):
key = []
for x in grp:
if x=='file':
key.append(str(ii))
elif x=='channel':
key.append(str(i))
else:
if (x=='lab' or re.search('lab_(pnt|int)',x)):
z = c[ii][i][dom][j]
elif x=='lab_next':
if j+1 in c[ii][i][dom]:
z = c[ii][i][dom][j+1]
else:
z = {}
else:
z = c[ii][i]['grp']
if x in z:
key.append(str(z[x]))
else:
key.append('#')
return "_".join(key)
# init new figure with onclick->next, keypress->exit
# figsize can be customized
# IN:
# fs tuple <()>
# OUT:
# figure handle
def plot_newfig(fs=()):
if len(fs)==0:
fig = plt.figure()
else:
fig = plt.figure(figsize=fs)
cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
return fig
def plot_newfig_big():
fig = plt.figure(figsize=(15,15))
cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
return fig
def plot_newfig_verybig():
fig = plt.figure(figsize=(25,25))
cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
return fig
# plots IP-AG superposition or gestalt
# IN:
# obj dict
# 'fit' -> copa
# 'i' -> [myFileIdx myChannelIdx myGlobSegIdx]
# 'infx' -> myFileIdx_myChannelIdx_myGlobSegIdx
# opt
def plot_styl_complex(obj,opt):
c = obj['fit']['data']
ii,i,j = obj['i']
# global segment
gs = c[ii][i]['glob'][j]
# orig time and f0
t = c[ii][i]['f0']['t']
y = c[ii][i]['f0']['y']
tg = c[ii][i]['glob'][j]['t']
yi = myl.find_interval(t,tg)
ys = y[yi]
# unnormalized time
ttg = np.linspace(tg[0],tg[1],len(ys))
# new figure
fig = plot_newfig()
# plot F0 and global styl
bid = plot_styl_cont({'fit':gs['decl'],'type':'glob','set':'decl','y':ys,
't':ttg,'tnrm':False,'show':False,'newfig':False},opt)
# register to be added to loc contours
reg = gs['decl'][opt['styl']['register']]['y']
# over local segments
for jj in c[ii][i]['glob'][j]['ri']:
ls = cp.deepcopy(c[ii][i]['loc'][jj])
tl = ls['t'][0:2]
ttl = np.linspace(tl[0],tl[1],len(ls['acc']['y']))
if obj['set']=='superpos':
# add/denormFor register
if re.search('^(bl|ml|tl)$',opt['styl']['register']):
# part in local segment
regs = np.asarray(reg[myl.find_interval(ttg,tl)])
while len(regs) < len(ttl):
regs = myl.push(regs,regs[-1])
ls['acc']['y'] = ls['acc']['y']+regs
elif opt['styl']['register'] == 'rng':
bl_seg = gs['decl']['bl']['y'][myl.find_interval(ttg,tl)]
tl_seg = gs['decl']['tl']['y'][myl.find_interval(ttg,tl)]
yy = ls['acc']['y']
zz = np.asarray([])
for u in range(len(bl_seg)):
zz = myl.push(zz,bl_seg[u]+yy[u]*(tl_seg[u]-bl_seg[u]))
ls['acc']['y'] = zz
bid = plot_styl_cont({'fit':ls['acc'],'type':'loc','set':'acc','t':ttl,
'tnrm':False,'show':False,'newfig':False},opt)
else:
ls['decl']['t']=np.linspace(ls['t'][0],ls['t'][1],len(ls['acc']['y']))
bid = plot_styl_cont({'fit':ls['decl'],'type':'loc','set':'decl','t':ttl,
'tnrm':False,'show':False,'newfig':False},opt)
plt.show()
return fig
# plotting register and local contours
# IN:
# OBJ
# .fit dict returned by styl_decl_fit or styl_loc_fit
# .type 'glob'|'loc'
# .y original y values
# OPT copa['config']
def plot_styl_cont(obj,opt):
# to allow embedded calls (e.g. complex-superpos/gestalt)
for x in ['show','newfig','tnrm']:
if x not in obj:
obj[x]=True
if obj['newfig']:
fig = plot_newfig()
else:
fig = 0
if obj['tnrm']:
tn = obj['fit']['tn']
else:
tn = obj['t']
# type=='glob' --> 'set'=='decl'
if obj['set']=='decl':
bl = obj['fit']['bl']['y']
ml = obj['fit']['ml']['y']
tl = obj['fit']['tl']['y']
tbl,ybl = phal(tn,bl)
tml,yml = phal(tn,ml)
ttl,ytl = phal(tn,tl)
# + original f0
if 'y' in obj:
myTn,myY = phal(tn,obj['y'])
plt.plot(myTn,myY,'k.',linewidth=1)
# color specs
if ("plot" in opt and myl.ext_true(opt['plot'],'color')) or myl.ext_true(opt,'color'):
cc = ['-g','-r','-c']
lw = [4,4,4]
else:
#cc = ['--k','-k','--k']
cc = ['-k','-k','-k']
if obj['type']=='glob':
#cc = ['--k','-k','--k']
lw = [3,4,3]
else:
#cc = ['-k','-k','-k']
lw = [5,5,5]
plt.plot(tbl,ybl,cc[0],linewidth=lw[0])
plt.plot(tml,yml,cc[1],linewidth=lw[1])
plt.plot(ttl,ytl,cc[2],linewidth=lw[2])
#plt.plot(tbl,ybl,cc[0],tml,yml,cc[1],ttl,ytl,cc[2],linewidth=4)
# plot line crossings #!v
#if "eou" in obj["fit"]:
# z = obj["fit"]["eou"]
# plt.plot([z["tl_ml_cross_t"],z["tl_bl_cross_t"],z["ml_bl_cross_t"]],
# [z["tl_ml_cross_f0"],z["tl_bl_cross_f0"],z["ml_bl_cross_f0"]],"or",linewidth=20)
else:
if 'y' in obj:
myTn,myY = phal(tn,obj['y'])
plt.plot(myTn,myY,'k.',linewidth=1)
myTn,myY = phal(tn,obj['fit']['y'])
# color specs
if ("plot" in opt and myl.ext_true(opt['plot'],'color')) or myl.ext_true(opt,'color'):
cc = '-b'
else:
cc = '-k'
plt.plot(myTn,myY,cc,linewidth=6)
# ylim
if 'ylim' in obj:
plt.ylim((obj['ylim'][0], obj['ylim'][1]))
if obj['tnrm']:
plt.xlabel('time (nrm)')
else:
plt.xlabel('time (s)')
plt.ylabel('f (ST)')
if obj['show']:
plt.show()
return fig
# hacky length adjustment
def phal(t,y):
return myl.hal(cp.deepcopy(t),cp.deepcopy(y))
# speech rhythm verbose
# x - frequency
# y - coef
def plot_styl_rhy(obj,opt):
rhy = obj['fit']
if len(rhy['f'])==0 or len(rhy['c'])==0:
return
rb = opt['styl'][obj['type']]['rhy']['wgt']['rb']
# color
if opt['plot']['color']:
doco = True
else:
doco = False
#if 'SYL_2' in rhy['wgt']: #!csl
# del rhy['wgt']['SYL_2'] #!csl
fig, spl = plt.subplots(len(rhy['wgt'].keys()),1,squeeze=False)
cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
fig.subplots_adjust(hspace=0.8)
# domain-influence window
i=0
c_sum = sum(abs(rhy['c']))
# tiers
for x in sorted(rhy['wgt'].keys()):
if doco:
spl[i,0].stem(rhy['f'],abs(rhy['c'])/c_sum)
else:
mla,sla,bla = spl[i,0].stem(rhy['f'],abs(rhy['c'])/c_sum, '-.')
plt.setp(sla, 'color', 'k', 'linewidth', 2)
tit = x
#tit = 'influence on f0' #!csl
#spl[i,0].title.set_text(tit, fontsize=18)
spl[i,0].set_title(tit, fontsize=18)
r = rhy['wgt'][x]['rate']
b = [max([0,r-rb]), r+rb]
w = myl.intersect(myl.find(rhy['f'],'>=',b[0]),
myl.find(rhy['f'],'<=',b[1]))
if len(w)==0:
continue
ml,sl,bl = spl[i,0].stem(rhy['f'][w],abs(rhy['c'][w])/c_sum)
if doco:
plt.setp(sl, 'color', 'r', 'linewidth', 3)
else:
plt.setp(sl, 'color', 'k', 'linewidth', 4)
# local maxima (green lines)
#if 'f_lmax' in rhy:
# for fm in rhy['f_lmax']:
# spl[i,0].plot([fm,fm],[0,rhy['c_cog']/c_sum],'-g',linewidth=5)
# 1st spectral moment (thick black vertical line)
spl[i,0].plot([rhy['sm'][0],rhy['sm'][0]],[0,rhy['c_cog']/c_sum],'-k',linewidth=5)
#plt.ylim([0,0.4]) #!csl
spl[i,0].set_xlabel('f (Hz)', fontsize=18)
spl[i,0].set_ylabel('|coef|', fontsize=18)
i+=1
plt.show()
return fig
# IN:
# copa dict
# opt copa['config']
# OUT:
# fig object
# pickle file output to "opt[fsys][dir]/opt[fsys][stm]_clstPlotData.pickle"
# that stores dict of the following structure:
# {"glob"|"loc"}
# "ylim": ylim for uniform y-range in all subplots
# "x": array of normalized time
# "y":
# myClassIndex: F0 Array
def plot_clst(obj,opt):
copa = obj['fit']
## return
if (('cntr' not in copa['clst']['glob']) or
('cntr' not in copa['clst']['loc'])):
print('no clustering result to plot. Apply clustering first!')
return False
## color
if opt['plot']['color']:
cc = 'b'
else:
cc = 'k'
## glob
# time
rg = copa['config']['styl']['glob']['nrm']['rng'];
tg = np.linspace(rg[0],rg[1],100)
# coefs
cg = copa['clst']['glob']['cntr']
## loc
# time
rl = copa['config']['styl']['loc']['nrm']['rng'];
tl = np.linspace(rl[0],rl[1],100)
# coefs
cl = copa['clst']['loc']['cntr']
# number of subplots/rows, columns
nsp = len(cl)+1
nrow, ncol = nn_subplots(nsp)
fig, spl = plt.subplots(nrow,ncol) #,figsize=(15,60))
cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
#fig.tight_layout()
fig.subplots_adjust(hspace=0.8)
i_row=0
i_col=0
# glob and loc y values
(yg, yl) = ([], [])
for i in range(len(cg)):
yg = myl.push(yg,np.polyval([cg[i,:],0],tg))
for i in range(len(cl)):
yl = myl.push(yl,np.polyval(cl[i,:],tl))
ylim_g = [int(math.floor(np.min(yg))),
int(math.ceil(np.max(yg)))]
ylim_l = [int(math.floor(np.min(np.min(yl)))),
int(math.ceil(np.max(np.max(yl))))]
plot_data = {"glob": {"ylim": ylim_g, "x": tg, "y": {}},
"loc": {"ylim": ylim_l, "x": tl, "y": {}}}
for i in range(nsp):
if i==0:
for j in range(len(yg)):
spl[i_row,i_col].plot(tg,yg[j,:],cc,label="{}".format(j+1))
plot_data["glob"]["y"][j] = yg[j,:]
spl[i_row,i_col].set_title("g_*")
spl[i_row,i_col].set_ylim(ylim_g)
spl[i_row,i_col].set_xlabel('time (nrm)')
spl[i_row,i_col].set_ylabel('f (ST)')
spl[i_row,i_col].legend(loc='best', fontsize=8)
else:
spl[i_row,i_col].plot(tl,yl[i-1,:],cc)
spl[i_row,i_col].set_title("l_{}".format(i))
spl[i_row,i_col].set_ylim(ylim_l)
plot_data["loc"]["y"][i-1] = yl[i-1,:]
if i>1:
spl[i_row,i_col].set_xticks([])
spl[i_row,i_col].set_yticks([])
if i_col==ncol-1:
i_row+=1
i_col=0
else:
i_col+=1
fo_data = "{}/{}_clstPlotData.pickle".format(opt["fsys"]["pic"]["dir"],
opt["fsys"]["pic"]["stm"])
myl.output_wrapper(plot_data,fo_data,"pickle")
plt.show()
return fig
# returns optimal nrow, ncol depending on the number of subplots (input n)
def nn_subplots(n):
if n <= 4:
ncol = 2
nrow = 2
else:
ncol = 3
nrow = int(math.ceil(n/ncol))
return nrow, ncol
# click on plot -> next one
def onclick_next(event):
plt.close()
# press key -> exit
def onclick_exit(event):
sys.exit()
############# customized functions for publications etc #################
# for Slovak game corpus visualization only!
# syl rate influence on energy and f0 contour
# tier SYL_1 only
def sgc_rhy_wrp(obj,rhy_en,rhy_f0,opt):
if not re.search('turns',obj['local']):
return
fig, spl = plt.subplots(1,2)
cid1 = fig.canvas.mpl_connect('button_press_event', onclick_next)
cid2 = fig.canvas.mpl_connect('key_press_event', onclick_exit)
fig.subplots_adjust(hspace=0.8)
# energy
rhy = rhy_en
if len(rhy['f'])==0 or len(rhy['c'])==0:
return
c_sum = sum(abs(rhy['c']))
r = rhy['wgt']['SYL_1']['rate']
mla,sla,bla = spl[0].stem(rhy['f'],abs(rhy['c'])/c_sum, '-.')
plt.setp(sla, 'color', 'k', 'linewidth', 3)
spl[0].title.set_text('influence on energy')
rb = opt['styl']['rhy_en']['rhy']['wgt']['rb']
b = [max([0,r-rb]), r+rb]
w = myl.intersect(myl.find(rhy['f'],'>=',b[0]),
myl.find(rhy['f'],'<=',b[1]))
if len(w)==0:
return
ml,sl,bl = spl[0].stem(rhy['f'][w],abs(rhy['c'][w])/c_sum)
plt.setp(sl, 'color', 'k', 'linewidth', 4)
spl[0].set_xlabel('f (Hz)')
spl[0].set_ylabel('|coef|')
# f0
rhy = rhy_f0
if len(rhy['f'])==0 or len(rhy['c'])==0:
return
c_sum = sum(abs(rhy['c']))
r = rhy['wgt']['SYL_1']['rate']
mla,sla,bla = spl[1].stem(rhy['f'],abs(rhy['c'])/c_sum, '-.')
plt.setp(sla, 'color', 'k', 'linewidth', 3)
spl[1].title.set_text('influence on F0')
rb = opt['styl']['rhy_f0']['rhy']['wgt']['rb']
b = [max([0,r-rb]), r+rb]
w = myl.intersect(myl.find(rhy['f'],'>=',b[0]),
myl.find(rhy['f'],'<=',b[1]))
if len(w)==0:
return
ml,sl,bl = spl[1].stem(rhy['f'][w],abs(rhy['c'][w])/c_sum)
plt.setp(sl, 'color', 'k', 'linewidth', 4)
spl[1].set_xlabel('f (Hz)')
spl[1].set_ylabel('|coef|')
plt.show()
| mit |
pratapvardhan/scikit-learn | sklearn/preprocessing/data.py | 5 | 68513 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
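# --- Illustrative sketch (not part of the original module) ---
# Behavior of _handle_zeros_in_scale on a constant feature: zero entries of
# the scale vector are replaced by 1.0 so that later division is a no-op.
# The helper name is hypothetical.
def _demo_handle_zeros():
    s = np.array([0.0, 2.0])
    return _handle_zeros_in_scale(s)  # array([1., 2.])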
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
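# --- Illustrative sketch (not part of the original module) ---
# Column-wise standardization with scale(): each feature ends up with zero
# mean and unit (population, ddof=0) standard deviation. A sketch only; the
# helper name is hypothetical.
def _demo_scale():
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    Xs = scale(X)
    # np.allclose(Xs.mean(axis=0), 0.) and np.allclose(Xs.std(axis=0), 1.)
    return Xs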
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
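# --- Illustrative sketch (not part of the original module) ---
# The MinMaxScaler transform matches the formula in its docstring:
# X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)),
# X_scaled = X_std * (max - min) + min. The helper name and toy data are
# hypothetical.
def _demo_minmax_formula():
    X = np.array([[1., 4.], [3., 8.]])
    fmin, fmax = 0., 1.
    X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    X_manual = X_std * (fmax - fmin) + fmin
    X_scaled = MinMaxScaler(feature_range=(fmin, fmax)).fit_transform(X)
    return np.allclose(X_manual, X_scaled)  # True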
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
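# --- Illustrative sketch (not part of the original module) ---
# The incremental update cited in partial_fit (Chan, Golub & LeVeque 1983)
# combines the statistics of two batches without revisiting the data. A
# minimal per-feature version assuming population (ddof=0) variances, as
# used by StandardScaler; the helper name is hypothetical.
def _combine_mean_var(mean_a, var_a, n_a, mean_b, var_b, n_b):
    n = n_a + n_b
    delta = mean_b - mean_a
    mean = mean_a + delta * n_b / n
    # M2 (sum of squared deviations) of the combined batch
    m2 = var_a * n_a + var_b * n_b + delta ** 2 * n_a * n_b / n
    return mean, m2 / n, n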
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
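# --- Illustrative sketch (not part of the original module) ---
# MaxAbsScaler divides each column by its maximum absolute value and never
# shifts the data, so zero entries stay zero and sparse inputs keep their
# sparsity pattern. The helper name and toy matrix are hypothetical.
def _demo_maxabs_sparse():
    Xs = sparse.csr_matrix([[0., -4.], [2., 0.]])
    Xt = MaxAbsScaler().fit_transform(Xs)
    # Xt.toarray() -> [[0., -1.], [1., 0.]]; Xt.nnz == Xs.nnz
    return Xt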
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and scale component-wise
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to try to avoid a copy and scale inplace instead (if the
input is already a numpy array or a scipy.sparse CSR matrix and if
axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only scaling to the interquartile
range will be performed on the features of the CSR matrix) or to call
`X.toarray()` if the materialized dense array is expected to fit in
memory. To avoid memory copies, the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
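Examples
--------
A minimal sketch (hypothetical values; output formatting may differ
slightly between numpy versions):
>>> import numpy as np
>>> from sklearn.preprocessing import robust_scale
>>> robust_scale(np.array([[0., 1., 2., 3., 4.]]), axis=1)
array([[-1. , -0.5,  0. ,  0.5,  1. ]])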
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
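Examples
--------
A small illustrative sketch (the names shown are the defaults
generated by this method):
>>> import numpy as np
>>> from sklearn.preprocessing import PolynomialFeatures
>>> poly = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
>>> poly.get_feature_names()
['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']
>>> poly.get_feature_names(['a', 'b'])
['1', 'a', 'b', 'a^2', 'a b', 'b^2']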
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
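Examples
--------
A small sketch (hypothetical values; array formatting may vary with
the numpy version):
>>> from sklearn.preprocessing import normalize
>>> normalize([[3., 4.]])
array([[ 0.6,  0.8]])
>>> normalize([[3., 1.]], norm='l1')
array([[ 0.75,  0.25]])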
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrices (use the CSR format if you want to avoid the
burden of a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering. For instance, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
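Examples
--------
An illustrative sketch (hypothetical values):
>>> from sklearn.preprocessing import Normalizer
>>> Normalizer(norm='l1').fit_transform([[3., 1.]])
array([[ 0.75,  0.25]])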
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
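Examples
--------
A small sketch (values below or equal to the threshold map to 0):
>>> from sklearn.preprocessing import binarize
>>> binarize([[0.5, 1.2, -3.]], threshold=0.5)
array([[ 0.,  1.,  0.]])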
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
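Examples
--------
An illustrative sketch (hypothetical values):
>>> from sklearn.preprocessing import Binarizer
>>> Binarizer(threshold=1.0).fit_transform([[0.4, 1.2, 2.0]])
array([[ 0.,  1.,  1.]])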
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
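Examples
--------
A minimal sketch using a linear kernel (hypothetical data); the result
equals the linear kernel of the column-centered X:
>>> import numpy as np
>>> from sklearn.preprocessing import KernelCenterer
>>> X = np.array([[1., 2.], [3., 4.]])
>>> K = np.dot(X, X.T)  # linear kernel
>>> KernelCenterer().fit_transform(K)
array([[ 2., -2.],
       [-2.,  2.]])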
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
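# Kernel centering formula: K_centered = K - 1_n.K - K.1_n + 1_n.K.1_n,
# implemented below with the column means (K_fit_rows_) and grand mean
# (K_fit_all_) stored at fit time, plus the row means of this K.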
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
selected : "all" or array of indices or mask
Specify which features to apply the transform to.
copy : boolean, optional
Copy X even if it could be avoided.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
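Examples
--------
A small sketch of the selection logic (illustrative only):
>>> import numpy as np
>>> _transform_selected(np.arange(3.).reshape(1, 3),
...                     lambda X: X * 2, selected=[0, 2])
array([[ 0.,  4.,  1.]])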
"""
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float64
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
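# Offset each feature's values by the start index of that feature's
# block of output columns, so that every (feature, value) pair maps to
# a distinct column of the one-hot matrix.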
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that were seen during
# fit, i.e. those whose values are less than n_values_, selected via
# the mask. This means that if self.handle_unknown is "ignore", the
# row_indices and col_indices corresponding to unknown categorical
# features are ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
roxyboy/scikit-learn | sklearn/linear_model/coordinate_descent.py | 12 | 75078 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. For
``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
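Notes
-----
The largest value of the grid is taken, as in the code below, as
``alpha_max = max_j |X[:, j].T.dot(y)| / (n_samples * l1_ratio)``
(computed after the optional centering / normalization), i.e. the
smallest penalty for which all coefficients are zero; the remaining
values are log-spaced between ``alpha_max * eps`` and ``alpha_max``.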
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster at computing the same path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
--------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
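Examples
--------
A minimal sketch mirroring the ``lasso_path`` example above (only the
shape of the result is shown, to stay version-independent):
>>> import numpy as np
>>> from sklearn.linear_model import enet_path
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5,
...                                      alphas=[5., 1., .5])
>>> coefs.shape
(2, 3)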
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
check_input = 'check_input' not in params or params['check_input']
pre_fit = 'check_input' not in params or params['pre_fit']
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if pre_fit:
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False,
copy=False, Xy_precompute_order='F')
if alphas is None:
# No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, 'csc', dtype=np.float64,
order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
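Examples
--------
An illustrative sketch (compare with the ``Lasso`` example further
down; the exact coefficients depend on the solver tolerance):
>>> from sklearn.linear_model import ElasticNet
>>> clf = ElasticNet(alpha=0.1, l1_ratio=0.7)
>>> clf = clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
>>> clf.coef_.shape
(2,)
>>> round(float(clf.predict([[1, 1]])), 1)
1.0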
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False, Xy_precompute_order='F')
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False,
pre_fit=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices, since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
        # Handling copy_X correctly is important in what follows:
        # multiple functions touch X and subsamples of X, which can induce
        # a lot of memory duplication.
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
        # We do a double for loop folded into one, in order to be able to
        # iterate in parallel over l1_ratio values and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
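# A minimal sketch, equivalent in spirit to the bookkeeping loop above, of how
# the cross-validated grid is reduced to a single best (l1_ratio, alpha) pair:
# average the per-fold MSE and take the argmin over the grid. The helper name
# and the random array standing in for ``mse_paths`` are illustrative only.
def _mse_grid_selection_sketch(n_l1_ratio=2, n_folds=3, n_alphas=5):
    rng = np.random.RandomState(0)
    mse_paths = rng.rand(n_l1_ratio, n_folds, n_alphas)
    mean_mse = np.mean(mse_paths, axis=1)  # shape (n_l1_ratio, n_alphas)
    best_l1_idx, best_alpha_idx = np.unravel_index(np.argmin(mean_mse),
                                                   mean_mse.shape)
    return best_l1_idx, best_alpha_idx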
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
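# A minimal usage sketch for ``LassoCV`` (the helper name and the synthetic data
# are illustrative assumptions, not part of the estimator API): the alpha grid
# is built automatically and ``alpha_`` holds the value picked by
# cross-validation.
def _lasso_cv_usage_sketch():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 10)
    y_demo = X_demo[:, 0] + 0.1 * rng.randn(50)
    reg = LassoCV(cv=3).fit(X_demo, y_demo)
    # ``mse_path_`` has shape (n_alphas, n_folds), as documented above.
    return reg.alpha_, reg.mse_path_.shape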
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
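# A minimal usage sketch for ``ElasticNetCV`` with a list of ``l1_ratio`` values,
# following the advice in the docstring above (helper name and synthetic data
# are illustrative). If an explicit ``a * L1 + b * L2`` parametrization is
# preferred, use alpha = a + b and l1_ratio = a / (a + b).
def _enet_cv_usage_sketch():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 10)
    y_demo = X_demo[:, 0] - X_demo[:, 1] + 0.1 * rng.randn(50)
    reg = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1], cv=3)
    reg.fit(X_demo, y_demo)
    # Both the mixing parameter and the alpha are selected by cross-validation.
    return reg.l1_ratio_, reg.alpha_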
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
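# A minimal sketch of the ``warm_start`` behaviour documented above (data and
# helper name are illustrative): with ``warm_start=True`` the second call to
# ``fit`` starts coordinate descent from the previously learned ``coef_``
# instead of from zeros, which is useful when sweeping over alphas by hand.
def _multitask_warm_start_sketch():
    X_demo = [[0., 0.], [1., 1.], [2., 2.]]
    Y_demo = [[0., 0.], [1., 1.], [2., 2.]]
    clf = MultiTaskElasticNet(alpha=0.5, warm_start=True)
    clf.fit(X_demo, Y_demo)
    clf.set_params(alpha=0.1)
    clf.fit(X_demo, Y_demo)  # reuses the previous coefficients as the start
    return clf.coef_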
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
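# A minimal sketch (illustrative data and helper name) of the relationship set
# up by the constructor above: ``MultiTaskLasso`` is ``MultiTaskElasticNet``
# with the mixing parameter pinned to ``l1_ratio=1.0``, so the two fits below
# are expected to agree.
def _multitask_lasso_equivalence_sketch():
    X_demo = [[0., 0.], [1., 1.], [2., 2.]]
    Y_demo = [[0., 0.], [1., 1.], [2., 2.]]
    lasso_coef = MultiTaskLasso(alpha=0.1).fit(X_demo, Y_demo).coef_
    enet_coef = MultiTaskElasticNet(alpha=0.1, l1_ratio=1.0).fit(
        X_demo, Y_demo).coef_
    return np.allclose(lasso_coef, enet_coef)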
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
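# A minimal usage sketch for ``MultiTaskElasticNetCV`` passing a list of
# ``l1_ratio`` values so that the mixing parameter is also selected by
# cross-validation (helper name and data are illustrative assumptions).
def _multitask_enet_cv_usage_sketch():
    X_demo = [[0., 0.], [1., 1.], [2., 2.]]
    Y_demo = [[0., 0.], [1., 1.], [2., 2.]]
    clf = MultiTaskElasticNetCV(l1_ratio=[.1, .5, .9, 1.], cv=3)
    clf.fit(X_demo, Y_demo)
    return clf.alpha_, clf.l1_ratio_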
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
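# A minimal usage sketch for ``MultiTaskLassoCV`` (illustrative helper name and
# data), since this class has no doctest of its own above: with a 2-D target,
# ``alpha_`` is the value selected on the automatically built alpha grid.
def _multitask_lasso_cv_usage_sketch():
    X_demo = [[0., 0.], [1., 1.], [2., 2.]]
    Y_demo = [[0., 0.], [1., 1.], [2., 2.]]
    reg = MultiTaskLassoCV(cv=3).fit(X_demo, Y_demo)
    # ``mse_path_`` has shape (n_alphas, n_folds), as documented above.
    return reg.alpha_, reg.mse_path_.shape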
| bsd-3-clause |
vinodkc/spark | python/pyspark/pandas/strings.py | 14 | 71898 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
String functions on pandas-on-Spark Series
"""
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Union,
TYPE_CHECKING,
cast,
no_type_check,
)
import numpy as np
import pandas as pd
from pyspark.sql.types import StringType, BinaryType, ArrayType, LongType, MapType
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf
from pyspark.pandas.spark import functions as SF
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
class StringMethods(object):
"""String methods for pandas-on-Spark Series"""
def __init__(self, series: "ps.Series"):
if not isinstance(series.spark.data_type, (StringType, BinaryType, ArrayType)):
raise ValueError("Cannot call StringMethods on type {}".format(series.spark.data_type))
self._data = series
# Methods
def capitalize(self) -> "ps.Series":
"""
Convert Strings in the series to be capitalized.
Examples
--------
>>> s = ps.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
"""
@no_type_check
def pandas_capitalize(s) -> "ps.Series[str]":
return s.str.capitalize()
return self._data.pandas_on_spark.transform_batch(pandas_capitalize)
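    # The pattern used just above - wrap the vectorized pandas ``.str`` call in a
    # small function and hand it to ``transform_batch`` - is reused by most
    # methods in this class. A minimal sketch, assuming a running Spark session
    # and an illustrative Series:
    #
    #     s = ps.Series(['spark', 'pandas'])
    #     s.pandas_on_spark.transform_batch(lambda pser: pser.str.capitalize())
    #
    # Each pandas batch of the distributed Series is processed by the plain
    # pandas implementation, so results match pandas element-for-element.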
def title(self) -> "ps.Series":
"""
Convert Strings in the series to be titlecase.
Examples
--------
>>> s = ps.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
"""
@no_type_check
def pandas_title(s) -> "ps.Series[str]":
return s.str.title()
return self._data.pandas_on_spark.transform_batch(pandas_title)
def lower(self) -> "ps.Series":
"""
Convert strings in the Series/Index to all lowercase.
Examples
--------
>>> s = ps.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
"""
return self._data.spark.transform(F.lower)
def upper(self) -> "ps.Series":
"""
Convert strings in the Series/Index to all uppercase.
Examples
--------
>>> s = ps.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
"""
return self._data.spark.transform(F.upper)
def swapcase(self) -> "ps.Series":
"""
Convert strings in the Series/Index to be swapcased.
Examples
--------
>>> s = ps.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
@no_type_check
def pandas_swapcase(s) -> "ps.Series[str]":
return s.str.swapcase()
return self._data.pandas_on_spark.transform_batch(pandas_swapcase)
def startswith(self, pattern: str, na: Optional[Any] = None) -> "ps.Series":
"""
Test if the start of each string element matches a pattern.
Equivalent to :func:`str.startswith`.
Parameters
----------
pattern : str
Character sequence. Regular expressions are not accepted.
na : object, default None
Object shown if element is not a string. NaN converted to None.
Returns
-------
Series of bool or object
pandas-on-Spark Series of booleans indicating whether the given pattern
matches the start of each string element.
Examples
--------
>>> s = ps.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 None
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 None
dtype: object
Specifying na to be False instead of None.
>>> s.str.startswith('b', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
@no_type_check
def pandas_startswith(s) -> "ps.Series[bool]":
return s.str.startswith(pattern, na)
return self._data.pandas_on_spark.transform_batch(pandas_startswith)
def endswith(self, pattern: str, na: Optional[Any] = None) -> "ps.Series":
"""
Test if the end of each string element matches a pattern.
Equivalent to :func:`str.endswith`.
Parameters
----------
pattern : str
Character sequence. Regular expressions are not accepted.
na : object, default None
Object shown if element is not a string. NaN converted to None.
Returns
-------
Series of bool or object
pandas-on-Spark Series of booleans indicating whether the given pattern
matches the end of each string element.
Examples
--------
>>> s = ps.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 None
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 True
3 None
dtype: object
Specifying na to be False instead of None.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 True
3 False
dtype: bool
"""
@no_type_check
def pandas_endswith(s) -> "ps.Series[bool]":
return s.str.endswith(pattern, na)
return self._data.pandas_on_spark.transform_batch(pandas_endswith)
def strip(self, to_strip: Optional[str] = None) -> "ps.Series":
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified
characters from each string in the Series/Index from left and
right sides. Equivalent to :func:`str.strip`.
Parameters
----------
to_strip : str
Specifying the set of characters to be removed. All combinations
of this set of characters will be stripped. If None then
whitespaces are removed.
Returns
-------
Series of objects
Examples
--------
>>> s = ps.Series(['1. Ant.', '2. Bee!\\t', None])
>>> s
0 1. Ant.
1 2. Bee!\\t
2 None
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 None
dtype: object
>>> s.str.strip('12.')
0 Ant
1 Bee!\\t
2 None
dtype: object
>>> s.str.strip('.!\\t')
0 1. Ant
1 2. Bee
2 None
dtype: object
"""
@no_type_check
def pandas_strip(s) -> "ps.Series[str]":
return s.str.strip(to_strip)
return self._data.pandas_on_spark.transform_batch(pandas_strip)
def lstrip(self, to_strip: Optional[str] = None) -> "ps.Series":
"""
Remove leading characters.
Strip whitespaces (including newlines) or a set of specified
characters from each string in the Series/Index from left side.
Equivalent to :func:`str.lstrip`.
Parameters
----------
to_strip : str
Specifying the set of characters to be removed. All combinations
of this set of characters will be stripped. If None then
whitespaces are removed.
Returns
-------
Series of object
Examples
--------
>>> s = ps.Series(['1. Ant.', '2. Bee!\\t', None])
>>> s
0 1. Ant.
1 2. Bee!\\t
2 None
dtype: object
>>> s.str.lstrip('12.')
0 Ant.
1 Bee!\\t
2 None
dtype: object
"""
@no_type_check
def pandas_lstrip(s) -> "ps.Series[str]":
return s.str.lstrip(to_strip)
return self._data.pandas_on_spark.transform_batch(pandas_lstrip)
def rstrip(self, to_strip: Optional[str] = None) -> "ps.Series":
"""
Remove trailing characters.
Strip whitespaces (including newlines) or a set of specified
characters from each string in the Series/Index from right side.
Equivalent to :func:`str.rstrip`.
Parameters
----------
to_strip : str
Specifying the set of characters to be removed. All combinations
of this set of characters will be stripped. If None then
whitespaces are removed.
Returns
-------
Series of object
Examples
--------
>>> s = ps.Series(['1. Ant.', '2. Bee!\\t', None])
>>> s
0 1. Ant.
1 2. Bee!\\t
2 None
dtype: object
>>> s.str.rstrip('.!\\t')
0 1. Ant
1 2. Bee
2 None
dtype: object
"""
@no_type_check
def pandas_rstrip(s) -> "ps.Series[str]":
return s.str.rstrip(to_strip)
return self._data.pandas_on_spark.transform_batch(pandas_rstrip)
def get(self, i: int) -> "ps.Series":
"""
Extract element from each string or string list/tuple in the Series
at the specified position.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series of objects
Examples
--------
>>> s1 = ps.Series(["String", "123"])
>>> s1
0 String
1 123
dtype: object
>>> s1.str.get(1)
0 t
1 2
dtype: object
>>> s1.str.get(-1)
0 g
1 3
dtype: object
>>> s2 = ps.Series([["a", "b", "c"], ["x", "y"]])
>>> s2
0 [a, b, c]
1 [x, y]
dtype: object
>>> s2.str.get(0)
0 a
1 x
dtype: object
>>> s2.str.get(2)
0 c
1 None
dtype: object
"""
@no_type_check
def pandas_get(s) -> "ps.Series[str]":
return s.str.get(i)
return self._data.pandas_on_spark.transform_batch(pandas_get)
def isalnum(self) -> "ps.Series":
"""
Check whether all characters in each string are alphanumeric.
This is equivalent to running the Python string method
:func:`str.isalnum` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Examples
--------
>>> s1 = ps.Series(['one', 'one1', '1', ''])
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional
punctuation or whitespace will evaluate to false for an alphanumeric
check.
>>> s2 = ps.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
"""
@no_type_check
def pandas_isalnum(s) -> "ps.Series[bool]":
return s.str.isalnum()
return self._data.pandas_on_spark.transform_batch(pandas_isalnum)
def isalpha(self) -> "ps.Series":
"""
Check whether all characters in each string are alphabetic.
This is equivalent to running the Python string method
:func:`str.isalpha` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Examples
--------
>>> s1 = ps.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
"""
@no_type_check
def pandas_isalpha(s) -> "ps.Series[bool]":
return s.str.isalpha()
return self._data.pandas_on_spark.transform_batch(pandas_isalpha)
def isdigit(self) -> "ps.Series":
"""
Check whether all characters in each string are digits.
This is equivalent to running the Python string method
:func:`str.isdigit` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Examples
--------
>>> s = ps.Series(['23', '³', '⅕', ''])
The s.str.isdecimal method checks for characters used to form numbers
in base 10.
>>> s.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
The s.str.isdigit method is the same as s.str.isdecimal but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
The s.str.isnumeric method is the same as s.str.isdigit but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
"""
@no_type_check
def pandas_isdigit(s) -> "ps.Series[bool]":
return s.str.isdigit()
return self._data.pandas_on_spark.transform_batch(pandas_isdigit)
def isspace(self) -> "ps.Series":
"""
Check whether all characters in each string are whitespaces.
This is equivalent to running the Python string method
:func:`str.isspace` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Examples
--------
>>> s = ps.Series([' ', '\\t\\r\\n ', ''])
>>> s.str.isspace()
0 True
1 True
2 False
dtype: bool
"""
@no_type_check
def pandas_isspace(s) -> "ps.Series[bool]":
return s.str.isspace()
return self._data.pandas_on_spark.transform_batch(pandas_isspace)
def islower(self) -> "ps.Series":
"""
Check whether all characters in each string are lowercase.
This is equivalent to running the Python string method
:func:`str.islower` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Examples
--------
>>> s = ps.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
"""
@no_type_check
def pandas_isspace(s) -> "ps.Series[bool]":
return s.str.islower()
return self._data.pandas_on_spark.transform_batch(pandas_isspace)
def isupper(self) -> "ps.Series":
"""
Check whether all characters in each string are uppercase.
This is equivalent to running the Python string method
:func:`str.isupper` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Examples
--------
>>> s = ps.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
"""
@no_type_check
def pandas_isspace(s) -> "ps.Series[bool]":
return s.str.isupper()
return self._data.pandas_on_spark.transform_batch(pandas_isspace)
def istitle(self) -> "ps.Series":
"""
Check whether all characters in each string are titlecase.
This is equivalent to running the Python string method
:func:`str.istitle` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Examples
--------
>>> s = ps.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
The s.str.istitle method checks for whether all words are in title
case (whether only the first letter of each word is capitalized).
        Words are assumed to be any sequence of non-numeric characters
separated by whitespace characters.
>>> s.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
"""
@no_type_check
def pandas_istitle(s) -> "ps.Series[bool]":
return s.str.istitle()
return self._data.pandas_on_spark.transform_batch(pandas_istitle)
def isnumeric(self) -> "ps.Series":
"""
Check whether all characters in each string are numeric.
This is equivalent to running the Python string method
:func:`str.isnumeric` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Examples
--------
>>> s1 = ps.Series(['one', 'one1', '1', ''])
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
>>> s2 = ps.Series(['23', '³', '⅕', ''])
The s2.str.isdecimal method checks for characters used to form numbers
in base 10.
>>> s2.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
The s2.str.isdigit method is the same as s2.str.isdecimal but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s2.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
The s2.str.isnumeric method is the same as s2.str.isdigit but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s2.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
"""
@no_type_check
def pandas_isnumeric(s) -> "ps.Series[bool]":
return s.str.isnumeric()
return self._data.pandas_on_spark.transform_batch(pandas_isnumeric)
def isdecimal(self) -> "ps.Series":
"""
Check whether all characters in each string are decimals.
This is equivalent to running the Python string method
:func:`str.isdecimal` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Examples
--------
>>> s = ps.Series(['23', '³', '⅕', ''])
The s.str.isdecimal method checks for characters used to form numbers
in base 10.
>>> s.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
The s.str.isdigit method is the same as s.str.isdecimal but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
The s.str.isnumeric method is the same as s.str.isdigit but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
"""
@no_type_check
def pandas_isdecimal(s) -> "ps.Series[bool]":
return s.str.isdecimal()
return self._data.pandas_on_spark.transform_batch(pandas_isdecimal)
@no_type_check
def cat(self, others=None, sep=None, na_rep=None, join=None) -> "ps.Series":
"""
Not supported.
"""
raise NotImplementedError()
def center(self, width: int, fillchar: str = " ") -> "ps.Series":
"""
Filling left and right side of strings in the Series/Index with an
additional character. Equivalent to :func:`str.center`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with fillchar.
fillchar : str
Additional character for filling, default is whitespace.
Returns
-------
Series of objects
Examples
--------
>>> s = ps.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.center(width=10, fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
@no_type_check
def pandas_center(s) -> "ps.Series[str]":
return s.str.center(width, fillchar)
return self._data.pandas_on_spark.transform_batch(pandas_center)
def contains(
self, pat: str, case: bool = True, flags: int = 0, na: Any = None, regex: bool = True
) -> "ps.Series":
"""
Test if pattern or regex is contained within a string of a Series.
Return boolean Series based on whether a given pattern or regex is
contained within a string of a Series.
Analogous to :func:`match`, but less strict, relying on
:func:`re.search` instead of :func:`re.match`.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : default None
Fill value for missing values. NaN converted to None.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series of boolean values or object
A Series of boolean values indicating whether the given pattern is
contained within the string of each element of the Series.
Examples
--------
Returning a Series of booleans using only a literal pattern.
>>> s1 = ps.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 None
dtype: object
Specifying case sensitivity using case.
>>> s1.str.contains('oG', case=True, regex=True)
0 False
1 False
2 False
3 False
4 None
dtype: object
Specifying na to be False instead of NaN replaces NaN values with
False. If Series does not contain NaN values the resultant dtype will
be bool, otherwise, an object dtype.
>>> s1.str.contains('og', na=False, regex=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
Returning ‘house’ or ‘dog’ when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 None
dtype: object
Ignoring case sensitivity using flags with regex.
>>> import re
>>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
3 False
4 None
dtype: object
Returning any digit using regular expression.
>>> s1.str.contains('[0-9]', regex=True)
0 False
1 False
2 False
3 True
4 None
dtype: object
        Ensure pat is not a literal pattern when regex is set to True.
Note in the following example one might expect only s2[1] and s2[3]
to return True. However, ‘.0’ as a regex matches any character followed
by a 0.
>>> s2 = ps.Series(['40','40.0','41','41.0','35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
@no_type_check
def pandas_contains(s) -> "ps.Series[bool]":
return s.str.contains(pat, case, flags, na, regex)
return self._data.pandas_on_spark.transform_batch(pandas_contains)
def count(self, pat: str, flags: int = 0) -> "ps.Series":
"""
Count occurrences of pattern in each string of the Series.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the Series.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0 (no flags)
Flags for the re module.
Returns
-------
Series of int
A Series containing the integer counts of pattern matches.
Examples
--------
>>> s = ps.Series(['A', 'B', 'Aaba', 'Baca', np.NaN, 'CABA', 'cat'])
>>> s.str.count('a')
0 0.0
1 0.0
2 2.0
3 2.0
4 NaN
5 0.0
6 1.0
dtype: float64
Escape '$' to find the literal dollar sign.
>>> s = ps.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
>>> s.str.count('\\$')
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int64
"""
@no_type_check
def pandas_count(s) -> "ps.Series[int]":
return s.str.count(pat, flags)
return self._data.pandas_on_spark.transform_batch(pandas_count)
@no_type_check
def decode(self, encoding, errors="strict") -> "ps.Series":
"""
Not supported.
"""
raise NotImplementedError()
@no_type_check
def encode(self, encoding, errors="strict") -> "ps.Series":
"""
Not supported.
"""
raise NotImplementedError()
@no_type_check
def extract(self, pat, flags=0, expand=True) -> "ps.Series":
"""
Not supported.
"""
raise NotImplementedError()
@no_type_check
def extractall(self, pat, flags=0) -> "ps.Series":
"""
Not supported.
"""
raise NotImplementedError()
def find(self, sub: str, start: int = 0, end: Optional[int] = None) -> "ps.Series":
"""
        Return lowest indexes in each string in the Series where the
substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :func:`str.find`.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series of int
Series of lowest matching indexes.
Examples
--------
>>> s = ps.Series(['apple', 'oranges', 'bananas'])
>>> s.str.find('a')
0 0
1 2
2 1
dtype: int64
>>> s.str.find('a', start=2)
0 -1
1 2
2 3
dtype: int64
>>> s.str.find('a', end=1)
0 0
1 -1
2 -1
dtype: int64
>>> s.str.find('a', start=2, end=2)
0 -1
1 -1
2 -1
dtype: int64
"""
@no_type_check
def pandas_find(s) -> "ps.Series[int]":
return s.str.find(sub, start, end)
return self._data.pandas_on_spark.transform_batch(pandas_find)
def findall(self, pat: str, flags: int = 0) -> "ps.Series":
"""
Find all occurrences of pattern or regular expression in the Series.
Equivalent to applying :func:`re.findall` to all the elements in
the Series.
Parameters
----------
pat : str
Pattern or regular expression.
flags : int, default 0 (no flags)
`re` module flags, e.g. `re.IGNORECASE`.
Returns
-------
Series of object
All non-overlapping matches of pattern or regular expression in
each string of this Series.
Examples
--------
>>> s = ps.Series(['Lion', 'Monkey', 'Rabbit'])
The search for the pattern ‘Monkey’ returns one match:
>>> s.str.findall('Monkey')
0 []
1 [Monkey]
2 []
dtype: object
On the other hand, the search for the pattern ‘MONKEY’ doesn’t return
any match:
>>> s.str.findall('MONKEY')
0 []
1 []
2 []
dtype: object
Flags can be added to the pattern or regular expression. For instance,
to find the pattern ‘MONKEY’ ignoring the case:
>>> import re
>>> s.str.findall('MONKEY', flags=re.IGNORECASE)
0 []
1 [Monkey]
2 []
dtype: object
When the pattern matches more than one string in the Series, all
matches are returned:
>>> s.str.findall('on')
0 [on]
1 [on]
2 []
dtype: object
Regular expressions are supported too. For instance, the search for all
the strings ending with the word ‘on’ is shown next:
>>> s.str.findall('on$')
0 [on]
1 []
2 []
dtype: object
If the pattern is found more than once in the same string, then a list
of multiple strings is returned:
>>> s.str.findall('b')
0 []
1 []
2 [b, b]
dtype: object
"""
# type hint does not support to specify array type yet.
@pandas_udf(returnType=ArrayType(StringType(), containsNull=True)) # type: ignore
def pudf(s: pd.Series) -> pd.Series:
return s.str.findall(pat, flags)
return self._data._with_new_scol(scol=pudf(self._data.spark.column))
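    # ``findall`` returns an array column, which the ``ps.Series[...]`` type
    # hints used elsewhere in this class cannot express, hence the explicit
    # ``pandas_udf`` with an ``ArrayType(StringType())`` return type above. A
    # minimal sketch of the same construction (the function name and pattern are
    # illustrative):
    #
    #     @pandas_udf(returnType=ArrayType(StringType()))
    #     def find_words(s: pd.Series) -> pd.Series:
    #         return s.str.findall(r'\w+')
    #
    # The wrapped function still receives and returns plain pandas Series, one
    # batch at a time.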
def index(self, sub: str, start: int = 0, end: Optional[int] = None) -> "ps.Series":
"""
        Return lowest indexes in each string where the substring is fully
contained between [start:end].
This is the same as :func:`str.find` except instead of returning -1,
it raises a ValueError when the substring is not found. Equivalent to
standard :func:`str.index`.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series of int
Series of lowest matching indexes.
Examples
--------
>>> s = ps.Series(['apple', 'oranges', 'bananas'])
>>> s.str.index('a')
0 0
1 2
2 1
dtype: int64
The following expression throws an exception:
>>> s.str.index('a', start=2) # doctest: +SKIP
"""
@no_type_check
def pandas_index(s) -> "ps.Series[np.int64]":
return s.str.index(sub, start, end)
return self._data.pandas_on_spark.transform_batch(pandas_index)
def join(self, sep: str) -> "ps.Series":
"""
Join lists contained as elements in the Series with passed delimiter.
If the elements of a Series are lists themselves, join the content of
these lists using the delimiter passed to the function. This function
is an equivalent to calling :func:`str.join` on the lists.
Parameters
----------
sep : str
Delimiter to use between list entries.
Returns
-------
Series of object
Series with list entries concatenated by intervening occurrences of
the delimiter.
See Also
--------
str.split : Split strings around given separator/delimiter.
str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Examples
--------
Example with a list that contains a None element.
>>> s = ps.Series([['lion', 'elephant', 'zebra'],
... ['cat', None, 'dog']])
>>> s
0 [lion, elephant, zebra]
1 [cat, None, dog]
dtype: object
Join all lists using a ‘-‘. The list containing None will produce None.
>>> s.str.join('-')
0 lion-elephant-zebra
1 None
dtype: object
"""
@no_type_check
def pandas_join(s) -> "ps.Series[str]":
return s.str.join(sep)
return self._data.pandas_on_spark.transform_batch(pandas_join)
def len(self) -> "ps.Series":
"""
Computes the length of each element in the Series.
The element may be a sequence (such as a string, tuple or list).
Returns
-------
Series of int
A Series of integer values indicating the length of each element in
the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for lists or tuples.
>>> s1 = ps.Series(['dog', 'monkey'])
>>> s1.str.len()
0 3
1 6
dtype: int64
>>> s2 = ps.Series([["a", "b", "c"], []])
>>> s2.str.len()
0 3
1 0
dtype: int64
"""
if isinstance(self._data.spark.data_type, (ArrayType, MapType)):
return self._data.spark.transform(lambda c: F.size(c).cast(LongType()))
else:
return self._data.spark.transform(lambda c: F.length(c).cast(LongType()))
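# A minimal sketch of the two Spark code paths above: F.length counts the
# characters of a string column, while F.size counts the elements of an array
# (or map) column. The session handling below is illustrative only.
def _example_len_codepaths():
    from pyspark.sql import SparkSession, functions as F
    spark = SparkSession.builder.getOrCreate()
    sdf = spark.createDataFrame([("dog", ["a", "b", "c"])], ["s", "arr"])
    # both selected columns evaluate to 3: three characters vs. three elements
    return sdf.select(F.length("s").alias("chars"), F.size("arr").alias("elems"))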
def ljust(self, width: int, fillchar: str = " ") -> "ps.Series":
"""
Fill the right side of strings in the Series with an additional
character. Equivalent to :func:`str.ljust`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with `fillchar`.
fillchar : str
Additional character for filling, default is whitespace.
Returns
-------
Series of object
Examples
--------
>>> s = ps.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.ljust(width=10, fillchar='-')
0 caribou---
1 tiger-----
dtype: object
"""
@no_type_check
def pandas_ljust(s) -> "ps.Series[str]":
return s.str.ljust(width, fillchar)
return self._data.pandas_on_spark.transform_batch(pandas_ljust)
def match(self, pat: str, case: bool = True, flags: int = 0, na: Any = np.NaN) -> "ps.Series":
"""
Determine if each string matches a regular expression.
Analogous to :func:`contains`, but more strict, relying on
:func:`re.match` instead of :func:`re.search`.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
Returns
-------
Series of boolean values or object
A Series of boolean values indicating whether the given pattern can
be matched in the string of each element of the Series.
Examples
--------
>>> s = ps.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s.str.match('dog')
0 False
1 True
2 False
3 False
4 None
dtype: object
>>> s.str.match('mouse|dog', case=False)
0 True
1 True
2 False
3 False
4 None
dtype: object
>>> s.str.match('.+and.+', na=True)
0 False
1 False
2 True
3 False
4 True
dtype: bool
>>> import re
>>> s.str.match('MOUSE', flags=re.IGNORECASE)
0 True
1 False
2 False
3 False
4 None
dtype: object
"""
@no_type_check
def pandas_match(s) -> "ps.Series[bool]":
return s.str.match(pat, case, flags, na)
return self._data.pandas_on_spark.transform_batch(pandas_match)
def normalize(self, form: str) -> "ps.Series":
"""
Return the Unicode normal form for the strings in the Series.
For more information on the forms, see the
:func:`unicodedata.normalize`.
Parameters
----------
form : {‘NFC’, ‘NFKC’, ‘NFD’, ‘NFKD’}
Unicode form.
Returns
-------
Series of objects
A Series of normalized strings.
"""
@no_type_check
def pandas_normalize(s) -> "ps.Series[str]":
return s.str.normalize(form)
return self._data.pandas_on_spark.transform_batch(pandas_normalize)
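# Usage sketch for normalize, which has no doctest above (values are
# illustrative; assumes pyspark.pandas available as `ps` and an active Spark
# session). Under NFKC, the ligature 'ﬁ' decomposes to 'fi'.
def _example_normalize():
    psser = ps.Series(["ﬁsh", "office"])
    return psser.str.normalize("NFKC")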
def pad(self, width: int, side: str = "left", fillchar: str = " ") -> "ps.Series":
"""
Pad strings in the Series up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with character defined in `fillchar`.
side : {‘left’, ‘right’, ‘both’}, default ‘left’
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
Series of object
Series with strings padded to at least ``width`` characters.
Examples
--------
>>> s = ps.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
@no_type_check
def pandas_pad(s) -> "ps.Series[str]":
return s.str.pad(width, side, fillchar)
return self._data.pandas_on_spark.transform_batch(pandas_pad)
def partition(self, sep: str = " ", expand: bool = True) -> "ps.Series":
"""
Not supported.
"""
raise NotImplementedError()
def repeat(self, repeats: int) -> "ps.Series":
"""
Duplicate each string in the Series.
Parameters
----------
repeats : int
Repeat the string the given number of times (int). A sequence of ints
is not supported.
Returns
-------
Series of object
Series or Index of repeated string objects specified by input
parameter repeats.
Examples
--------
>>> s = ps.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
A single int repeats each string in the Series:
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
"""
if not isinstance(repeats, int):
raise TypeError("repeats expects an int parameter")
return self._data.spark.transform(lambda c: SF.repeat(col=c, n=repeats))
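# A rough plain-Spark sketch of the transform above; it assumes the internal
# SF.repeat used here behaves like pyspark.sql.functions.repeat for string
# columns (session handling below is illustrative only).
def _example_repeat_sql():
    from pyspark.sql import SparkSession, functions as F
    spark = SparkSession.builder.getOrCreate()
    sdf = spark.createDataFrame([("a",), ("b",)], ["s"])
    return sdf.select(F.repeat(F.col("s"), 2).alias("s2"))  # 'aa', 'bb'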
def replace(
self,
pat: str,
repl: Union[str, Callable[[str], str]],
n: int = -1,
case: Optional[bool] = None,
flags: int = 0,
regex: bool = True,
) -> "ps.Series":
"""
Replace occurrences of pattern/regex in the Series with some other
string. Equivalent to :func:`str.replace` or :func:`re.sub`.
Parameters
----------
pat : str or compiled regex
String can be a character sequence or regular expression.
repl : str or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used. See
:func:`re.sub`.
n : int, default -1 (all)
Number of replacements to make from start.
case : boolean, default None
If True, case sensitive (the default if pat is a string).
Set to False for case insensitive.
Cannot be set if pat is a compiled regex.
flags: int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE.
Cannot be set if pat is a compiled regex.
regex : boolean, default True
If True, assumes the passed-in pattern is a regular expression.
If False, treats the pattern as a literal string.
Cannot be set to False if pat is a compiled regex or repl is a
callable.
Returns
-------
Series of object
A copy of the string with all matching occurrences of pat replaced
by repl.
Examples
--------
When pat is a string and regex is True (the default), the given pat is
compiled as a regex. When repl is a string, it replaces matching regex
patterns as with :func:`re.sub`. NaN value(s) in the Series are changed
to None:
>>> ps.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)
0 bao
1 baz
2 None
dtype: object
When pat is a string and regex is False, every pat is replaced with
repl as with :func:`str.replace`:
>>> ps.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)
0 bao
1 fuz
2 None
dtype: object
When repl is a callable, it is called on every pat using
:func:`re.sub`. The callable should expect one positional argument (a
regex match object) and return a string.
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
>>> ps.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
0 oof 123
1 rab zab
2 None
dtype: object
Using regex groups (extract second group and swap case):
>>> pat = r"(?P<one>\\w+) (?P<two>\\w+) (?P<three>\\w+)"
>>> repl = lambda m: m.group('two').swapcase()
>>> ps.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
0 tWO
1 bAR
dtype: object
Using a compiled regex with flags:
>>> import re
>>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
>>> ps.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
0 foo
1 bar
2 None
dtype: object
"""
@no_type_check
def pandas_replace(s) -> "ps.Series[str]":
return s.str.replace(pat, repl, n=n, case=case, flags=flags, regex=regex)
return self._data.pandas_on_spark.transform_batch(pandas_replace)
def rfind(self, sub: str, start: int = 0, end: Optional[int] = None) -> "ps.Series":
"""
Return highest indexes in each strings in the Series where the
substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :func:`str.rfind`.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series of int
Series of highest matching indexes.
Examples
--------
>>> s = ps.Series(['apple', 'oranges', 'bananas'])
>>> s.str.rfind('a')
0 0
1 2
2 5
dtype: int64
>>> s.str.rfind('a', start=2)
0 -1
1 2
2 5
dtype: int64
>>> s.str.rfind('a', end=1)
0 0
1 -1
2 -1
dtype: int64
>>> s.str.rfind('a', start=2, end=2)
0 -1
1 -1
2 -1
dtype: int64
"""
@no_type_check
def pandas_rfind(s) -> "ps.Series[int]":
return s.str.rfind(sub, start, end)
return self._data.pandas_on_spark.transform_batch(pandas_rfind)
def rindex(self, sub: str, start: int = 0, end: Optional[int] = None) -> "ps.Series":
"""
Return highest indexes in each strings where the substring is fully
contained between [start:end].
This is the same as :func:`str.rfind` except instead of returning -1,
it raises a ValueError when the substring is not found. Equivalent to
standard :func:`str.rindex`.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series of int
Series of highest matching indexes.
Examples
--------
>>> s = ps.Series(['apple', 'oranges', 'bananas'])
>>> s.str.rindex('a')
0 0
1 2
2 5
dtype: int64
The following expression throws an exception:
>>> s.str.rindex('a', start=2) # doctest: +SKIP
"""
@no_type_check
def pandas_rindex(s) -> "ps.Series[np.int64]":
return s.str.rindex(sub, start, end)
return self._data.pandas_on_spark.transform_batch(pandas_rindex)
def rjust(self, width: int, fillchar: str = " ") -> "ps.Series":
"""
Fill the left side of strings in the Series with an additional
character. Equivalent to :func:`str.rjust`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be
filled with `fillchar`.
fillchar : str
Additional character for filling, default is whitespace.
Returns
-------
Series of object
Examples
--------
>>> s = ps.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.rjust(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.rjust(width=10, fillchar='-')
0 ---caribou
1 -----tiger
dtype: object
"""
@no_type_check
def pandas_rjust(s) -> "ps.Series[str]":
return s.str.rjust(width, fillchar)
return self._data.pandas_on_spark.transform_batch(pandas_rjust)
def rpartition(self, sep: str = " ", expand: bool = True) -> "ps.Series":
"""
Not supported.
"""
raise NotImplementedError()
def slice(
self, start: Optional[int] = None, stop: Optional[int] = None, step: Optional[int] = None
) -> "ps.Series":
"""
Slice substrings from each element in the Series.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series of object
Series from sliced substrings from original string objects.
Examples
--------
>>> s = ps.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
"""
@no_type_check
def pandas_slice(s) -> "ps.Series[str]":
return s.str.slice(start, stop, step)
return self._data.pandas_on_spark.transform_batch(pandas_slice)
def slice_replace(
self, start: Optional[int] = None, stop: Optional[int] = None, repl: Optional[str] = None
) -> "ps.Series":
"""
Slice substrings from each element in the Series.
Parameters
----------
start : int, optional
Start position for slice operation. If not specified (None), the
slice is unbounded on the left, i.e. slice from the start of the
string.
stop : int, optional
Stop position for slice operation. If not specified (None), the
slice is unbounded on the right, i.e. slice until the end of the
string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series of object
Series from sliced substrings from original string objects.
Examples
--------
>>> s = ps.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just start, meaning replace start until the end of the string
with repl.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just stop, meaning the start of the string to stop is replaced
with repl, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify start and stop, meaning the slice from start to stop is
replaced with repl. Everything before or after start and stop is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
@no_type_check
def pandas_slice_replace(s) -> "ps.Series[str]":
return s.str.slice_replace(start, stop, repl)
return self._data.pandas_on_spark.transform_batch(pandas_slice_replace)
def split(
self, pat: Optional[str] = None, n: int = -1, expand: bool = False
) -> Union["ps.Series", "ps.DataFrame"]:
"""
Split strings around given separator/delimiter.
Splits the string in the Series from the beginning, at the specified
delimiter string. Equivalent to :func:`str.split`.
Parameters
----------
pat : str, optional
String or regular expression to split on. If not specified, split
on whitespace.
n : int, default -1 (all)
Limit number of splits in output. None, 0 and -1 will be
interpreted as return all splits.
expand : bool, default False
Expand the split strings into separate columns.
* If ``True``, `n` must be a positive integer, and return DataFrame expanding
dimensionality.
* If ``False``, return Series, containing lists of strings.
Returns
-------
Series, DataFrame
Type matches caller unless `expand=True` (see Notes).
See Also
--------
str.rsplit : Splits string around given separator/delimiter,
starting from the right.
str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series callers return DataFrame objects with `n + 1` columns.
.. note:: Even if `n` is much larger than found splits, the number of columns does NOT
shrink, unlike in pandas.
Examples
--------
>>> s = ps.Series(["this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
... np.nan])
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: object
Without the n parameter, the outputs of rsplit and split are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: object
The n parameter can be used to limit the number of splits on the
delimiter. The outputs of split and rsplit are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: object
The pat parameter can be used to split by other characters.
>>> s.str.split(pat = "/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 None
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(n=4, expand=True)
0 1 2 3 4
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html None None None None
2 None None None None None
For slightly more complex use cases like splitting the HTML document name
from a URL, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 None None
Remember to escape special characters when explicitly using regular
expressions.
>>> s = ps.Series(["1+1=2"])
>>> s.str.split(r"\\+|=", n=2, expand=True)
0 1 2
0 1 1 2
"""
from pyspark.pandas.frame import DataFrame
if expand and n <= 0:
raise NotImplementedError("expand=True is currently only supported with n > 0.")
# type hint does not support to specify array type yet.
return_type = ArrayType(StringType(), containsNull=True)
@pandas_udf(returnType=return_type) # type: ignore
def pudf(s: pd.Series) -> pd.Series:
return s.str.split(pat, n)
psser = self._data._with_new_scol(
pudf(self._data.spark.column).alias(self._data._internal.data_spark_column_names[0]),
field=self._data._internal.data_fields[0].copy(spark_type=return_type, nullable=True),
)
if expand:
psdf = psser.to_frame()
scol = psdf._internal.data_spark_columns[0]
spark_columns = [scol[i].alias(str(i)) for i in range(n + 1)]
column_labels = [(i,) for i in range(n + 1)]
internal = psdf._internal.with_new_columns(
spark_columns,
column_labels=cast(Optional[List], column_labels),
data_fields=[
self._data._internal.data_fields[0].copy(name=str(i), nullable=True)
for i in range(n + 1)
],
)
return DataFrame(internal)
else:
return psser
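# Illustration of the fixed column count noted in the docstring above: with
# expand=True the result always has n + 1 columns, even when fewer splits are
# found (assumes pyspark.pandas available as `ps` and an active Spark session).
def _example_split_expand():
    psser = ps.Series(["a b"])
    # 4 columns labelled 0..3; plain pandas would shrink this to 2 columns
    return psser.str.split(n=3, expand=True)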
def rsplit(
self, pat: Optional[str] = None, n: int = -1, expand: bool = False
) -> Union["ps.Series", "ps.DataFrame"]:
"""
Split strings around given separator/delimiter.
Splits the string in the Series from the end, at the specified
delimiter string. Equivalent to :func:`str.rsplit`.
Parameters
----------
pat : str, optional
String or regular expression to split on. If not specified, split
on whitespace.
n : int, default -1 (all)
Limit number of splits in output. None, 0 and -1 will be
interpreted as return all splits.
expand : bool, default False
Expand the split strings into separate columns.
* If ``True``, `n` must be a positive integer, and return DataFrame expanding
dimensionality.
* If ``False``, return Series, containing lists of strings.
Returns
-------
Series, DataFrame
Type matches caller unless `expand=True` (see Notes).
See Also
--------
str.split : Split strings around given separator/delimiter.
str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series callers return DataFrame objects with `n + 1` columns.
.. note:: Even if `n` is much larger than found splits, the number of columns does NOT
shrink, unlike in pandas.
Examples
--------
>>> s = ps.Series(["this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
... np.nan])
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: object
Without the n parameter, the outputs of rsplit and split are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: object
The n parameter can be used to limit the number of splits on the
delimiter. The outputs of split and rsplit are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(n=4, expand=True)
0 1 2 3 4
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html None None None None
2 None None None None None
For slightly more complex use cases like splitting the HTML document name
from a URL, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 None None
Remember to escape special characters when explicitly using regular
expressions.
>>> s = ps.Series(["1+1=2"])
>>> s.str.split(r"\\+|=", n=2, expand=True)
0 1 2
0 1 1 2
"""
from pyspark.pandas.frame import DataFrame
if expand and n <= 0:
raise NotImplementedError("expand=True is currently only supported with n > 0.")
# type hint does not support to specify array type yet.
return_type = ArrayType(StringType(), containsNull=True)
@pandas_udf(returnType=return_type) # type: ignore
def pudf(s: pd.Series) -> pd.Series:
return s.str.rsplit(pat, n)
psser = self._data._with_new_scol(
pudf(self._data.spark.column).alias(self._data._internal.data_spark_column_names[0]),
field=self._data._internal.data_fields[0].copy(spark_type=return_type, nullable=True),
)
if expand:
psdf = psser.to_frame()
scol = psdf._internal.data_spark_columns[0]
spark_columns = [scol[i].alias(str(i)) for i in range(n + 1)]
column_labels = [(i,) for i in range(n + 1)]
internal = psdf._internal.with_new_columns(
spark_columns,
column_labels=cast(Optional[List], column_labels),
data_fields=[
self._data._internal.data_fields[0].copy(name=str(i), nullable=True)
for i in range(n + 1)
],
)
return DataFrame(internal)
else:
return psser
def translate(self, table: Dict) -> "ps.Series":
"""
Map all characters in the string through the given mapping table.
Equivalent to standard :func:`str.translate`.
Parameters
----------
table : dict
Table is a mapping of Unicode ordinals to Unicode ordinals,
strings, or None. Unmapped characters are left untouched.
Characters mapped to None are deleted. :func:`str.maketrans` is a
helper function for making translation tables.
Returns
-------
Series of object
Series with translated strings.
Examples
--------
>>> s = ps.Series(["dog", "cat", "bird"])
>>> m = str.maketrans({'a': 'X', 'i': 'Y', 'o': None})
>>> s.str.translate(m)
0 dg
1 cXt
2 bYrd
dtype: object
"""
@no_type_check
def pandas_translate(s) -> "ps.Series[str]":
return s.str.translate(table)
return self._data.pandas_on_spark.transform_batch(pandas_translate)
def wrap(self, width: int, **kwargs: bool) -> "ps.Series":
"""
Wrap long strings in the Series to be formatted in paragraphs with
length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line-width. Lines separated with newline char.
expand_tabs : bool, optional
If true, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If true, each whitespace character remaining after tab expansion
will be replaced by a single space (default: True).
drop_whitespace : bool, optional
If true, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If true, then words longer than width will be broken in order to
ensure that no lines are longer than width. If it is false, long
words will not be broken, and some lines may be longer than width
(default: True).
break_on_hyphens : bool, optional
If true, wrapping will occur preferably on whitespace and right
after hyphens in compound words, as it is customary in English.
If false, only whitespaces will be considered as potentially good
places for line breaks, but you need to set break_long_words to
false if you want truly insecable words (default: True).
Returns
-------
Series of object
Series with wrapped strings.
Examples
--------
>>> s = ps.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\\nwrapped
1 another line\\nto be\\nwrapped
dtype: object
"""
@no_type_check
def pandas_wrap(s) -> "ps.Series[str]":
return s.str.wrap(width, **kwargs)
return self._data.pandas_on_spark.transform_batch(pandas_wrap)
def zfill(self, width: int) -> "ps.Series":
"""
Pad strings in the Series by prepending ‘0’ characters.
Strings in the Series are padded with ‘0’ characters on the left of the
string to reach a total string length width. Strings in the Series with
length greater or equal to width are unchanged.
Differs from :func:`str.zfill` which has special handling for ‘+’/’-‘
in the string.
Parameters
----------
width : int
Minimum length of resulting string; strings with length less than
`width` will be prepended with ‘0’ characters.
Returns
-------
Series of object
Series with '0' left-padded strings.
Examples
--------
>>> s = ps.Series(['-1', '1', '1000', np.nan])
>>> s
0 -1
1 1
2 1000
3 None
dtype: object
Note that NaN is not a string, therefore it is converted to None. The
minus sign in '-1' is treated as a regular character and the zero is
added to the left of it (:func:`str.zfill` would have moved it to the
left). 1000 remains unchanged as it is longer than width.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 None
dtype: object
"""
@no_type_check
def pandas_zfill(s) -> "ps.Series[str]":
return s.str.zfill(width)
return self._data.pandas_on_spark.transform_batch(pandas_zfill)
@no_type_check
def get_dummies(self, sep: str = "|") -> "ps.DataFrame":
"""
Not supported.
"""
raise NotImplementedError()
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.strings
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.strings.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.strings tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.strings,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
DotStar-Game-of-Stocks/Website-Backend | generateHistogramData.py | 1 | 2530 | # -*- coding: utf-8 -*-
# generateHistogramData.py
"""
Version: 03.18.2015
A function that returns data to plot a smooth histogram of the stock price for
the previous year. Provided with sample plotting code.
@author: Luke_Wortsmann
"""
import datetime as dt
import numpy as np
from yahoo_finance import Share
from scipy.interpolate import spline
# Only needed for plotting:
import matplotlib.pyplot as plt
def gatherHistogramData(stockName, pastXdays=365, bin0=15, returnCurrent = True):
"""
Takes a stock ticker as a string, returns an array containing:
* X-axis values of the stock price
* Y-axis values that are unitless and normed
* An array containing the points of a line at the current stock price, provided
returnCurrent is True.
"""
# Get date range:
today = dt.date.today()
oneYear = dt.timedelta(days=pastXdays)
d2 = today.isoformat()
d1 = (today - oneYear).isoformat()
# Get stock data:
stock = Share(stockName)
importedData = stock.get_historical(d1, d2)
closeData = [float(dateSet['Adj_Close']) for dateSet in importedData]
# Build histogram:
bD = np.histogram(closeData, bins = bin0, normed=True)
xMids = np.array([((bD[1][k]+bD[1][k+1])/2.0) for k in xrange(len(bD[1])-1)])
deltaX =(xMids[1] - xMids[0])/2.0
# Build Spline:
xNew = np.linspace(xMids.min(),xMids.max(),100)
yNew = spline(xMids,bD[0],xNew)
# Add endpoints:
yNew = np.insert(yNew,0,0)
yNew = np.insert(yNew,yNew.shape[0],0)
xNew = np.insert(xNew,0,xNew[0]-deltaX)
xNew = np.insert(xNew,xNew.shape[0],xNew[-1]+deltaX)
# Get current/return
if returnCurrent:
currentP = float(stock.get_price())
minval = min(enumerate(xNew),key = lambda x: abs(x[1] - currentP))[0]
ycurrent = yNew[minval]
currentVal = [[currentP,currentP],[0,ycurrent]]
return [list(xNew),list(yNew),currentVal]
else:
return [xNew,yNew]
# Basic example of concept:
stockName = 'goog'
histData = gatherHistogramData(stockName)
plt.plot(histData[2][0],histData[2][1],linewidth = 3.0,color='darkblue')
plt.plot(histData[0],histData[1],linewidth = 5.0,color='black')
# Fix Axes:
frame1 = plt.gca()
frame1.axes.get_yaxis().set_visible(False)
frame1.spines['top'].set_visible(False)
frame1.spines['right'].set_visible(False)
frame1.spines['left'].set_visible(False)
plt.tick_params(axis='x',which='both',top='off')
frame1.patch.set_visible(False)
plt.xlabel('Stock Price')
plt.show()
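# An optional usage sketch of the same helper with returnCurrent=False, drawn
# as a filled curve; the ticker and day span below are arbitrary examples.
def plotFilledHistogram(ticker='aapl', days=180):
    xs, ys = gatherHistogramData(ticker, pastXdays=days, returnCurrent=False)
    plt.fill_between(xs, ys, alpha=0.3, color='gray')
    plt.xlabel('Stock Price')
    plt.show()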
| mit |
fdft/ml | ch02/heldout.py | 24 | 1377 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script demonstrates the difference between the training accuracy and
# testing (held-out) accuracy.
import numpy as np
from sklearn.datasets import load_iris
from threshold import fit_model, accuracy
data = load_iris()
features = data['data']
labels = data['target_names'][data['target']]
# We are going to remove the setosa examples as they are too easy:
is_setosa = (labels == 'setosa')
features = features[~is_setosa]
labels = labels[~is_setosa]
# Now we classify virginica vs non-virginica
is_virginica = (labels == 'virginica')
# Split the data in two: testing and training
testing = np.tile([True, False], 50) # testing = [True,False,True,False,True,False...]
# Training is the negation of testing: i.e., datapoints not used for testing,
# will be used for training
training = ~testing
model = fit_model(features[training], is_virginica[training])
train_accuracy = accuracy(features[training], is_virginica[training], model)
test_accuracy = accuracy(features[testing], is_virginica[testing], model)
print('''\
Training accuracy was {0:.1%}.
Testing accuracy was {1:.1%} (N = {2}).
'''.format(train_accuracy, test_accuracy, testing.sum()))
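# A possible variant: instead of the fixed alternating mask above, draw a
# random held-out split with numpy (illustrative helper, not part of the book
# code).
def random_split(n_samples, test_fraction=0.5, seed=0):
    rng = np.random.RandomState(seed)
    testing_mask = rng.rand(n_samples) < test_fraction  # True marks testing points
    return ~testing_mask, testing_mask  # training mask, testing mask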
| mit |
JeanKossaifi/scikit-learn | sklearn/utils/testing.py | 71 | 26178 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
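# Hypothetical usage sketch for assert_warns (the helper below is illustrative):
def _example_assert_warns():
    def noisy():
        warnings.warn("spam", UserWarning)
        return 42
    # the wrapped function's return value is passed through once the warning
    # has been verified
    return assert_warns(UserWarning, noisy)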
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
The exception type(s) expected to be raised.
function : callable
Callable object expected to raise the error.
*args : the positional arguments to `function`.
**kw : the keyword arguments to `function`.
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
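# Hypothetical usage sketch for assert_raise_message:
def _example_assert_raise_message():
    def fail(x):
        raise ValueError("x must be positive, got %r" % x)
    # passes because the raised message contains the expected substring
    assert_raise_message(ValueError, "must be positive", fail, -1)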
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as a string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
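# Usage sketch: list only classifier names (the result depends on the
# installed scikit-learn version).
def _example_all_estimators():
    return [name for name, cls in all_estimators(type_filter="classifier")]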
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
conjunction with some implementation of BLAS (or other libraries that
manage an internal posix thread pool) can cause a crash or a freeze of the
Python process.
Under Python 3.4 and later, joblib uses the forkserver mode of
multiprocessing which does not trigger this problem.
In practice all known packaged distributions (from Linux distros or
Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
only impact OSX users.
This wrapper makes it possible to skip tests that could possibly cause
this crash under OSX.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independance)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
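# Usage sketch for TempMemmap: expose an array as a read-only memmap for the
# duration of a test body (values below are illustrative).
def _example_temp_memmap():
    data = np.arange(5)
    with TempMemmap(data) as memmapped:
        # loaded through joblib with mmap_mode='r', so it is read-only
        return memmapped.sum()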
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/exceptions.py | 35 | 4329 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
"""
class ChangedBehaviorWarning(UserWarning):
"""Warning class used to notify the user of any change in the behavior."""
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataConversionWarning(UserWarning):
"""Warning used to notify implicit data conversions happening in the code.
This warning occurs when some input data needs to be converted or
interpreted in a way that may not match the user's expectations.
For example, this warning may occur when the user
- passes an integer array to a function which expects float input and
will convert the input
- requests a non-copying operation, but a copy is required to meet the
implementation's data-type expectations;
- passes an input whose shape can be interpreted ambiguously.
"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality.
For example, in random projection, this warning is raised when the
number of components, which quantifies the dimensionality of the target
projection space, is higher than the number of features, which quantifies
the dimensionality of the original source space, to imply that the
dimensionality of the problem will not be reduced.
"""
class EfficiencyWarning(UserWarning):
"""Warning used to notify the user of inefficient computation.
This warning notifies the user that the efficiency may not be optimal due
to some reason which may be included as a part of the warning message.
This may be subclassed into a more specific Warning class.
"""
class FitFailedWarning(RuntimeWarning):
"""Warning class used if there is an error while fitting the estimator.
This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
and the cross-validation helper function cross_val_score to warn when there
is an error while fitting the estimator.
Examples
--------
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import FitFailedWarning
>>> import warnings
>>> warnings.simplefilter('always', FitFailedWarning)
>>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
>>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
>>> with warnings.catch_warnings(record=True) as w:
... try:
... gs.fit(X, y) # This will raise a ValueError since C is < 0
... except ValueError:
... pass
... print(repr(w[-1].message))
... # doctest: +NORMALIZE_WHITESPACE
FitFailedWarning("Classifier fit failed. The score on this train-test
partition for these parameters will be set to 0.000000. Details:
\\nValueError('Penalty term must be positive; got (C=-2)',)",)
"""
class NonBLASDotWarning(EfficiencyWarning):
"""Warning used when the dot operation does not use BLAS.
This warning is used to notify the user that BLAS was not used for dot
operation and hence the efficiency may be affected.
"""
class UndefinedMetricWarning(UserWarning):
"""Warning used when the metric is invalid"""
| bsd-3-clause |
ahaldane/numpy | numpy/doc/structured_arrays.py | 5 | 26509 | """
=================
Structured Arrays
=================
Introduction
============
Structured arrays are ndarrays whose datatype is a composition of simpler
datatypes organized as a sequence of named :term:`fields <field>`. For example,
::
>>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
>>> x
array([('Rex', 9, 81.), ('Fido', 3, 27.)],
dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
Here ``x`` is a one-dimensional array of length two whose datatype is a
structure with three fields: 1. A string of length 10 or less named 'name', 2.
a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'.
If you index ``x`` at position 1 you get a structure::
>>> x[1]
('Fido', 3, 27.0)
You can access and modify individual fields of a structured array by indexing
with the field name::
>>> x['age']
array([9, 3], dtype=int32)
>>> x['age'] = 5
>>> x
array([('Rex', 5, 81.), ('Fido', 5, 27.)],
dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
Structured datatypes are designed to be able to mimic 'structs' in the C
language, and share a similar memory layout. They are meant for interfacing with
C code and for low-level manipulation of structured buffers, for example for
interpreting binary blobs. For these purposes they support specialized features
such as subarrays, nested datatypes, and unions, and allow control over the
memory layout of the structure.
Users looking to manipulate tabular data, such as stored in csv files, may find
other pydata projects more suitable, such as xarray, pandas, or DataArray.
These provide a high-level interface for tabular data analysis and are better
optimized for that use. For instance, the C-struct-like memory layout of
structured arrays in numpy can lead to poor cache behavior in comparison.
.. _defining-structured-types:
Structured Datatypes
====================
A structured datatype can be thought of as a sequence of bytes of a certain
length (the structure's :term:`itemsize`) which is interpreted as a collection
of fields. Each field has a name, a datatype, and a byte offset within the
structure. The datatype of a field may be any numpy datatype including other
structured datatypes, and it may also be a :term:`subarray data type` which
behaves like an ndarray of a specified shape. The offsets of the fields are
arbitrary, and fields may even overlap. These offsets are usually determined
automatically by numpy, but can also be specified.
Structured Datatype Creation
----------------------------
Structured datatypes may be created using the function :func:`numpy.dtype`.
There are 4 alternative forms of specification which vary in flexibility and
conciseness. These are further documented in the
:ref:`Data Type Objects <arrays.dtypes.constructing>` reference page, and in
summary they are:
1. A list of tuples, one tuple per field
Each tuple has the form ``(fieldname, datatype, shape)`` where shape is
optional. ``fieldname`` is a string (or tuple if titles are used, see
:ref:`Field Titles <titles>` below), ``datatype`` may be any object
convertible to a datatype, and ``shape`` is a tuple of integers specifying
subarray shape.
>>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))])
dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
If ``fieldname`` is the empty string ``''``, the field will be given a
default name of the form ``f#``, where ``#`` is the integer index of the
field, counting from 0 from the left::
>>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')])
dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
The byte offsets of the fields within the structure and the total
structure itemsize are determined automatically.
2. A string of comma-separated dtype specifications
In this shorthand notation any of the :ref:`string dtype specifications
<arrays.dtypes.constructing>` may be used in a string and separated by
commas. The itemsize and byte offsets of the fields are determined
automatically, and the field names are given the default names ``f0``,
``f1``, etc. ::
>>> np.dtype('i8, f4, S3')
dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
>>> np.dtype('3int8, float32, (2, 3)float64')
dtype([('f0', 'i1', (3,)), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
3. A dictionary of field parameter arrays
This is the most flexible form of specification since it allows control
over the byte-offsets of the fields and the itemsize of the structure.
The dictionary has two required keys, 'names' and 'formats', and four
optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'. The values
for 'names' and 'formats' should respectively be a list of field names and
a list of dtype specifications, of the same length. The optional 'offsets'
value should be a list of integer byte-offsets, one for each field within
the structure. If 'offsets' is not given the offsets are determined
automatically. The optional 'itemsize' value should be an integer
describing the total size in bytes of the dtype, which must be large
enough to contain all the fields.
::
>>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']})
dtype([('col1', '<i4'), ('col2', '<f4')])
>>> np.dtype({'names': ['col1', 'col2'],
... 'formats': ['i4', 'f4'],
... 'offsets': [0, 4],
... 'itemsize': 12})
dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
Offsets may be chosen such that the fields overlap, though this will mean
that assigning to one field may clobber any overlapping field's data. As
an exception, fields of :class:`numpy.object` type cannot overlap with
other fields, because of the risk of clobbering the internal object
pointer and then dereferencing it.
The optional 'aligned' value can be set to ``True`` to make the automatic
offset computation use aligned offsets (see :ref:`offsets-and-alignment`),
as if the 'align' keyword argument of :func:`numpy.dtype` had been set to
True.
The optional 'titles' value should be a list of titles of the same length
as 'names', see :ref:`Field Titles <titles>` below.
4. A dictionary of field names
The use of this form of specification is discouraged, but documented here
because older numpy code may use it. The keys of the dictionary are the
field names and the values are tuples specifying type and offset::
>>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)})
dtype([('col1', 'i1'), ('col2', '<f4')])
This form is discouraged because Python dictionaries do not preserve order
in Python versions before Python 3.6, and the order of the fields in a
structured dtype has meaning. :ref:`Field Titles <titles>` may be
specified by using a 3-tuple, see below.
Manipulating and Displaying Structured Datatypes
------------------------------------------------
The list of field names of a structured datatype can be found in the ``names``
attribute of the dtype object::
>>> d = np.dtype([('x', 'i8'), ('y', 'f4')])
>>> d.names
('x', 'y')
The field names may be modified by assigning to the ``names`` attribute using a
sequence of strings of the same length.
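For example (a small illustration; the replacement names ``a`` and ``b`` are arbitrary)::
>>> d2 = np.dtype([('x', 'i8'), ('y', 'f4')])
>>> d2.names = ['a', 'b']
>>> d2.names
('a', 'b')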
The dtype object also has a dictionary-like attribute, ``fields``, whose keys
are the field names (and :ref:`Field Titles <titles>`, see below) and whose
values are tuples containing the dtype and byte offset of each field. ::
>>> d.fields
mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
Both the ``names`` and ``fields`` attributes will equal ``None`` for
unstructured arrays. The recommended way to test if a dtype is structured is
with `if dt.names is not None` rather than `if dt.names`, to account for dtypes
with 0 fields.
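For instance (a quick illustrative check)::
>>> np.dtype([('x', 'f4')]).names is not None
True
>>> np.dtype('f4').names is None
True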
The string representation of a structured datatype is shown in the "list of
tuples" form if possible, otherwise numpy falls back to using the more general
dictionary form.
.. _offsets-and-alignment:
Automatic Byte Offsets and Alignment
------------------------------------
Numpy uses one of two methods to automatically determine the field byte offsets
and the overall itemsize of a structured datatype, depending on whether
``align=True`` was specified as a keyword argument to :func:`numpy.dtype`.
By default (``align=False``), numpy will pack the fields together such that
each field starts at the byte offset the previous field ended, and the fields
are contiguous in memory. ::
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
>>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2'))
offsets: [0, 1, 2, 6, 7, 15]
itemsize: 17
If ``align=True`` is set, numpy will pad the structure in the same way many C
compilers would pad a C-struct. Aligned structures can give a performance
improvement in some cases, at the cost of increased datatype size. Padding
bytes are inserted between fields such that each field's byte offset will be a
multiple of that field's alignment, which is usually equal to the field's size
in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The
structure will also have trailing padding added so that its itemsize is a
multiple of the largest field's alignment. ::
>>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2', align=True))
offsets: [0, 1, 4, 8, 16, 24]
itemsize: 32
Note that although almost all modern C compilers pad in this way by default,
padding in C structs is C-implementation-dependent so this memory layout is not
guaranteed to exactly match that of a corresponding struct in a C program. Some
work may be needed, either on the numpy side or the C side, to obtain exact
correspondence.
If offsets were specified using the optional ``offsets`` key in the
dictionary-based dtype specification, setting ``align=True`` will check that
each field's offset is a multiple of its size and that the itemsize is a
multiple of the largest field size, and raise an exception if not.
If the offsets of the fields and itemsize of a structured array satisfy the
alignment conditions, the array will have the ``ALIGNED`` :attr:`flag
<numpy.ndarray.flags>` set.
A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an
aligned dtype or array to a packed one and vice versa. It takes either a dtype
or structured ndarray as an argument, and returns a copy with fields re-packed,
with or without padding bytes.
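For example (a brief sketch; the two-field aligned dtype used here is arbitrary)::
>>> from numpy.lib.recfunctions import repack_fields
>>> repack_fields(np.dtype('u1, i8', align=True))
dtype([('f0', 'u1'), ('f1', '<i8')])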
.. _titles:
Field Titles
------------
In addition to field names, fields may also have an associated :term:`title`,
an alternate name, which is sometimes used as an additional description or
alias for the field. The title may be used to index an array, just like a
field name.
To add titles when using the list-of-tuples form of dtype specification, the
field name may be specified as a tuple of two strings instead of a single
string, which will be the field's title and field name respectively. For
example::
>>> np.dtype([(('my title', 'name'), 'f4')])
dtype([(('my title', 'name'), '<f4')])
When using the first form of dictionary-based specification, the titles may be
supplied as an extra ``'titles'`` key as described above. When using the second
(discouraged) dictionary-based specification, the title can be supplied by
providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual
2-element tuple::
>>> np.dtype({'name': ('i4', 0, 'my title')})
dtype([(('my title', 'name'), '<i4')])
The ``dtype.fields`` dictionary will contain titles as keys, if any
titles are used. This means effectively that a field with a title will be
represented twice in the fields dictionary. The tuple values for these fields
will also have a third element, the field title. Because of this, and because
the ``names`` attribute preserves the field order while the ``fields``
attribute may not, it is recommended to iterate through the fields of a dtype
using the ``names`` attribute of the dtype, which will not list titles, as
in::
>>> for name in d.names:
... print(d.fields[name][:2])
(dtype('int64'), 0)
(dtype('float32'), 8)
Union types
-----------
Structured datatypes are implemented in numpy to have base type
:class:`numpy.void` by default, but it is possible to interpret other numpy
types as structured types using the ``(base_dtype, dtype)`` form of dtype
specification described in
:ref:`Data Type Objects <arrays.dtypes.constructing>`. Here, ``base_dtype`` is
the desired underlying dtype, and fields and flags will be copied from
``dtype``. This dtype is similar to a 'union' in C.
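For example (an illustrative sketch that overlays two 16-bit fields on a single 32-bit integer)::
>>> np.dtype(('i4', [('lo', 'i2'), ('hi', 'i2')]))
dtype((numpy.int32, [('lo', '<i2'), ('hi', '<i2')]))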
Indexing and Assignment to Structured arrays
============================================
Assigning data to a Structured Array
------------------------------------
There are a number of ways to assign values to a structured array: Using python
tuples, using scalar values, or using other structured arrays.
Assignment from Python Native Types (Tuples)
````````````````````````````````````````````
The simplest way to assign values to a structured array is using python tuples.
Each assigned value should be a tuple of length equal to the number of fields
in the array, and not a list or array as these will trigger numpy's
broadcasting rules. The tuple's elements are assigned to the successive fields
of the array, from left to right::
>>> x = np.array([(1, 2, 3), (4, 5, 6)], dtype='i8, f4, f8')
>>> x[1] = (7, 8, 9)
>>> x
array([(1, 2., 3.), (7, 8., 9.)],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
Assignment from Scalars
```````````````````````
A scalar assigned to a structured element will be assigned to all fields. This
happens when a scalar is assigned to a structured array, or when an
unstructured array is assigned to a structured array::
>>> x = np.zeros(2, dtype='i8, f4, ?, S1')
>>> x[:] = 3
>>> x
array([(3, 3., True, b'3'), (3, 3., True, b'3')],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
>>> x[:] = np.arange(2)
>>> x
array([(0, 0., False, b'0'), (1, 1., True, b'1')],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
Structured arrays can also be assigned to unstructured arrays, but only if the
structured datatype has just a single field::
>>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')])
>>> onefield = np.zeros(2, dtype=[('A', 'i4')])
>>> nostruct = np.zeros(2, dtype='i4')
>>> nostruct[:] = twofield
Traceback (most recent call last):
...
TypeError: Cannot cast scalar from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
Assignment from other Structured Arrays
```````````````````````````````````````
Assignment between two structured arrays occurs as if the source elements had
been converted to tuples and then assigned to the destination elements. That
is, the first field of the source array is assigned to the first field of the
destination array, and the second field likewise, and so on, regardless of
field names. Structured arrays with a different number of fields cannot be
assigned to each other. Bytes of the destination structure which are not
included in any of the fields are unaffected. ::
>>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')])
>>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')])
>>> b[:] = a
>>> b
array([(0., b'0.0', b''), (0., b'0.0', b''), (0., b'0.0', b'')],
dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')])
Assignment involving subarrays
``````````````````````````````
When assigning to fields which are subarrays, the assigned value will first be
broadcast to the shape of the subarray.
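For example (a minimal illustration using an arbitrary ``(2, 2)`` subarray field)::
>>> x = np.zeros(2, dtype=[('a', 'f4', (2, 2))])
>>> x['a'] = 1.0
>>> x[0]['a']
array([[1., 1.],
[1., 1.]], dtype=float32)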
Indexing Structured Arrays
--------------------------
Accessing Individual Fields
```````````````````````````
Individual fields of a structured array may be accessed and modified by indexing
the array with the field name. ::
>>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
>>> x['foo']
array([1, 3])
>>> x['foo'] = 10
>>> x
array([(10, 2.), (10, 4.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
The resulting array is a view into the original array. It shares the same
memory locations and writing to the view will modify the original array. ::
>>> y = x['bar']
>>> y[:] = 11
>>> x
array([(10, 11.), (10, 11.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
This view has the same dtype and itemsize as the indexed field, so it is
typically a non-structured array, except in the case of nested structures.
>>> y.dtype, y.shape, y.strides
(dtype('float32'), (2,), (12,))
If the accessed field is a subarray, the dimensions of the subarray
are appended to the shape of the result::
>>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))])
>>> x['a'].shape
(2, 2)
>>> x['b'].shape
(2, 2, 3, 3)
Accessing Multiple Fields
```````````````````````````
One can index and assign to a structured array with a multi-field index, where
the index is a list of field names.
.. warning::
The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16.
The result of indexing with a multi-field index is a view into the original
array, as follows::
>>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
>>> a[['a', 'c']]
array([(0, 0.), (0, 0.), (0, 0.)],
dtype={'names':['a','c'], 'formats':['<i4','<f4'], 'offsets':[0,8], 'itemsize':12})
Assignment to the view modifies the original array. The view's fields will be
in the order they were indexed. Note that unlike for single-field indexing, the
dtype of the view has the same itemsize as the original array, and has fields
at the same offsets as in the original array, and unindexed fields are merely
missing.
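For example, continuing with the array ``a`` defined above, the view keeps the original itemsize::
>>> a[['a', 'c']].dtype.itemsize == a.dtype.itemsize
True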
.. warning::
In Numpy 1.15, indexing an array with a multi-field index returned a copy of
the result above, but with fields packed together in memory as if
passed through :func:`numpy.lib.recfunctions.repack_fields`.
The new behavior as of Numpy 1.16 leads to extra "padding" bytes at the
location of unindexed fields compared to 1.15. You will need to update any
code which depends on the data having a "packed" layout. For instance code
such as::
>>> a[['a', 'c']].view('i8') # Fails in Numpy 1.16
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
will need to be changed. This code has raised a ``FutureWarning`` since
Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7.
In 1.16 a number of functions have been introduced in the
:mod:`numpy.lib.recfunctions` module to help users account for this
change. These are
:func:`numpy.lib.recfunctions.repack_fields`,
:func:`numpy.lib.recfunctions.structured_to_unstructured`,
:func:`numpy.lib.recfunctions.unstructured_to_structured`,
:func:`numpy.lib.recfunctions.apply_along_fields`,
:func:`numpy.lib.recfunctions.assign_fields_by_name`, and
:func:`numpy.lib.recfunctions.require_fields`.
The function :func:`numpy.lib.recfunctions.repack_fields` can always be
used to reproduce the old behavior, as it will return a packed copy of the
structured array. The code above, for example, can be replaced with:
>>> from numpy.lib.recfunctions import repack_fields
>>> repack_fields(a[['a', 'c']]).view('i8') # supported in 1.16
array([0, 0, 0])
Furthermore, numpy now provides a new function
:func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer
and more efficient alternative for users who wish to convert structured
arrays to unstructured arrays, as the view above is often intended to do.
This function allows safe conversion to an unstructured type taking into
account padding, often avoids a copy, and also casts the datatypes
as needed, unlike the view. Code such as:
>>> b = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
>>> b[['x', 'z']].view('f4')
array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
can be made safer by replacing with:
>>> from numpy.lib.recfunctions import structured_to_unstructured
>>> structured_to_unstructured(b[['x', 'z']])
array([0, 0, 0])
Assignment to an array with a multi-field index modifies the original array::
>>> a[['a', 'c']] = (2, 3)
>>> a
array([(2, 0, 3.), (2, 0, 3.), (2, 0, 3.)],
dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<f4')])
This obeys the structured array assignment rules described above. For example,
this means that one can swap the values of two fields using appropriate
multi-field indexes::
>>> a[['a', 'c']] = a[['c', 'a']]
Indexing with an Integer to get a Structured Scalar
```````````````````````````````````````````````````
Indexing a single element of a structured array (with an integer index) returns
a structured scalar::
>>> x = np.array([(1, 2., 3.)], dtype='i, f, f')
>>> scalar = x[0]
>>> scalar
(1, 2., 3.)
>>> type(scalar)
<class 'numpy.void'>
Unlike other numpy scalars, structured scalars are mutable and act like views
into the original array, such that modifying the scalar will modify the
original array. Structured scalars also support access and assignment by field
name::
>>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
>>> s = x[0]
>>> s['bar'] = 100
>>> x
array([(1, 100.), (3, 4.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
Similarly to tuples, structured scalars can also be indexed with an integer::
>>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0]
>>> scalar[0]
1
>>> scalar[1] = 4
Thus, tuples might be thought of as the native Python equivalent to numpy's
structured types, much like native python integers are the equivalent to
numpy's integer types. Structured scalars may be converted to a tuple by
calling :func:`ndarray.item`::
>>> scalar.item(), type(scalar.item())
((1, 4.0, 3.0), <class 'tuple'>)
Viewing Structured Arrays Containing Objects
--------------------------------------------
In order to prevent clobbering object pointers in fields of
:class:`numpy.object` type, numpy currently does not allow views of structured
arrays containing objects.
Structure Comparison
--------------------
If the dtypes of two void structured arrays are equal, testing the equality of
the arrays will result in a boolean array with the dimensions of the original
arrays, with elements set to ``True`` where all fields of the corresponding
structures are equal. Structured dtypes are equal if the field names,
dtypes and titles are the same, ignoring endianness, and the fields are in
the same order::
>>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
>>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')])
>>> a == b
array([False, False])
Currently, if the dtypes of two void structured arrays are not equivalent the
comparison fails, returning the scalar value ``False``. This behavior is
deprecated as of numpy 1.10 and will raise an error or perform elementwise
comparison in the future.
The ``<`` and ``>`` operators always return ``False`` when comparing void
structured arrays, and arithmetic and bitwise operations are not supported.
Record Arrays
=============
As an optional convenience numpy provides an ndarray subclass,
:class:`numpy.recarray`, and associated helper functions in the
:mod:`numpy.rec` submodule, that allows access to fields of structured arrays
by attribute instead of only by index. Record arrays also use a special
datatype, :class:`numpy.record`, that allows field access by attribute on the
structured scalars obtained from the array.
The simplest way to create a record array is with :func:`numpy.rec.array`::
>>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([ 2., 3.], dtype=float32)
>>> recordarr[1:2]
rec.array([(2, 3., b'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
b'World'
:func:`numpy.rec.array` can convert a wide variety of arguments into record
arrays, including structured arrays::
>>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
The :mod:`numpy.rec` module provides a number of other convenience functions for
creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
A record array representation of a structured array can be obtained using the
appropriate `view <numpy-ndarray-view>`_::
>>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
>>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type :class:`np.recarray` will
automatically convert to :class:`np.record` datatype, so the dtype can be left
out of the view::
>>> recordarr = arr.view(np.recarray)
>>> recordarr.dtype
dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
To get back to a plain ndarray both the dtype and type must be reset. The
following view does so, taking into account the unusual case that the
recordarr was not a structured type::
>>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
Record array fields accessed by index or by attribute are returned as a record
array if the field has a structured type but as a plain ndarray otherwise. ::
>>> recordarr = np.rec.array([('Hello', (1, 2)), ("World", (3, 4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
<class 'numpy.ndarray'>
>>> type(recordarr.bar)
<class 'numpy.recarray'>
Note that if a field has the same name as an ndarray attribute, the ndarray
attribute takes precedence. Such fields will be inaccessible by attribute but
will still be accessible by index.
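For example (an illustrative case with a field deliberately named ``shape``)::
>>> rec = np.rec.array([(1., 2.)], dtype=[('shape', 'f4'), ('bar', 'f4')])
>>> rec.shape
(1,)
>>> rec['shape']
array([1.], dtype=float32)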
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
dsullivan7/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
rexshihaoren/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
SPP1665DataAnalysisCourse/elephant | elephant/sta.py | 1 | 19728 | # -*- coding: utf-8 -*-
'''
Module: jelephant.analysis.sta
Contains functions to calculate spike-triggered averages of AnalogSignals.
'''
import numpy as np
import scipy.signal
import quantities as pq
from neo.core import AnalogSignal, AnalogSignalArray
if __name__ == '__main__':
pass
#===============================================================================
# Spike-triggered average main functions
#===============================================================================
def sta(lfps, spiketrains, window, method="correlation", crosstrials=False, single_data=None):
"""
Calls the respective sta function specified by 'method'. 'method' can either be 'correlation' for
correlation-based STA calculation or 'average' for average-based STA calculation.
**Args**:
lfps: AnalogSignal object or AnalogSignalArray object or list of AnalogSignals
spikes: SpikeTrain or list of SpikeTrain objects; its time interval needs to
be completely covered by the lfp
window: positive time interval to specify the cutout around spikes given as Quantity or
number of bins to use
method: default 'correlation'. Specifies method to calculate STA
crosstrials: indicates if STA is averaged over all provided trials or calculated trial-wise
default value 'False'
True: STA is calculated for each pair of lfp and spiketrain and is averaged afterwards
False: STAs are calculated for each pair of lfp and spiketrain and are returned as list
single_data: (None,'train','lfp')
specifies whether one (first) spiketrain is used for all STAs ('train'),
each AnalogSignal comes with its own spiketrain (None, Default) or one (first)
Analogsignal is used for all spiketrains ('lfp') Default value 'None'
**Return**:
Returns a tuple (STA,time,used_spikes), where STA is a list of one-dimensional arrays with
the spike triggered average, and time is a list of the corresponding time bins.
The length of the respective array is defined by 2*window + 1, where window is
the number of bins around the spike times used.
used_spikes contains the number of spikes used for the STA. If the spiketrain did
not contain suitable spikes, the returned STA will be filled with zeros.
**Example**:
(result,time,used_spikes)=sta(lfp,spiketrain,Quantity(10,"ms"))
matplotlib.pyplot.plot(time,result)
"""
if single_data == 'lfp':
# ## 1 ### In case of single lfp provided
# wrapping spiketrains
if isinstance(spiketrains, np.ndarray):
box = []
box.append(spiketrains)
spiketrains = box
# if (lfps has usefull type)
if (isinstance(lfps, np.ndarray) or isinstance(lfps, list)):
# (itself is data, but also contains lists) or (contains only one list as first element))
if (len(lfps) > 1 and (isinstance(lfps[0], list) or isinstance(lfps[0], np.ndarray))):
pass
elif (len(lfps) == 1 and not(isinstance(lfps[0], list) or isinstance(lfps[0], np.ndarray))):
# unwrapping lfps
lfps = lfps[0]
else:
raise ValueError("There is no single lfp signal present in the supplied lfp signal")
else:
raise ValueError("Supplied LFP does not have the correct data format but %s" % (str(type(lfps))))
loops = len(spiketrains)
result = []
for i in range(loops):
if method == "corr" or method == "correlation":
result.append(sta_corr(lfps, spiketrains[i], window, crosstrials, single_data))
elif method == "aver" or method == "average":
result.append(sta_average(lfps, spiketrains[i], window, crosstrials, single_data))
else:
raise ValueError("Specified STA method is not available. Please use 'correlation' or 'average'")
if single_data == 'lfp':
return averaging_STAs([a[0] for a in result], [a[2] for a in result]), result[0][1], np.sum([a[2] for a in result])
return result[0]
# ## 2 ### normal calling of sta function in case of single_data != 'lfp'
if method == "corr" or method == "correlation":
return (sta_corr(lfps, spiketrains, window, crosstrials, single_data))
elif method == "aver" or method == "average":
return (sta_average(lfps, spiketrains, window, crosstrials, single_data))
else:
raise ValueError("Specified STA method is not available. Please use 'correlation' or 'average'")
def sta_corr(lfps, spiketrains, window, crosstrials=False, single_data=None):
"""
Calculates the respective spike-triggered average of the analog signals of multiple trials
by binning the spiketrain and correlating the lfp with the respective spiketrain.
Calculates the spike triggered average of an AnalogSignal or AnalogSignalArray object in a
time window +-window around the spike times in a SpikeTrain object.
**Args**:
lfps: AnalogSignal object or AnalogSignalArray object or list of AnalogSignals
spikes: SpikeTrain or list of SpikeTrain objects; its time interval needs to
be completely covered by the lfp
window: positive time interval to specify the cutout around spikes given as Quantity or
number of bins to use
crosstrials: indicates if STA is averaged over all provided trials or calculated trial-wise
**Return**:
Returns a tuple (STA,time,used_spikes), where STA is a list of one-dimensional arrays with
the spike triggered average, and time is a list of the corresponding time bins.
The length of the respective array is defined by 2*window + 1, where window is
the number of bins around the spike times used.
used_spikes contains the number of spikes used for the STA. If the spiketrain did
not contain suitable spikes, the returned STA will be filled with zeros.
**Example**:
(result,time,used_spikes)=sta_corr(lfp,spiketrain,Quantity(10,"ms"))
matplotlib.pyplot.plot(time,result)
"""
# checking compatibility of data, calculating parameters of trials
(lfps, spiketrains, window_times, wrapped, num_trials, window_bins, st_lfp_offsetbins, spiketrainbins) = data_quality_check(lfps, spiketrains, window, crosstrials, single_data)
# create binned spiketrains of spikes in suitable time window
st_binned = []
for trial in np.arange(num_trials):
# binning spiketrain with respect to its starting time
st_binned.append(np.zeros(spiketrainbins[trial], dtype=int))
for t in spiketrains[trial]:
# calculating spikebin from spiketime (respective to spiketrainstart)
spikebin = int(np.round(float(t - spiketrains[trial].t_start) / (spiketrains[trial].t_stop - spiketrains[trial].t_start) * spiketrainbins[trial]))
# checking if lfp signal around spiketime t is available
if spikebin + st_lfp_offsetbins[trial] > window_bins[trial] and len(lfps[trial]) - (st_lfp_offsetbins[trial] + spikebin) > window_bins[trial]:
# adds 1 to the bin corresponding to spiketime t
st_binned[trial][spikebin] += 1
# use the correlation function to calculate the STA
result_sta = []
result_time = []
used_spikes = []
for trial in np.arange(num_trials):
if all(np.equal(st_binned[trial] , 0)): # This is slow!
print "No suitable spikes in trial detected. Reduce window size or supply more LFP data."
output = np.zeros(2 * window_bins[trial] + 1) * lfps[trial].units
result_sta.append(output)
# used_spikes.append(0)
else:
# cutting correct segment of lfp with respect to additional information outside of the spiketrain interval
lfp_start = st_lfp_offsetbins[trial] - window_bins[trial]
pre = []
post = []
if lfp_start < 0:
pre = np.zeros(-lfp_start)
lfp_start = 0
lfp_stop = st_lfp_offsetbins[trial] + spiketrainbins[trial] + window_bins[trial]
if lfp_stop > len(lfps[trial]):
post = np.zeros(lfp_stop - len(lfps[trial]))
lfp_stop = len(lfps[trial])
# appending pre and post to keep the correlation symmetric
lfp = lfps[trial][lfp_start:lfp_stop]
if len(pre):
lfp = np.append(pre, lfp)
if len(post):
lfp = np.append(lfp, post)
# actual calculation of correlation and therefore STA of both signals
output = scipy.signal.correlate(lfp, st_binned[trial], mode='same') / np.sum(st_binned[trial]) * lfps[trial].units
bin_start = int(len(output) / 2) - window_bins[trial]
bin_end = int(len(output) / 2) + window_bins[trial]
# one additional bin to cut STA symmetrically around time = 0
result_sta.append(output[bin_start: bin_end + 1])
result_time.append(np.arange(-window_times[trial], (window_times[trial] + 1 / lfps[trial].sampling_rate).rescale(window_times[trial].units), (1 / lfps[trial].sampling_rate).rescale(window_times[trial].units))[0: 2 * window_bins[trial] + 1] * window_times[trial].units)
used_spikes.append(int(np.sum(st_binned[trial])))
# Averaging over all trials in case of crosstrialing
if crosstrials:
result_sta[0] = averaging_STAs(result_sta, used_spikes)
# Returns array in case only single LFP and spiketrains was passed
if wrapped or crosstrials:
return result_sta[0], result_time[0], used_spikes[0]
else:
return result_sta, result_time, used_spikes
#-------------------------------------------------------------------------------
def sta_average(lfps, spiketrains, window, crosstrials=False, single_data=None):
"""
Calculates the respective spike-triggered average of the analog signals of multiple trials
by averaging the respective parts of the lfp signal.
Calculates the spike triggered average of a neo AnalogSignal or AnalogSignalArray object in a
time window +-window around the spike times in a SpikeTrain object. Acts the same as
analysis.sta_corr(lfps, spiketrains, window)
**Args**:
lfps: AnalogSignal object or AnalogSignalArray object
spikes: SpikeTrain or list of SpikeTrains objects
window: positive time interval to specify the cutout around spikes given as Quantity or
number of bins to use
crosstrials: indicates if STA is averaged over all given trials or calculated trial-wise
**Return**:
Returns a tuple (STA,time,used_spikes), where STA is a list of one-dimensional arrays with
the spike triggered average, and time is a list of the corresponding time bins.
The length of the respective array is defined by 2*window + 1, where window is
the number of bins around the spike times used.
used_spikes contains the number of spikes used for the STA. If the spiketrain did
not contain suitable spikes, the returned STA will be filled with zeros.
**Example**:
(result,time,used_spikes)=sta_average([lfp1,lfp2], [spiketrain1,spiketrain2], Quantity(10,"ms"), crosstrials)
matplotlib.pyplot.plot(time,result)
"""
# checking compatibility of data, calculating parameters of trials
(lfps, spiketrains, window_times, wrapped, num_trials, window_bins, st_lfp_offsetbins, spiketrainbins) = data_quality_check(lfps, spiketrains, window, crosstrials, single_data)
# calculate the spike-triggered-average by averaging the respective intervals of the lfp
result_sta = []
result_time = []
used_spikes = np.zeros(num_trials, dtype=int)
for trial in range(num_trials):
# summing over all respective lfp intervals around spiketimes
lfp_sum = np.zeros(2 * window_bins[trial] + 1) * lfps[trial].units
for spiketime in spiketrains[trial]:
# converting spiketime to respective bin in binned spiketrain (which starts at t_start of spiketrain)
spikebin = int(np.round(float(spiketime - spiketrains[trial].t_start) / (spiketrains[trial].t_stop - spiketrains[trial].t_start) * spiketrainbins[trial]))
# checks for sufficient lfp data around spikebin
if spikebin + st_lfp_offsetbins[trial] > window_bins[trial] and len(lfps[trial]) - (spikebin + st_lfp_offsetbins[trial]) > window_bins[trial]:
# determines lfp interval to cut with respect to spiketrain timing
bin_start = spikebin - window_bins[trial]
bin_end = spikebin + window_bins[trial] + 1
# actual copying of lfp interval
lfp_cutout = lfps[trial][st_lfp_offsetbins[trial] + bin_start:st_lfp_offsetbins[trial] + bin_end]
# conversion of lfp AnalogSignal to quantity numpy array and summing up
# TODO: This step is slow due to copying the whole array -> Faster version?
lfp_sum = lfp_sum + np.array(lfp_cutout) * lfp_cutout.units
used_spikes[trial] += 1
if used_spikes[trial] == 0:
print "No suitable spikes in trial detected. Reduce window size or supply more LFP data."
result_sta.append(lfp_sum)
else:
# normalizing STA
result_sta.append(lfp_sum / used_spikes[trial])
# generating timesteps for STA
result_time.append(np.arange(-window_times[trial], (window_times[trial] + 1 / lfps[trial].sampling_rate).rescale(window_times[trial].units), (1 / lfps[trial].sampling_rate).rescale(window_times[trial].units))[0:len(result_sta[trial])] * window_times[trial].units)
# Averaging over all trials in case of crosstrialing
if crosstrials:
result_sta[0] = averaging_STAs(result_sta, used_spikes)
# Returns array in case only single LFP and spiketrains was passed or averaging over trials was done
if wrapped or crosstrials:
return result_sta[0], result_time[0], used_spikes[0]
else:
return result_sta, result_time, used_spikes
#===============================================================================
# Supplementary functions
#===============================================================================
def data_quality_check(lfps, spiketrains, window, crosstrials, single_data):
"""
Supplementary function
Checks the properties of the given data and transforms them into a defined format for STA analysis.
**Args**:
lfps: AnalogSignal object or AnalogSignalArray object or list of AnalogSignal objects
spikes: SpikeTrain or list of SpikeTrains objects
window: positive time interval to specify the cutout around spikes given as time Quantity or
number of bins to use
crosstrials: indicates if STA will be calculated trial-wise or across all given trials
**Return**:
Returns a tuple (lfps, spiketrains, window_times, wrapped, num_trials, window_bins)
lfps, spiketrains, and window_times are of type list covering single trails
wrapped indicates whether the data needed to be wrapped or not
num_trial and window_bins are lists containing the respective values for each trial
st_lfp_offsetbins: array with number of bins between lfp start and spiketrain start
spiketrainbins: length of spiketrain in number of bins
**Example**
Illustrative call (lfp and spiketrain stand for one trial of data):
(lfps, spiketrains, window_times, wrapped, num_trials, window_bins, st_lfp_offsetbins, spiketrainbins) = data_quality_check([lfp], [spiketrain], Quantity(10, "ms"), False, None)
"""
if window <= 0:
raise ValueError("Argument 'window' must be positive.")
wrapped = False
# wrapping lfps
if type(lfps) != list and lfps.ndim == 1:
box = []
box.append(lfps)
lfps = box
wrapped = True
# wrapping spiketrains
if isinstance(spiketrains, np.ndarray):
box = []
box.append(spiketrains)
spiketrains = box
# multipling spiketrain in case of single_train option
if single_data == 'train':
template = spiketrains[0]
spiketrains = []
for trial in range(len(lfps)):
spiketrains.append(template)
# Checking for matching numbers of LFPs and spiketrains
# This makes trouble for single_data = 'lfp' option due to variable length of lfp intervals
if len(lfps) != len(spiketrains):
raise ValueError("Number of LFPs and spiketrains has to be the same")
# Checking trial-wise for matching times of lfp and spiketrain
num_trials = len(lfps)
st_lfp_offsetbins = np.zeros(num_trials, dtype=int)
spiketrainbins = np.zeros(num_trials, dtype=int)
for trial in range(num_trials):
# bin distance between start of lfp and spiketrain signal
st_lfp_offsetbins[trial] = int (((spiketrains[trial].t_start - lfps[trial].t_start) * lfps[trial].sampling_rate).rescale('dimensionless'))
spiketrainbins[trial] = int (((spiketrains[trial].t_stop - spiketrains[trial].t_start) * lfps[trial].sampling_rate).rescale('dimensionless'))
# checking time length in bins of lfps and spiketrains
if len(lfps[trial]) < spiketrainbins[trial]:
raise ValueError("LFP signal covers fewer bins than spiketrain. (LFP length: %i bins, spiketrain: %i bins)" % (len(lfps[trial]), spiketrainbins[trial]))
if st_lfp_offsetbins[trial] < 0 or len(lfps[trial]) < st_lfp_offsetbins[trial] + spiketrainbins[trial]:
raise ValueError("LFP does not cover the whole time of the spiketrain")
# checking if STA across trials is possible to calculate due to sampling rates
if crosstrials == True and any(lfp.sampling_rate != lfps[0].sampling_rate for lfp in lfps):
print "Warning: Trials to cross do not have the same sampling rate"
raise ValueError("For calculating STA of multiple trials all need the same sampling rate")
# determine correct window size for each trial and calculating the missing variable window_bins or window_times
window_times = np.zeros(num_trials) * pq.s
window_bins = []
if type(window) == pq.quantity.Quantity:
# for loop is necessary in the following lines, otherwise units will be disregarded
for trial in range(num_trials):
window_times[trial] = window
window_bins.append(int((window_times[trial] * lfps[trial].sampling_rate).rescale("dimensionless")))
# check if windowsize gives number of bins which has to be converted into time interval
elif type(window) == int:
for trial in np.arange(num_trials):
window_times[trial] = window / lfps[trial].sampling_rate
window_bins.append(window)
else:
raise ValueError("window needs to be either a time quantity or an integer")
return (lfps, spiketrains, window_times, wrapped, num_trials, window_bins, st_lfp_offsetbins, spiketrainbins)
#-------------------------------
def averaging_STAs(stas, used_spikes):
"""
Supplementary function
Calculates the average of multiple sta taking into account that they are based on
different numbers of spikes
**Args**:
stas: list of STAs to average. STAs need to be quantities with np.arrays
used_spikes: list of number of spikes used for calculating stas
**Return**:
Returns an averaged STA
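**Example** (illustrative; ``sta1`` and ``sta2`` stand for two previously computed STAs)::
averaged = averaging_STAs([sta1, sta2], [10, 30]) # sta2 carries three times the weight of sta1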
"""
cross_sta = np.zeros(len(stas[0]))
for trial in np.arange(len(stas)):
cross_sta[:] += stas[trial] * used_spikes[trial]
if np.sum(used_spikes) != 0:
return cross_sta / np.sum(used_spikes)
else: return cross_sta
| bsd-3-clause |
easonlv/BDA_py_demos | demos_ch4/demo4_1.py | 19 | 5306 | """Bayesian Data Analysis, 3rd ed
Chapter 4, demo 1
Normal approximaton for Bioassay model.
"""
from __future__ import division
import numpy as np
from scipy import optimize, stats
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# Bioassay data, (BDA3 page 86)
x = np.array([-0.86, -0.30, -0.05, 0.73])
n = np.array([5, 5, 5, 5])
y = np.array([0, 1, 3, 5])
# compute the posterior density in grid
# - usually should be computed in logarithms!
# - with alternative prior, check that range and spacing of A and B
# are sensible
ngrid = 100
A = np.linspace(-4, 8, ngrid)
B = np.linspace(-10, 40, ngrid)
ilogit_abx = 1 / (np.exp(-(A[:,None] + B[:,None,None] * x)) + 1)
p = np.prod(ilogit_abx**y * (1 - ilogit_abx)**(n - y), axis=2)
# alternative "bad" way of calculating the above two lines in a for loop
'''
p = np.empty((len(B),len(A))) # allocate space
for i in range(len(A)):
for j in range(len(B)):
ilogit_abx_ij = (1 / (np.exp(-(A[i] + B[j] * x)) + 1))
p[j,i] = np.prod(ilogit_abx_ij**y * ilogit_abx_ij**(n - y))
'''
# sample from the grid
nsamp = 1000
samp_indices = np.unravel_index(
np.random.choice(p.size, size=nsamp, p=p.ravel()/np.sum(p)),
p.shape
)
samp_A = A[samp_indices[1]]
samp_B = B[samp_indices[0]]
# add random jitter, see BDA3 p. 76
samp_A += (np.random.rand(nsamp) - 0.5) * (A[1]-A[0])
samp_B += (np.random.rand(nsamp) - 0.5) * (B[1]-B[0])
# samples of LD50
samp_ld50 = -samp_A / samp_B
# Find the mode by minimising negative log posterior. Compute gradients and
# Hessian analytically, and use Newton's method for optimisation. You may use
# optimisation routines below for checking your results. See help for
# scipy.optimize.minimize.
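# A possible analytic gradient of the negative log posterior for the Newton step
# mentioned above (a sketch only; the optimize.minimize call below does not use it,
# and the helper name is ours):
def bioassaygrad(w):
    a, b = w
    z = 1 / (1 + np.exp(-(a + b * x)))
    return np.array([-np.sum(y - n * z), -np.sum((y - n * z) * x)])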
# Define the optimised function
def bioassayfun(w):
a = w[0]
b = w[1]
et = np.exp(a + b * x)
z = et / (1 + et)
e = - np.sum(y * np.log(z) + (n - y) * np.log(1 - z))
return e
# initial guess
w0 = np.array([0.0, 0.0])
# optimise
optim_res = optimize.minimize(bioassayfun, w0)
# extract desired results
w = optim_res['x']
S = optim_res['hess_inv']
# compute the normal approximation density in grid
# this is just for the illustration
# Construct a grid array of shape (ngrid, ngrid, 2) from A and B. Although
# Numpy's concatenation functions do not support broadcasting, a clever trick
# can be applied to overcome this without unnecessary memory copies
# (see Numpy's documentation for strides for more information):
A_broadcasted = np.lib.stride_tricks.as_strided(
A, shape=(ngrid,ngrid), strides=(0,A.strides[0]))
B_broadcasted = np.lib.stride_tricks.as_strided(
B, shape=(ngrid,ngrid), strides=(B.strides[0],0))
grid = np.dstack((A_broadcasted, B_broadcasted))
p_norm = stats.multivariate_normal.pdf(x=grid, mean=w, cov=S)
# draw samples from the distribution
samp_norm = stats.multivariate_normal.rvs(mean=w, cov=S, size=1000)
# ====== Plotting
fig = plt.figure(figsize=(12,10))
fig.subplots_adjust(wspace=0.4, hspace=0.25)
# plot the posterior density
plt.subplot(2,3,1)
plt.imshow(p, origin='lower', aspect='auto', extent=(A[0], A[-1], B[0], B[-1]))
plt.xlim([-2,6])
plt.ylim([-10,30])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
# plot the samples
plt.subplot(2,3,2)
plt.scatter(samp_A, samp_B, 10, c='#377eb8', linewidth=0)
plt.xlim([-2,6])
plt.ylim([-10,30])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
plt.text(0,-7,'p(beta>0)={:.2f}'.format(np.mean(samp_B>0)))
# plot the histogram of LD50
plt.subplot(2,3,3)
plt.hist(samp_ld50, np.linspace(-0.8, 0.8, 31))
plt.xlim([-0.8, 0.8])
plt.xlabel(r'LD50 = -$\alpha/\beta$')
plt.yticks(())
plt.xticks(np.linspace(-0.8, 0.8, 5))
# plot the posterior density for normal approx.
plt.subplot(2,3,4)
plt.imshow(p_norm, origin='lower', aspect='auto',
extent=(A[0], A[-1], B[0], B[-1]))
plt.xlim([-2,6])
plt.ylim([-10,30])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
# plot the samples from the normal approx.
plt.subplot(2,3,5)
plt.scatter(samp_norm[:,0], samp_norm[:,1], 10, c='#377eb8', linewidth=0)
plt.xlim([-2,6])
plt.ylim([-10,30])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
# Normal approximation does not take into account that the posterior
# is not symmetric and that there is very low density for negative
# beta values. Based on samples from the normal approximation
# it is estimated that there is about 4% probability that beta is negative!
plt.text(0,-7,'p(beta>0)={:.2f}'.format(np.mean(samp_norm[:,1]>0)))
# Plot the histogram of LD50
plt.subplot(2,3,6)
# Since we have strong prior belief that beta should not be negative we can
# improve our normal approximation by conditioning on beta>0.
bpi = samp_norm[:,1] > 0
samp_ld50_norm = - samp_norm[bpi,0] / samp_norm[bpi,1]
plt.hist(samp_ld50_norm, np.linspace(-0.8, 0.8, 31))
plt.xlim([-0.8, 0.8])
plt.xlabel(r'LD50 = -$\alpha/\beta$')
plt.yticks(())
plt.xticks(np.linspace(-0.8, 0.8, 5))
# Add super title
plt.suptitle('Normal approximaton for Bioassay model', fontsize=18)
plt.show()
| gpl-3.0 |
yongfuyang/vnpy | vn.trader/ctaAlgo/StrategyRangeBreak.py | 2 | 11558 | # encoding: UTF-8
"""
An ATR-RSI combined trading strategy, suitable for 1-minute and 5-minute bars on stock index futures.
Notes:
1. The author makes no guarantee of trading profits; the strategy code is for reference only.
2. This strategy requires talib; users who have not installed it should first follow the tutorial at www.vnpy.org.
3. After importing IF0000_1min.csv into MongoDB with ctaHistoryData.py, run this file directly to backtest the strategy.
"""
from ctaBase import *
from ctaTemplate import CtaTemplate
import talib
import numpy as np
########################################################################
class RangeBreakStrategy(CtaTemplate):
"""RangeBreak intraday range-volatility breakout trading strategy"""
className = 'RangeBreakStrategy'
author = u'用Python的交易员'
# strategy parameters
percentOfRange = 0.3 # range (amplitude) percentage
trailingPercent = 1.0 # trailing stop in percent
initDays = 10 # number of days of data used for initialization
fixedSize = 1 # risk
useTrailingStop = False # whether to use a trailing stop
profitLock = 30 # profit lock
trailingStop = 20 # trailing stop
# strategy variables
bar = None # K-line (bar) object
barMinute = EMPTY_STRING # current minute of the bar
dayOpen = 0 # today's open
preDayRange = 0 # previous range (amplitude)
upperBand = 0 # upper band
lowerBand = 0 # lower band
myPrice = 0 # order price
bufferSize = 100 # size of the data buffer needed
bufferCount = 0 # number of bars buffered so far
highArray = np.zeros(bufferSize) # array of bar highs
lowArray = np.zeros(bufferSize) # array of bar lows
closeArray = np.zeros(bufferSize) # array of bar closes
intraTradeHigh = 0 # highest price while holding a position, used for the trailing stop
intraTradeLow = 0 # lowest price while holding a position, used for the trailing stop
orderList = [] # list of issued order IDs
# list of parameters, holding the parameter names
paramList = ['name',
'className',
'author',
'vtSymbol',
'percentOfRange']
# list of variables, holding the variable names
varList = ['inited',
'trading',
'pos',
'dayOpen',
'preDayRange',
'upperBand',
'lowerBand']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(RangeBreakStrategy, self).__init__(ctaEngine, setting)
# Note: mutable object attributes of a strategy class (typically list, dict, etc.) need to be
# re-created when the strategy is initialized; otherwise data would be shared between multiple
# strategy instances, which could lead to subtle logic errors. Declaring these mutable attributes
# at class level is optional (they could all live in __init__); it is done here mainly for readability.
self.isPrePosHaved = False
self.isAlreadyTraded = False
#----------------------------------------------------------------------
def onInit(self):
"""Initialize the strategy (must be implemented by the user's subclass)"""
self.writeCtaLog(u'%s strategy initialization' % self.name)
# initialize the RSI entry thresholds
self.rsiBuy = 50 + self.rsiEntry
self.rsiSell = 50 - self.rsiEntry
# load historical data and initialize strategy values by replaying it
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""Start the strategy (must be implemented by the user's subclass)"""
self.writeCtaLog(u'%s strategy started' % self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""Stop the strategy (must be implemented by the user's subclass)"""
self.writeCtaLog(u'%s strategy stopped' % self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""Process an incoming market TICK (must be implemented by the user's subclass)"""
# build K-line bars from ticks
tickMinute = tick.datetime.minute
if tickMinute != self.barMinute:
if self.bar:
self.onBar(self.bar)
bar = CtaBarData()
bar.vtSymbol = tick.vtSymbol
bar.symbol = tick.symbol
bar.exchange = tick.exchange
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
bar.datetime = tick.datetime # the bar's time is set to the time of its first tick
self.bar = bar # cached locally to save one attribute lookup and speed things up
self.barMinute = tickMinute # update the current minute
else: # otherwise keep accumulating into the current bar
bar = self.bar # same local caching for speed
bar.high = max(bar.high, tick.lastPrice)
bar.low = min(bar.low, tick.lastPrice)
bar.close = tick.lastPrice
#----------------------------------------------------------------------
def onBar(self, bar):
"""Process an incoming bar (must be implemented by the user's subclass)"""
# cancel previously issued orders that have not been filled yet (both limit and stop orders)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
# store the bar data
self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
        # Compute indicator values
self.atrValue = talib.ATR(self.highArray,
self.lowArray,
self.closeArray,
self.atrLength)[-1]
self.atrArray[0:self.bufferSize-1] = self.atrArray[1:self.bufferSize]
self.atrArray[-1] = self.atrValue
self.atrCount += 1
if self.atrCount < self.bufferSize:
return
self.atrMa = talib.MA(self.atrArray,
self.atrMaLength)[-1]
self.rsiValue = talib.RSI(self.closeArray,
self.rsiLength)[-1]
        # Decide whether to trade
        # Currently no position
if self.pos == 0:
self.intraTradeHigh = bar.high
self.intraTradeLow = bar.low
            # The ATR crossing above its moving average means short-term volatility
            # is increasing, i.e. a trend is more likely, which suits a CTA entry
if self.atrValue > self.atrMa:
                # In a trending market the RSI saturates in the overbought/oversold zone; use it as the entry signal
if self.rsiValue > self.rsiBuy:
                    # To help ensure a fill, place the order 5 index points beyond the close
self.buy(bar.close+5, self.fixedSize)
elif self.rsiValue < self.rsiSell:
self.short(bar.close-5, self.fixedSize)
        # Holding a long position
elif self.pos == 1:
            # Track the highest price of the long holding period and reset the low
self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
self.intraTradeLow = bar.low
            # Compute the trailing stop for the long position
longStop = self.intraTradeHigh * (1-self.trailingPercent/100)
            # Issue a local stop order and record the order ID for later cancellation
orderID = self.sell(longStop, abs(self.pos), stop=True)
self.orderList.append(orderID)
        # Holding a short position
elif self.pos == -1:
self.intraTradeLow = min(self.intraTradeLow, bar.low)
self.intraTradeHigh = bar.high
shortStop = self.intraTradeLow * (1+self.trailingPercent/100)
orderID = self.cover(shortStop, abs(self.pos), stop=True)
self.orderList.append(orderID)
        # Emit a status update event
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
pass
#----------------------------------------------------------------------
def onPosition(self, pos):
        if self.isPrePosHaved or self.isAlreadyTraded:  # skip if the historical position was already loaded, or we have already traded
return
elif pos.position != 0:
if pos.direction == DIRECTION_LONG:
self.pos = pos.position
else:
self.pos = -1 * pos.position
self.lastEntryPrice = pos.price
self.isPrePosHaved = True
            #print (u'{0} {1} historical position {2} average entry price {3}'.format(datetime.now(), self.vtSymbol, self.pos, pos.price))
#pass
if __name__ == '__main__':
    # Allows backtesting by simply double-clicking this file
    # PyQt4 is imported to make sure matplotlib uses PyQt4 instead of PySide, preventing initialization errors
    from ctaBacktesting import *
    from PyQt4 import QtCore, QtGui
    # Create the backtesting engine
    engine = BacktestingEngine()
    # Set the engine's backtesting mode to bar mode
    engine.setBacktestingMode(engine.BAR_MODE)
    # Set the start date of the backtesting data
    engine.setStartDate('20161010')
    # Set product-related parameters
    engine.setSlippage(0.2)     # one tick for the index future
    engine.setRate(0.3 / 10000) # commission of 0.3 per 10000
    engine.setSize(15)          # contract size of the index future
    # Set the historical database to use
    engine.setDatabase(MINUTE_DB_NAME, 'ag1612')
    ## Create the strategy object in the engine
    # d = {'atrLength': 11}
    # engine.initStrategy(AtrRsiStrategy, d)
    ## Run the backtest
    ##engine.runBacktesting()
    ## Show the backtesting result
    ##engine.showBacktestingResult()
    # Run the optimization
    setting = OptimizationSetting()                 # create a new optimization task settings object
    setting.setOptimizeTarget('capital')            # the optimization target is the strategy's net profit
    setting.addParameter('atrLength', 11, 20, 1)    # first optimization parameter atrLength: start 11, end 20, step 1
    setting.addParameter('atrMaLength', 20, 30, 5)  # second optimization parameter atrMaLength: start 20, end 30, step 5
    # Benchmark environment: i7-3770, 3.4 GHz, 8 cores, 16 GB RAM, Windows 7 Professional
    # A bunch of other programs were running during the test, so the timings are indicative only
    import time
    start = time.time()
    # Run the single-process optimization (prints results automatically), takes about 359 seconds
    # engine.runOptimization(AtrRsiStrategy, setting)
    # Multi-process optimization, takes about 89 seconds
    engine.runParallelOptimization(RangeBreakStrategy, setting)
    print u'Time elapsed: %s' % (time.time() - start)
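# Note: dayOpen, preDayRange, upperBand and lowerBand are declared above, but the
# band computation itself is not implemented in this file. A typical range-break
# band calculation would look like the following sketch (illustrative only, not
# the author's code):
#
#     self.upperBand = self.dayOpen + self.percentOfRange * self.preDayRange
#     self.lowerBand = self.dayOpen - self.percentOfRange * self.preDayRange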
| mit |
jonathansick/m31hst | m31hst/phatast.py | 1 | 7898 | #!/usr/bin/env python
# encoding: utf-8
"""
PHAT v2 artificial star tests.
2015-03-31 - Created by Jonathan Sick
"""
import numpy as np
from sklearn.cluster import KMeans
from astropy.table import Table
from astroML.stats import binned_statistic_2d
from m31hst.paths import phat_v2_ast_path
def load_phat_ast_table():
"""Read the PHAT v2 AST catalog.
From http://cdsarc.u-strasbg.fr/vizier/ftp/cats/J/ApJS/215/9/ReadMe
1- 11 F11.8 deg RAdeg Right Ascension in decimal degrees (J2000)
13- 23 F11.8 deg DEdeg Declination in decimal degrees (J2000)
25- 30 F6.3 mag F275W-in [14.1/36.9] Input HST/WFC3 F275W band mag
32- 37 F6.3 mag F275W-out [14.1/25.4]?=99.999 Output HST/WFC3 F275W
39- 44 F6.3 mag F336W-in [14.4/34.8] Input HST/WFC3 F336W band mag
46- 51 F6.3 mag F336W-out ?=99.999 Output HST/WFC3 F336W band mag
53- 58 F6.3 mag F475W-in Input HST/ACS F475W band magnitude
60- 65 F6.3 mag F475W-out ?=99.999 Output HST/ACS F475W band mag
67- 72 F6.3 mag F814W-in Input HST/ACS F814W band magnitude
74- 79 F6.3 mag F814W-out ?=99.999 Output HST/ACS F814W band mag
81- 86 F6.3 mag F110W-in ?=99.999 Input HST/WFC3 F110W band mag
88- 93 F6.3 mag F110W-out ?=99.999 Output HST/WFC3 F110W band mag
95-100 F6.3 mag F160W-in [13.5/27.3]?=99.999 Input HST/WFC3 F160W
102-107 F6.3 mag F160W-out [13.5/25.7]?=99.999 Output HST/WFC3 F160W
"""
colnames = ['ra',
'dec',
'f275w_in',
'f275w_out',
'f336w_in',
'f336w_out',
'f475w_in',
'f475w_out',
'f814w_in',
'f814w_out',
'f110w_in',
'f110w_out',
'f160w_in',
'f160w_out']
t = Table.read(phat_v2_ast_path(),
format='ascii.no_header',
names=colnames,
guess=False,
delimiter=' ')
return t
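# Example usage (a sketch; assumes the catalog file referenced by
# phat_v2_ast_path() is available locally):
#
#     t = load_phat_ast_table()
#     print(t.colnames)    # ['ra', 'dec', 'f275w_in', 'f275w_out', ...]
#     print(len(t))        # number of artificial-star tests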
class PhatAstTable(object):
"""Data structure for the PHAT AST results."""
def __init__(self):
super(PhatAstTable, self).__init__()
self.t = load_phat_ast_table()
cluster_centers, self.labels = self._label_stars()
self._define_fields(cluster_centers, self.labels)
def _label_stars(self):
km = KMeans(n_clusters=6)
xy = np.vstack((self.t['ra'], self.t['dec'])).T
km.fit(xy)
return km.cluster_centers_, km.labels_
def _define_fields(self, cluster_centers, labels):
# Pre-baked list of centers, ordered sanely
known_centers = [[11.55581084, 42.14674574],
[11.15978774, 41.63931688],
[10.87125638, 41.45011536],
[10.80073952, 41.31165493],
[10.70681719, 41.26110849],
[10.68679924, 41.30852815]]
self.fields = []
for c in known_centers:
dists = np.hypot(c[0] - cluster_centers[:, 0],
c[1] - cluster_centers[:, 1])
i = np.argmin(dists)
d = {'center': c,
'label': i}
self.fields.append(d)
def write_crowdfile_for_field(self, path, fieldnum,
bands=('f275w', 'f336w', 'f475w',
'f814w', 'f110w', 'f160w')):
"""Write a StarFISH-compatible crowding file.
Parameters
----------
path : str
Filepath where the crowdfile will be written.
fieldnum : int
Index of the PHAT AST fields to use (0-5).
bands : list
List of bands (in order) to include in the crowdfile.
"""
label = self.fields[fieldnum]['label']
sel = np.where(self.labels == label)[0]
cols = [self.t['ra'][sel], self.t['dec'][sel]]
fmt = ['%.8f', '%.8f']
for band in bands:
inkey = "{0}_in".format(band.lower())
outkey = "{0}_out".format(band.lower())
diffs = self.t[inkey][sel] - self.t[outkey][sel]
dropped = np.where(np.abs(diffs) > 9.)[0]
indata = np.array(self.t[inkey][sel])
diffdata = np.array(diffs)
diffdata[dropped] = 9.99
cols.append(indata)
cols.append(diffdata)
fmt.append('%2.2f')
fmt.append('%+1.2f')
crowddata = np.vstack(cols).T
np.savetxt(path, crowddata,
delimiter=' ',
fmt=fmt)
def band_key_in(self, band):
return "{0}_in".format(band.lower())
def band_key_out(self, band):
return "{0}_out".format(band.lower())
def completeness_hess(self, fieldnum, band,
x_mag, y_mag, xlim, ylim, dmag):
"""Make a Hess diagram of completeness acros the plane."""
label = self.fields[fieldnum]['label']
s = np.where(self.labels == label)[0]
tt = self.t[s]
if isinstance(y_mag, basestring):
# a single mag
y = tt[self.band_key_in(y_mag)]
else:
b1, b2 = y_mag
y = tt[self.band_key_in(b1)] - tt[self.band_key_in(b2)]
if isinstance(x_mag, basestring):
# a single mag
x = tt[self.band_key_in(x_mag)]
else:
b1, b2 = x_mag
x = tt[self.band_key_in(b1)] - tt[self.band_key_in(b2)]
# bin the number of stars into the hess plane and the number of
# recovered stars to get the completeness fraction
def _completeness(values):
v = np.array(values)
if len(v) == 0:
return np.nan
else:
return float(np.where(v < 90.)[0].shape[0]) / v.shape[0]
# extend stop so it is included; len(edges) is nx+1
x_grid = np.arange(min(xlim), max(xlim) + dmag / 2., dmag)
y_grid = np.arange(min(ylim), max(ylim) + dmag / 2., dmag)
H, x_edges, y_edges = binned_statistic_2d(x, y,
tt[self.band_key_out(band)],
statistic=_completeness,
bins=[x_grid, y_grid])
return H.T, x_edges, y_edges
def error_hess(self, fieldnum, band,
x_mag, y_mag, xlim, ylim, dmag):
"""Make a Hess diagram of the mean error across the Hess plane."""
label = self.fields[fieldnum]['label']
s = np.where(self.labels == label)[0]
tt = self.t[s]
if isinstance(y_mag, basestring):
# a single mag
y = tt[self.band_key_in(y_mag)]
else:
b1, b2 = y_mag
y = tt[self.band_key_in(b1)] - tt[self.band_key_in(b2)]
if isinstance(x_mag, basestring):
# a single mag
x = tt[self.band_key_in(x_mag)]
else:
b1, b2 = x_mag
x = tt[self.band_key_in(b1)] - tt[self.band_key_in(b2)]
# extend stop so it is included; len(edges) is nx+1
x_grid = np.arange(min(xlim), max(xlim) + dmag / 2., dmag)
y_grid = np.arange(min(ylim), max(ylim) + dmag / 2., dmag)
diff = tt[self.band_key_in(band)] - tt[self.band_key_out(band)]
def filtered_sigma(vals):
"""Filter out the dropped stars from sigma computation."""
s = np.where(np.abs(vals) < 20.)[0]
return np.std(vals[s])
H, x_edges, y_edges = binned_statistic_2d(x, y,
diff,
statistic=filtered_sigma,
bins=[x_grid, y_grid])
return H.T, x_edges, y_edges
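# Example usage (a sketch; the field number, bands, magnitude limits and bin
# size are illustrative values only):
#
#     ast = PhatAstTable()
#     H, xe, ye = ast.completeness_hess(0, 'f475w',
#                                       x_mag=('f475w', 'f814w'), y_mag='f475w',
#                                       xlim=(-1, 3), ylim=(18, 28), dmag=0.1)
#     ast.write_crowdfile_for_field('field0.crowd', 0,
#                                   bands=('f475w', 'f814w'))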
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter4/fig_bootstrap_gaussian.py | 4 | 2843 | r"""
Bootstrap Calculations of Error on Mean
---------------------------------------
Figure 4.3.
The bootstrap uncertainty estimates for the sample standard deviation
:math:`\sigma` (dashed line; see eq. 3.32) and :math:`\sigma_G` (solid line;
see eq. 3.36). The sample consists of N = 1000 values drawn from a Gaussian
distribution with :math:`\mu = 0` and :math:`\sigma = 1`. The bootstrap
estimates are based on 10,000 samples. The thin lines show Gaussians with
the widths determined as :math:`s / \sqrt{2(N - 1)}` (eq. 3.35) for
:math:`\sigma` and :math:`1.06 s / \sqrt{N}` (eq. 3.37) for :math:`\sigma_G`.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
from astroML.resample import bootstrap
from astroML.stats import sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
m = 1000 # number of points
n = 10000 # number of bootstraps
#------------------------------------------------------------
# sample values from a normal distribution
np.random.seed(123)
data = norm(0, 1).rvs(m)
#------------------------------------------------------------
# Compute bootstrap resamplings of data
mu1_bootstrap = bootstrap(data, n, np.std, kwargs=dict(axis=1, ddof=1))
mu2_bootstrap = bootstrap(data, n, sigmaG, kwargs=dict(axis=1))
#------------------------------------------------------------
# Compute the theoretical expectations for the two distributions
x = np.linspace(0.8, 1.2, 1000)
sigma1 = 1. / np.sqrt(2 * (m - 1))
pdf1 = norm(1, sigma1).pdf(x)
sigma2 = 1.06 / np.sqrt(m)
pdf2 = norm(1, sigma2).pdf(x)
#------------------------------------------------------------
# Plot the results
fig, ax = plt.subplots(figsize=(5, 3.75))
ax.hist(mu1_bootstrap, bins=50, normed=True, histtype='step',
color='blue', ls='dashed', label=r'$\sigma\ {\rm (std. dev.)}$')
ax.plot(x, pdf1, color='gray')
ax.hist(mu2_bootstrap, bins=50, normed=True, histtype='step',
color='red', label=r'$\sigma_G\ {\rm (quartile)}$')
ax.plot(x, pdf2, color='gray')
ax.set_xlim(0.82, 1.18)
ax.set_xlabel(r'$\sigma$')
ax.set_ylabel(r'$p(\sigma|x,I)$')
ax.legend()
plt.show()
| bsd-2-clause |
aetilley/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/odl/space/pspace.py | 1 | 46185 | # Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Cartesian products of `LinearSpace` instances."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from builtins import range, str, super, zip
from future import standard_library
standard_library.install_aliases()
from numbers import Integral
from itertools import product
import numpy as np
from odl.set import LinearSpace, LinearSpaceElement
from odl.space.weighting import (
Weighting, ArrayWeighting, ConstWeighting, NoWeighting,
CustomInner, CustomNorm, CustomDist)
from odl.util import is_real_dtype
from odl.util.ufuncs import ProductSpaceUfuncs
__all__ = ('ProductSpace', 'ProductSpaceElement')
class ProductSpace(LinearSpace):
"""Cartesian product of `LinearSpace`'s.
A product space is the Cartesian product ``X_1 x ... x X_n`` of
linear spaces ``X_i``. It is itself a linear space, where the linear
combination is defined component-wise. Inner product, norm and
distance can also be defined in natural ways from the corresponding
functions in the individual components.
"""
def __init__(self, *spaces, **kwargs):
"""Initialize a new instance.
Parameters
----------
space1,...,spaceN : `LinearSpace` or int
The individual spaces ("factors / parts") in the product
space. Can also be given as ``space, n`` with ``n`` integer,
in which case the power space ``space ** n`` is created.
exponent : non-zero float or ``float('inf')``, optional
Order of the product distance/norm, i.e.
``dist(x, y) = np.linalg.norm(x-y, ord=exponent)``
``norm(x) = np.linalg.norm(x, ord=exponent)``
Values ``0 <= exponent < 1`` are currently unsupported
due to numerical instability. See ``Notes`` for further
information about the interpretation of the values.
Default: 2.0
field : `Field`, optional
Scalar field of the resulting space.
Default: ``spaces[0].field``
weighting : optional
Use weighted inner product, norm, and dist. The following
types are supported as ``weighting``:
``None`` : no weighting (default)
`Weighting` : weighting class, used directly. Such a
class instance can be retrieved from the space by the
`ProductSpace.weighting` property.
`array-like` : weigh each component with one entry from the
array. The array must be one-dimensional and have the same
length as the number of spaces.
float : same weighting factor in each component
Other Parameters
----------------
dist : callable, optional
The distance function defining a metric on the space.
It must accept two `ProductSpaceElement` arguments and
fulfill the following mathematical conditions for any
three space elements ``x, y, z``:
- ``dist(x, y) >= 0``
- ``dist(x, y) = 0`` if and only if ``x = y``
- ``dist(x, y) = dist(y, x)``
- ``dist(x, y) <= dist(x, z) + dist(z, y)``
By default, ``dist(x, y)`` is calculated as ``norm(x - y)``.
This creates an intermediate array ``x - y``, which can be
avoided by choosing ``dist_using_inner=True``.
Cannot be combined with: ``weighting, norm, inner``
norm : callable, optional
The norm implementation. It must accept an
`ProductSpaceElement` argument, return a float and satisfy the
following conditions for all space elements ``x, y`` and scalars
``s``:
- ``||x|| >= 0``
- ``||x|| = 0`` if and only if ``x = 0``
- ``||s * x|| = |s| * ||x||``
- ``||x + y|| <= ||x|| + ||y||``
By default, ``norm(x)`` is calculated as ``inner(x, x)``.
Cannot be combined with: ``weighting, dist, inner``
inner : callable, optional
The inner product implementation. It must accept two
`ProductSpaceElement` arguments, return a element from
the field of the space (real or complex number) and
satisfy the following conditions for all space elements
``x, y, z`` and scalars ``s``:
- ``<x, y> = conj(<y, x>)``
- ``<s*x + y, z> = s * <x, z> + <y, z>``
- ``<x, x> = 0`` if and only if ``x = 0``
Cannot be combined with: ``weighting, dist, norm``
dist_using_inner : bool, optional
Calculate ``dist`` using the formula
``||x - y||^2 = ||x||^2 + ||y||^2 - 2 * Re <x, y>``
This avoids the creation of new arrays and is thus faster
for large arrays. On the downside, it will not evaluate to
exactly zero for equal (but not identical) ``x`` and ``y``.
This option can only be used if ``exponent`` is 2.0.
Default: ``False``.
Cannot be combined with: ``dist``
Returns
-------
prodspace : `ProductSpace`
See Also
--------
ProductSpaceArrayWeighting
ProductSpaceConstWeighting
Examples
--------
>>> r2x3 = ProductSpace(odl.rn(2), odl.rn(3))
Notes
-----
Inner product, norm and distance are evaluated by collecting
the result of the corresponding operation in the individual
components and reducing the resulting vector to a single number.
The ``exponent`` parameter influences only this last part,
not the computations in the individual components. We give the
exact definitions in the following:
Let :math:`\mathcal{X} = \mathcal{X}_1 \\times \dots \\times
\mathcal{X}_d` be a product space, and
:math:`\langle \cdot, \cdot\\rangle_i`,
:math:`\lVert \cdot \\rVert_i`, :math:`d_i(\cdot, \cdot)` be
inner products, norms and distances in the respective
component spaces.
**Inner product:**
:math:`\langle x, y \\rangle =
\\sum_{i=1}^d \langle x_i, y_i \\rangle_i`
**Norm:**
- :math:`p < \infty`:
:math:`\lVert x\\rVert =
\left( \sum_{i=1}^d \lVert x_i \\rVert_i^p \\right)^{1/p}`
- :math:`p = \infty`:
:math:`\lVert x\\rVert =
\max_i \lVert x_i \\rVert_i`
**Distance:**
- :math:`p < \infty`:
:math:`d(x, y) =
\left( \sum_{i=1}^d d_i(x_i, y_i)^p \\right)^{1/p}`
- :math:`p = \infty`:
:math:`d(x, y) =
\max_i d_i(x_i, y_i)`
To implement own versions of these functions, you can use
the following snippet to gather the vector of norms (analogously
for inner products and distances)::
norms = np.fromiter(
(xi.norm() for xi in x),
dtype=np.float64, count=len(x))
"""
field = kwargs.pop('field', None)
dist = kwargs.pop('dist', None)
norm = kwargs.pop('norm', None)
inner = kwargs.pop('inner', None)
weighting = kwargs.pop('weighting', None)
exponent = kwargs.pop('exponent', 2.0)
dist_using_inner = bool(kwargs.pop('dist_using_inner', False))
if kwargs:
raise TypeError('got unexpected keyword arguments: {}'
''.format(kwargs))
# Check validity of option combination (3 or 4 out of 4 must be None)
if sum(x is None for x in (dist, norm, inner, weighting)) < 3:
raise ValueError('invalid combination of options weighting, '
'dist, norm and inner')
if any(x is not None for x in (dist, norm, inner)) and exponent != 2.0:
raise ValueError('`exponent` cannot be used together with '
'inner, norm or dist')
# Make a power space if the second argument is an integer
if (len(spaces) == 2 and
isinstance(spaces[0], LinearSpace) and
isinstance(spaces[1], Integral)):
spaces = [spaces[0]] * spaces[1]
# Validate the space arguments
wrong_spaces = [spc for spc in spaces
if not isinstance(spc, LinearSpace)]
if wrong_spaces:
raise TypeError('{!r} not LinearSpace instance(s)'
''.format(wrong_spaces))
if not all(spc.field == spaces[0].field for spc in spaces):
raise ValueError('all spaces must have the same field')
# Assign spaces and field
self.__spaces = tuple(spaces)
self.__size = len(spaces)
if field is None:
if self.size == 0:
raise ValueError('no spaces provided, cannot deduce field')
field = self.spaces[0].field
# Cache for efficiency
self.__is_power_space = all(spc == self.spaces[0]
for spc in self.spaces[1:])
super().__init__(field)
# Assign weighting
if weighting is not None:
if isinstance(weighting, Weighting):
self.__weighting = weighting
elif np.isscalar(weighting):
self.__weighting = ProductSpaceConstWeighting(
weighting, exponent, dist_using_inner=dist_using_inner)
elif weighting is None:
# Need to wait until dist, norm and inner are handled
pass
else: # last possibility: make a product space element
arr = np.asarray(weighting)
if arr.dtype == object:
raise ValueError('invalid weighting argument {}'
''.format(weighting))
if arr.ndim == 1:
self.__weighting = ProductSpaceArrayWeighting(
arr, exponent, dist_using_inner=dist_using_inner)
else:
raise ValueError('weighting array has {} dimensions, '
'expected 1'.format(arr.ndim))
elif dist is not None:
self.__weighting = ProductSpaceCustomDist(dist)
elif norm is not None:
self.__weighting = ProductSpaceCustomNorm(norm)
elif inner is not None:
self.__weighting = ProductSpaceCustomInner(inner)
else: # all None -> no weighing
self.__weighting = ProductSpaceNoWeighting(
exponent, dist_using_inner=dist_using_inner)
@property
def size(self):
"""Number of factors."""
return self.__size
def __len__(self):
"""Return ``len(self)``."""
return self.size
@property
def shape(self):
"""Number of spaces per axis."""
# Currently supporting only 1d product spaces
return (self.size,)
@property
def spaces(self):
"""A tuple containing all spaces."""
return self.__spaces
@property
def is_power_space(self):
"""``True`` if all member spaces are equal."""
return self.__is_power_space
@property
def exponent(self):
"""Exponent of the product space norm/dist, ``None`` for custom."""
return self.weighting.exponent
@property
def weighting(self):
"""This space's weighting scheme."""
return self.__weighting
@property
def is_weighted(self):
"""Return ``True`` if weighting is not `ProductSpaceNoWeighting`."""
return not isinstance(self.weighting, ProductSpaceNoWeighting)
@property
def dtype(self):
"""The data type of this space.
This is only well defined if all subspaces have the same dtype.
Raises
------
AttributeError
If any of the subspaces does not implement `dtype` or if the dtype
of the subspaces does not match.
"""
dtypes = [space.dtype for space in self.spaces]
if all(dtype == dtypes[0] for dtype in dtypes):
return dtypes[0]
else:
raise AttributeError("`dtype`'s of subspaces not equal")
def element(self, inp=None, cast=True):
"""Create an element in the product space.
Parameters
----------
inp : optional
If ``inp`` is ``None``, a new element is created from
scratch by allocation in the spaces. If ``inp`` is
already an element of this space, it is re-wrapped.
Otherwise, a new element is created from the
components by calling the ``element()`` methods
in the component spaces.
cast : bool
If ``True``, casting is allowed. Otherwise, a ``TypeError``
is raised for input that is not a sequence of elements of
the spaces that make up this product space.
Returns
-------
element : `ProductSpaceElement`
The new element
Examples
--------
>>> r2, r3 = odl.rn(2), odl.rn(3)
>>> vec_2, vec_3 = r2.element(), r3.element()
>>> r2x3 = ProductSpace(r2, r3)
>>> vec_2x3 = r2x3.element()
>>> vec_2.space == vec_2x3[0].space
True
>>> vec_3.space == vec_2x3[1].space
True
Create an element of the product space
>>> r2, r3 = odl.rn(2), odl.rn(3)
>>> prod = ProductSpace(r2, r3)
>>> x2 = r2.element([1, 2])
>>> x3 = r3.element([1, 2, 3])
>>> x = prod.element([x2, x3])
>>> print(x)
{[1.0, 2.0], [1.0, 2.0, 3.0]}
"""
# If data is given as keyword arg, prefer it over arg list
if inp is None:
inp = [space.element() for space in self.spaces]
if inp in self:
return inp
if len(inp) != len(self):
raise ValueError('length of `inp` {} does not match length of '
'space {}'.format(len(inp), len(self)))
if (all(isinstance(v, LinearSpaceElement) and v.space == space
for v, space in zip(inp, self.spaces))):
parts = list(inp)
elif cast:
# Delegate constructors
parts = [space.element(arg)
for arg, space in zip(inp, self.spaces)]
else:
raise TypeError('input {!r} not a sequence of elements of the '
'component spaces'.format(inp))
return self.element_type(self, parts)
@property
def examples(self):
"""Return examples from all sub-spaces."""
for examples in product(*[spc.examples for spc in self.spaces]):
name = ', '.join(name for name, _ in examples)
element = self.element([elem for _, elem in examples])
yield (name, element)
def zero(self):
"""Create the zero element of the product space.
The i-th component of the product space zero element is the
zero element of the i-th space in the product.
Parameters
----------
None
Returns
-------
zero : ProductSpaceElement
The zero element in the product space.
Examples
--------
>>> r2, r3 = odl.rn(2), odl.rn(3)
>>> zero_2, zero_3 = r2.zero(), r3.zero()
>>> r2x3 = ProductSpace(r2, r3)
>>> zero_2x3 = r2x3.zero()
>>> zero_2 == zero_2x3[0]
True
>>> zero_3 == zero_2x3[1]
True
"""
return self.element([space.zero() for space in self.spaces])
def one(self):
"""Create the one element of the product space.
The i-th component of the product space one element is the
one element of the i-th space in the product.
Parameters
----------
None
Returns
-------
one : ProductSpaceElement
The one element in the product space.
Examples
--------
>>> r2, r3 = odl.rn(2), odl.rn(3)
>>> one_2, one_3 = r2.one(), r3.one()
>>> r2x3 = ProductSpace(r2, r3)
>>> one_2x3 = r2x3.one()
>>> one_2 == one_2x3[0]
True
>>> one_3 == one_2x3[1]
True
"""
return self.element([space.one() for space in self.spaces])
def _lincomb(self, a, x, b, y, out):
"""Linear combination ``out = a*x + b*y``."""
for space, xp, yp, outp in zip(self.spaces, x.parts, y.parts,
out.parts):
space._lincomb(a, xp, b, yp, outp)
def _dist(self, x1, x2):
"""Distance between two elements."""
return self.weighting.dist(x1, x2)
def _norm(self, x):
"""Norm of an element."""
return self.weighting.norm(x)
def _inner(self, x1, x2):
"""Inner product of two elements."""
return self.weighting.inner(x1, x2)
def _multiply(self, x1, x2, out):
"""Product ``out = x1 * x2``."""
for spc, xp, yp, outp in zip(self.spaces, x1.parts, x2.parts,
out.parts):
spc._multiply(xp, yp, outp)
def _divide(self, x1, x2, out):
"""Quotient ``out = x1 / x2``."""
for spc, xp, yp, outp in zip(self.spaces, x1.parts, x2.parts,
out.parts):
spc._divide(xp, yp, outp)
def __eq__(self, other):
"""Return ``self == other``.
Returns
-------
equals : bool
``True`` if ``other`` is a `ProductSpace` instance, has
the same length and the same factors. ``False`` otherwise.
Examples
--------
>>> r2, r3 = odl.rn(2), odl.rn(3)
>>> rn, rm = odl.rn(2), odl.rn(3)
>>> r2x3, rnxm = ProductSpace(r2, r3), ProductSpace(rn, rm)
>>> r2x3 == rnxm
True
>>> r3x2 = ProductSpace(r3, r2)
>>> r2x3 == r3x2
False
>>> r5 = ProductSpace(*[odl.rn(1)]*5)
>>> r2x3 == r5
False
>>> r5 = odl.rn(5)
>>> r2x3 == r5
False
"""
if other is self:
return True
else:
return (isinstance(other, ProductSpace) and
self.shape == other.shape and
self.weighting == other.weighting and
all(x == y for x, y in zip(self.spaces,
other.spaces)))
def __getitem__(self, indices):
"""Return ``self[indices]``."""
if isinstance(indices, Integral):
return self.spaces[indices]
elif isinstance(indices, slice):
return ProductSpace(*self.spaces[indices],
field=self.field)
else:
return ProductSpace(*[self.spaces[i] for i in indices],
field=self.field)
def __str__(self):
"""Return ``str(self)``."""
if self.size == 0:
return '{}'
elif all(self.spaces[0] == space for space in self.spaces):
return '{' + str(self.spaces[0]) + '}^' + str(self.size)
else:
return ' x '.join(str(space) for space in self.spaces)
def __repr__(self):
"""Return ``repr(self)``."""
if self.size == 0:
return '{}(field={})'.format(self.__class__.__name__, self.field)
elif self.is_power_space:
return '{}({!r}, {})'.format(self.__class__.__name__,
self.spaces[0], self.size)
else:
inner_str = ', '.join(repr(space) for space in self.spaces)
return '{}({})'.format(self.__class__.__name__, inner_str)
@property
def element_type(self):
"""`ProductSpaceElement`"""
return ProductSpaceElement
class ProductSpaceElement(LinearSpaceElement):
"""Elements of a `ProductSpace`."""
def __init__(self, space, parts):
"""Initialize a new instance."""
super().__init__(space)
self.__parts = tuple(parts)
@property
def parts(self):
"""Parts of this product space element."""
return self.__parts
@property
def size(self):
"""Number of factors of this element's space."""
return self.space.size
@property
def dtype(self):
"""The data type of the space of this element."""
return self.space.dtype
def __len__(self):
"""Return ``len(self)``."""
return len(self.space)
def __eq__(self, other):
"""Return ``self == other``.
Overrides the default `LinearSpace` method since it is
implemented with the distance function, which is prone to
numerical errors. This function checks equality per
component.
"""
if other is self:
return True
elif other not in self.space:
return False
else:
return all(sp == op for sp, op in zip(self.parts, other.parts))
def __getitem__(self, indices):
"""Return ``self[indices]``."""
if isinstance(indices, Integral):
return self.parts[indices]
elif isinstance(indices, slice):
return self.space[indices].element(self.parts[indices])
elif isinstance(indices, list):
out_parts = [self.parts[i] for i in indices]
return self.space[indices].element(out_parts)
else:
raise TypeError('bad index type {}'.format(type(indices)))
def __setitem__(self, indices, values):
"""Implement ``self[indices] = values``."""
# Get the parts to which we assign values
if isinstance(indices, Integral):
indexed_parts = (self.parts[indices],)
values = (values,)
elif isinstance(indices, slice):
indexed_parts = self.parts[indices]
elif isinstance(indices, list):
indexed_parts = tuple(self.parts[i] for i in indices)
else:
raise TypeError('bad index type {}'.format(type(indices)))
# Do the assignment, with broadcasting if desired
try:
iter(values)
except TypeError:
# `values` is not iterable, assume it can be assigned to
# all indexed parts
for p in indexed_parts:
p[:] = values
else:
# `values` is iterable; it could still represent a single
# element of a power space.
if self.space.is_power_space and values in self.space[0]:
# Broadcast a single element across a power space
for p in indexed_parts:
p[:] = values
else:
# Now we really have one assigned value per part
if len(values) != len(indexed_parts):
raise ValueError(
'length of iterable `values` not equal to number of '
'indexed parts ({} != {})'
''.format(len(values), len(indexed_parts)))
for p, v in zip(indexed_parts, values):
p[:] = v
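    # Sketch of the broadcast assignment handled above (illustrative, not part
    # of the original module):
    #
    #     >>> pspace = odl.ProductSpace(odl.rn(2), 3)
    #     >>> x = pspace.element()
    #     >>> x[:] = odl.rn(2).element([1.0, -1.0])  # assigned to all 3 parts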
@property
def ufuncs(self):
"""`ProductSpaceUfuncs`, access to Numpy style ufuncs.
These are always available if the underlying spaces are
`NtuplesBase`.
Examples
--------
>>> r22 = odl.ProductSpace(odl.rn(2), 2)
>>> x = r22.element([[1, -2], [-3, 4]])
>>> x.ufuncs.absolute()
ProductSpace(rn(2), 2).element([
[1.0, 2.0],
[3.0, 4.0]
])
These functions can also be used with non-vector arguments and
support broadcasting, per component and even recursively:
>>> x.ufuncs.add([1, 2])
ProductSpace(rn(2), 2).element([
[2.0, 0.0],
[-2.0, 6.0]
])
>>> x.ufuncs.subtract(1)
ProductSpace(rn(2), 2).element([
[0.0, -3.0],
[-4.0, 3.0]
])
There is also support for various reductions (sum, prod, min, max):
>>> x.ufuncs.sum()
0.0
Writing to ``out`` is also supported:
>>> y = r22.element()
>>> result = x.ufuncs.absolute(out=y)
>>> result
ProductSpace(rn(2), 2).element([
[1.0, 2.0],
[3.0, 4.0]
])
>>> result is y
True
See Also
--------
odl.util.ufuncs.NtuplesBaseUfuncs
Base class for ufuncs in `NtuplesBase` spaces, subspaces may
override this for greater efficiency.
odl.util.ufuncs.ProductSpaceUfuncs
For a list of available ufuncs.
"""
return ProductSpaceUfuncs(self)
def __str__(self):
"""Return ``str(self)``."""
inner_str = ', '.join(str(part) for part in self.parts)
return '{{{}}}'.format(inner_str)
def __repr__(self):
"""Return ``repr(self)``.
Examples
--------
>>> from odl import rn # need to import rn into namespace
>>> r2, r3 = odl.rn(2), odl.rn(3)
>>> r2x3 = ProductSpace(r2, r3)
>>> x = r2x3.element([[1, 2], [3, 4, 5]])
>>> eval(repr(x)) == x
True
The result is readable:
>>> x
ProductSpace(rn(2), rn(3)).element([
[1.0, 2.0],
[3.0, 4.0, 5.0]
])
        Nested spaces work as well
>>> X = ProductSpace(r2x3, r2x3)
>>> x = X.element([[[1, 2], [3, 4, 5]],[[1, 2], [3, 4, 5]]])
>>> eval(repr(x)) == x
True
>>> x
ProductSpace(ProductSpace(rn(2), rn(3)), 2).element([
[
[1.0, 2.0],
[3.0, 4.0, 5.0]
],
[
[1.0, 2.0],
[3.0, 4.0, 5.0]
]
])
"""
inner_str = '[\n'
if len(self) < 5:
inner_str += ',\n'.join('{}'.format(
_indent(_strip_space(part))) for part in self.parts)
else:
inner_str += ',\n'.join('{}'.format(
_indent(_strip_space(part))) for part in self.parts[:3])
inner_str += ',\n ...\n'
inner_str += ',\n'.join('{}'.format(
_indent(_strip_space(part))) for part in self.parts[-1:])
inner_str += '\n]'
return '{!r}.element({})'.format(self.space, inner_str)
def show(self, title=None, indices=None, **kwargs):
"""Display the parts of this product space element graphically.
Parameters
----------
title : string
Title of the figures
indices : index expression, optional
Indices can refer to parts of a `ProductSpaceElement` and slices
in the parts in the following way:
Single index (``indices=0``)
=> display that part
Single slice (``indices=slice(None)``), or
index list (``indices=[0, 1, 3]``)
=> display those parts
Any tuple, for example:
Created by `numpy.s_` ``indices=np.s_[0, :, :]`` or
Using a raw tuple ``indices=([0, 3], slice(None))``
=> take the first elements to select the parts and
pass the rest on to the underlying show methods.
kwargs
Additional arguments passed on to the ``show`` methods of
the parts.
Returns
-------
fig : list of `matplotlib.figure.Figure`
The resulting figures. It is also shown to the user.
See Also
--------
odl.discr.lp_discr.DiscreteLpElement.show :
Display of a discretized function
odl.space.base_ntuples.NtuplesBaseVector.show :
Display of sequence type data
odl.util.graphics.show_discrete_data :
Underlying implementation
"""
if title is None:
title = 'ProductSpaceElement'
if indices is None:
if len(self) < 5:
indices = list(np.arange(self.size))
else:
indices = list(np.linspace(0, self.size - 1, 4, dtype=int))
else:
if isinstance(indices, tuple):
indices, kwargs['indices'] = indices[0], indices[1:]
if isinstance(indices, slice):
indices = list(range(*indices.indices(self.size)))
elif isinstance(indices, Integral):
indices = [indices]
# else try with indices as is
in_figs = kwargs.pop('fig', None)
in_figs = [None] * len(indices) if in_figs is None else in_figs
figs = []
for i, part, fig in zip(indices, self[indices], in_figs):
fig = part.show(title='{}. Part {}'.format(title, i), fig=fig,
**kwargs)
figs += [fig]
return figs
# --- Add arithmetic operators that broadcast ---
def _broadcast_arithmetic(op):
"""Return ``op(self, other)`` with broadcasting.
Parameters
----------
op : string
Name of the operator, e.g. ``'__add__'``.
Returns
-------
broadcast_arithmetic_op : function
        Function intended to be used as a method for `ProductSpaceElement`
which performs broadcasting if possible.
Notes
-----
Broadcasting is the operation of "applying an operator multiple times" in
some sense. For example:
.. math::
(1, 2) + 1 = (2, 3)
is a form of broadcasting. In this implementation, we only allow "single
layer" broadcasting, i.e., we do not support broadcasting over several
product spaces at once.
"""
def _broadcast_arithmetic_impl(self, other):
if (self.space.is_power_space and other in self.space[0]):
results = []
for xi in self:
res = getattr(xi, op)(other)
if res is NotImplemented:
return NotImplemented
else:
results.append(res)
return self.space.element(results)
else:
return getattr(LinearSpaceElement, op)(self, other)
# Set docstring
docstring = """Broadcasted {op}.""".format(op=op)
_broadcast_arithmetic_impl.__doc__ = docstring
return _broadcast_arithmetic_impl
for op in ['add', 'sub', 'mul', 'div', 'truediv']:
for modifier in ['', 'r', 'i']:
name = '__{}{}__'.format(modifier, op)
setattr(ProductSpaceElement, name, _broadcast_arithmetic(name))
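# Illustrative sketch of the broadcasting enabled above (not part of the
# original module; assumes the usual NumPy-backed spaces):
#
#     >>> pspace = odl.ProductSpace(odl.rn(3), 2)
#     >>> x = pspace.element([[1, 2, 3], [4, 5, 6]])
#     >>> y = x + odl.rn(3).element([1, 1, 1])   # added to each component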
class ProductSpaceArrayWeighting(ArrayWeighting):
"""Array weighting for `ProductSpace`.
This class defines a weighting that has a different value for
each index defined in a given space.
See ``Notes`` for mathematical details.
"""
def __init__(self, array, exponent=2.0, dist_using_inner=False):
"""Initialize a new instance.
Parameters
----------
array : 1-dim. `array-like`
Weighting array of the inner product.
exponent : positive float, optional
Exponent of the norm. For values other than 2.0, no inner
product is defined.
dist_using_inner : bool, optional
Calculate ``dist`` using the formula
``||x - y||^2 = ||x||^2 + ||y||^2 - 2 * Re <x, y>``.
This avoids the creation of new arrays and is thus faster
for large arrays. On the downside, it will not evaluate to
exactly zero for equal (but not identical) ``x`` and ``y``.
Can only be used if ``exponent`` is 2.0.
Notes
-----
- For exponent 2.0, a new weighted inner product with array
:math:`w` is defined as
.. math::
\\langle x, y \\rangle_w = \\langle w \odot x, y \\rangle
with component-wise multiplication :math:`w \odot x`. For other
exponents, only ``norm`` and ``dist`` are defined. In the case
of exponent ``inf``, the weighted norm is
.. math::
\|x\|_{w,\infty} = \|w \odot x\|_\infty,
otherwise it is
.. math::
\|x\|_{w,p} = \|w^{1/p} \odot x\|_p.
- Note that this definition does **not** fulfill the limit property
in :math:`p`, i.e.,
.. math::
\|x\|_{w,p} \\not\\to \|x\|_{w,\infty}
\quad\\text{for } p \\to \infty
unless :math:`w = (1,...,1)`. The reason for this choice
is that the alternative with the limit property consists in
ignoring the weights altogether.
- The array may only have positive entries, otherwise it does not
define an inner product or norm, respectively. This is not checked
during initialization.
"""
super().__init__(array, impl='numpy', exponent=exponent,
dist_using_inner=dist_using_inner)
def inner(self, x1, x2):
"""Calculate the array-weighted inner product of two elements.
Parameters
----------
x1, x2 : `ProductSpaceElement`
Elements whose inner product is calculated.
Returns
-------
inner : float or complex
The inner product of the two provided elements.
"""
if self.exponent != 2.0:
raise NotImplementedError('no inner product defined for '
'exponent != 2 (got {})'
''.format(self.exponent))
inners = np.fromiter(
(x1i.inner(x2i) for x1i, x2i in zip(x1, x2)),
dtype=x1[0].space.dtype, count=len(x1))
inner = np.dot(inners, self.array)
if is_real_dtype(x1[0].dtype):
return float(inner)
else:
return complex(inner)
def norm(self, x):
"""Calculate the array-weighted norm of an element.
Parameters
----------
x : `ProductSpaceElement`
Element whose norm is calculated.
Returns
-------
norm : float
The norm of the provided element.
"""
if self.exponent == 2.0:
norm_squared = self.inner(x, x).real # TODO: optimize?!
return np.sqrt(norm_squared)
else:
norms = np.fromiter(
(xi.norm() for xi in x), dtype=np.float64, count=len(x))
if self.exponent in (1.0, float('inf')):
norms *= self.array
else:
norms *= self.array ** (1.0 / self.exponent)
return float(np.linalg.norm(norms, ord=self.exponent))
class ProductSpaceConstWeighting(ConstWeighting):
"""Constant weighting for `ProductSpace`.
"""
def __init__(self, constant, exponent=2.0, dist_using_inner=False):
"""Initialize a new instance.
Parameters
----------
constant : positive float
Weighting constant of the inner product
exponent : positive float, optional
Exponent of the norm. For values other than 2.0, no inner
product is defined.
dist_using_inner : bool, optional
Calculate ``dist`` using the formula
``||x - y||^2 = ||x||^2 + ||y||^2 - 2 * Re <x, y>``
This avoids the creation of new arrays and is thus faster
for large arrays. On the downside, it will not evaluate to
exactly zero for equal (but not identical) ``x`` and ``y``.
Can only be used if ``exponent`` is 2.0.
Notes
-----
- For exponent 2.0, a new weighted inner product with constant
:math:`c` is defined as
.. math::
\\langle x, y \\rangle_c = c\, \\langle x, y \\rangle.
          For other exponents, only ``norm`` and ``dist`` are defined.
In the case of exponent ``inf``, the weighted norm is
.. math::
\|x\|_{c,\infty} = c\, \|x\|_\infty,
otherwise it is
.. math::
\|x\|_{c,p} = c^{1/p} \, \|x\|_p.
- Note that this definition does **not** fulfill the limit property
in :math:`p`, i.e.,
.. math::
\|x\|_{c,p} \\not\\to \|x\|_{c,\infty}
\quad \\text{for } p \\to \infty
unless :math:`c = 1`. The reason for this choice
is that the alternative with the limit property consists in
ignoring the weight altogether.
- The constant must be positive, otherwise it does not define an
inner product or norm, respectively.
"""
super().__init__(constant, impl='numpy', exponent=exponent,
dist_using_inner=dist_using_inner)
def inner(self, x1, x2):
"""Calculate the constant-weighted inner product of two elements.
Parameters
----------
x1, x2 : `ProductSpaceElement`
Elements whose inner product is calculated.
Returns
-------
inner : float or complex
The inner product of the two provided elements.
"""
if self.exponent != 2.0:
raise NotImplementedError('no inner product defined for '
'exponent != 2 (got {})'
''.format(self.exponent))
inners = np.fromiter(
(x1i.inner(x2i) for x1i, x2i in zip(x1, x2)),
dtype=complex, count=len(x1))
inner = self.const * np.sum(inners)
return x1.space.field.element(inner)
def norm(self, x):
"""Calculate the constant-weighted norm of an element.
Parameters
----------
        x : `ProductSpaceElement`
Element whose norm is calculated.
Returns
-------
norm : float
The norm of the element.
"""
if self.exponent == 2.0:
norm_squared = self.inner(x, x).real # TODO: optimize?!
return np.sqrt(norm_squared)
else:
norms = np.fromiter(
(xi.norm() for xi in x), dtype=np.float64, count=len(x))
if self.exponent in (1.0, float('inf')):
return (self.const *
float(np.linalg.norm(norms, ord=self.exponent)))
else:
return (self.const ** (1 / self.exponent) *
float(np.linalg.norm(norms, ord=self.exponent)))
def dist(self, x1, x2):
"""Calculate the constant-weighted distance between two elements.
Parameters
----------
x1, x2 : `ProductSpaceElement`
Elements whose mutual distance is calculated.
Returns
-------
dist : float
The distance between the elements.
"""
if self.dist_using_inner:
norms1 = np.fromiter(
(x1i.norm() for x1i in x1),
dtype=np.float64, count=len(x1))
norm1 = np.linalg.norm(norms1)
norms2 = np.fromiter(
(x2i.norm() for x2i in x2),
dtype=np.float64, count=len(x2))
norm2 = np.linalg.norm(norms2)
inners = np.fromiter(
(x1i.inner(x2i) for x1i, x2i in zip(x1, x2)),
dtype=x1[0].space.dtype, count=len(x1))
inner_re = np.sum(inners.real)
dist_squared = norm1 ** 2 + norm2 ** 2 - 2 * inner_re
if dist_squared < 0.0: # Compensate for numerical error
dist_squared = 0.0
return np.sqrt(self.const) * float(np.sqrt(dist_squared))
else:
dnorms = np.fromiter(
((x1i - x2i).norm() for x1i, x2i in zip(x1, x2)),
dtype=np.float64, count=len(x1))
if self.exponent == float('inf'):
return self.const * np.linalg.norm(dnorms, ord=self.exponent)
else:
return (self.const ** (1 / self.exponent) *
np.linalg.norm(dnorms, ord=self.exponent))
class ProductSpaceNoWeighting(NoWeighting, ProductSpaceConstWeighting):
"""Weighting of `ProductSpace` with constant 1."""
# Implement singleton pattern for efficiency in the default case
_instance = None
def __new__(cls, *args, **kwargs):
"""Implement singleton pattern if ``exp==2.0``."""
if len(args) == 0:
exponent = kwargs.pop('exponent', 2.0)
dist_using_inner = kwargs.pop('dist_using_inner', False)
elif len(args) == 1:
exponent = args[0]
args = args[1:]
dist_using_inner = kwargs.pop('dist_using_inner', False)
else:
exponent = args[0]
dist_using_inner = args[1]
args = args[2:]
if exponent == 2.0 and not dist_using_inner:
if not cls._instance:
cls._instance = super().__new__(cls, *args, **kwargs)
return cls._instance
else:
return super().__new__(cls, *args, **kwargs)
def __init__(self, exponent=2.0, dist_using_inner=False):
"""Initialize a new instance.
Parameters
----------
exponent : positive float
Exponent of the norm. For values other than 2.0, the inner
product is not defined.
dist_using_inner : bool, optional
Calculate ``dist`` using the formula
``||x - y||^2 = ||x||^2 + ||y||^2 - 2 * Re <x, y>``
This avoids the creation of new arrays and is thus faster
for large arrays. On the downside, it will not evaluate to
exactly zero for equal (but not identical) ``x`` and ``y``.
Can only be used if ``exponent`` is 2.0.
"""
super().__init__(impl='numpy', exponent=exponent,
dist_using_inner=dist_using_inner)
class ProductSpaceCustomInner(CustomInner):
"""Class for handling a user-specified inner products."""
def __init__(self, inner, dist_using_inner=False):
"""Initialize a new instance.
Parameters
----------
inner : callable
The inner product implementation. It must accept two
`ProductSpaceElement` arguments, return a element from
the field of the space (real or complex number) and
satisfy the following conditions for all space elements
``x, y, z`` and scalars ``s``:
- ``<x, y> = conj(<y, x>)``
- ``<s*x + y, z> = s * <x, z> + <y, z>``
- ``<x, x> = 0`` if and only if ``x = 0``
dist_using_inner : bool, optional
Calculate ``dist`` using the formula
``||x - y||^2 = ||x||^2 + ||y||^2 - 2 * Re <x, y>``
This avoids the creation of new arrays and is thus faster
for large arrays. On the downside, it will not evaluate to
exactly zero for equal (but not identical) ``x`` and ``y``.
Can only be used if ``exponent`` is 2.0.
"""
super().__init__(impl='numpy', inner=inner,
dist_using_inner=dist_using_inner)
class ProductSpaceCustomNorm(CustomNorm):
"""Class for handling a user-specified norm on `ProductSpace`.
Note that this removes ``inner``.
"""
def __init__(self, norm):
"""Initialize a new instance.
Parameters
----------
norm : callable
The norm implementation. It must accept a
`ProductSpaceElement` argument, return a float and satisfy
the following conditions for all space elements
``x, y`` and scalars ``s``:
- ``||x|| >= 0``
- ``||x|| = 0`` if and only if ``x = 0``
- ``||s * x|| = |s| * ||x||``
- ``||x + y|| <= ||x|| + ||y||``
"""
super().__init__(norm, impl='numpy')
class ProductSpaceCustomDist(CustomDist):
"""Class for handling a user-specified distance on `ProductSpace`.
Note that this removes ``inner`` and ``norm``.
"""
def __init__(self, dist):
"""Initialize a new instance.
Parameters
----------
dist : callable
The distance function defining a metric on
`ProductSpace`. It must accept two `ProductSpaceElement`
arguments and fulfill the following mathematical conditions
for any three space elements ``x, y, z``:
- ``dist(x, y) >= 0``
- ``dist(x, y) = 0`` if and only if ``x = y``
- ``dist(x, y) = dist(y, x)``
- ``dist(x, y) <= dist(x, z) + dist(z, y)``
"""
super().__init__(dist, impl='numpy')
def _strip_space(x):
"""Strip the SPACE.element( ... ) part from a repr."""
r = repr(x)
space_repr = '{!r}.element('.format(x.space)
if r.startswith(space_repr) and r.endswith(')'):
r = r[len(space_repr):-1]
return r
def _indent(x):
"""Indent a string by 4 characters."""
lines = x.splitlines()
for i, line in enumerate(lines):
lines[i] = ' ' + line
return '\n'.join(lines)
if __name__ == '__main__':
# pylint: disable=wrong-import-position
from odl.util.testutils import run_doctests
run_doctests()
| gpl-3.0 |
nce/sedater | docs/conf.py | 1 | 11534 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sedater documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 6 18:16:57 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import unittest.mock as mock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# all modules which are used, but not allowed to run on readthedocs.org
MOCK_MODULES = ['matplotlib', 'matplotlib.pyplot', 'numpy']
#MOCK_MODULES = ['typing']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sedater'
copyright = '2015, Ulli Goschler'
author = 'Ulli Goschler'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sedaterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sedater.tex', 'sedater Documentation',
'Ulli Goschler', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sedater', 'sedater Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sedater', 'sedater Documentation',
author, 'sedater', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| mit |
zblz/naima | src/naima/tests/test_saveread.py | 1 | 3719 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import astropy.units as u
import numpy as np
from astropy.io import ascii
from astropy.tests.helper import pytest
from astropy.utils.data import get_pkg_data_filename
from ..analysis import read_run, save_run
from ..core import run_sampler, uniform_prior
from ..model_fitter import InteractiveModelFitter
from ..models import ExponentialCutoffPowerLaw
from ..plot import plot_chain, plot_data, plot_fit
from ..utils import validate_data_table
from .fixtures import simple_sampler as sampler
try:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
try:
import emcee
HAS_EMCEE = True
except ImportError:
HAS_EMCEE = False
fname = get_pkg_data_filename("data/CrabNebula_HESS_ipac.dat")
data_table = ascii.read(fname)
@pytest.mark.skipif("not HAS_EMCEE")
def test_roundtrip(sampler):
save_run("test_chain.h5", sampler, clobber=True)
assert os.path.exists("test_chain.h5")
nresult = read_run("test_chain.h5")
assert np.allclose(sampler.chain, nresult.chain)
assert np.allclose(sampler.flatchain, nresult.flatchain)
assert np.allclose(sampler.lnprobability, nresult.lnprobability)
assert np.allclose(sampler.flatlnprobability, nresult.flatlnprobability)
nwalkers, nsteps = sampler.chain.shape[:2]
j, k = int(nsteps / 2), int(nwalkers / 2)
for l in range(len(sampler.blobs[j][k])):
b0 = sampler.blobs[j][k][l]
b1 = nresult.blobs[j][k][l]
if isinstance(b0, tuple) or isinstance(b0, list):
for m in range(len(b0)):
assert b0[m].unit == b1[m].unit
assert np.allclose(b0[m].value, b1[m].value)
else:
if isinstance(b0, u.Quantity):
assert b0.unit == b1.unit
assert np.allclose(b0.value, b1.value)
else:
assert np.allclose(b0, b1)
for key in sampler.run_info.keys():
assert np.all(sampler.run_info[key] == nresult.run_info[key])
for i in range(len(sampler.labels)):
assert sampler.labels[i] == nresult.labels[i]
for col in sampler.data.colnames:
assert np.allclose(
u.Quantity(sampler.data[col]).value,
u.Quantity(nresult.data[col]).value,
)
assert str(sampler.data[col].unit) == str(nresult.data[col].unit)
validate_data_table(nresult.data)
assert np.allclose(
np.mean(sampler.acceptance_fraction), nresult.acceptance_fraction
)
@pytest.mark.skipif("not HAS_MATPLOTLIB or not HAS_EMCEE")
def test_plot_fit(sampler):
save_run("test_chain.h5", sampler, clobber=True)
nresult = read_run("test_chain.h5", modelfn=sampler.modelfn)
plot_data(nresult)
plot_fit(nresult, 0)
plot_fit(nresult, 0, e_range=[0.1, 10] * u.TeV)
plot_fit(nresult, 0, sed=False)
plt.close("all")
@pytest.mark.skipif("not HAS_MATPLOTLIB or not HAS_EMCEE")
def test_plot_chain(sampler):
save_run("test_chain.h5", sampler, clobber=True)
nresult = read_run("test_chain.h5", modelfn=sampler.modelfn)
for i in range(nresult.chain.shape[2]):
plot_chain(nresult, i)
plt.close("all")
@pytest.mark.skipif("not HAS_MATPLOTLIB or not HAS_EMCEE")
def test_imf(sampler):
save_run("test_chain.h5", sampler, clobber=True)
nresult = read_run("test_chain.h5", modelfn=sampler.modelfn)
imf = InteractiveModelFitter(
nresult.modelfn, nresult.chain[-1][-1], nresult.data
)
imf.do_fit("test")
from naima.core import lnprobmodel
lnprobmodel(nresult.modelfn(imf.pars, nresult.data)[0], nresult.data)
plt.close("all")
| bsd-3-clause |
Erotemic/ibeis | dev/_scripts/fix_common_issues.py | 2 | 1048 | # -*- coding: utf-8 -*-
import sys
# FIXME: setup for more than just win32
WIN32 = sys.platform.startswith('win32')
def get_install_cmd(modname):
if WIN32:
        install_cmd = ('_scripts\\win32bootstrap.py --run --dl ' + modname)
else:
install_cmd = 'sudo pip install ' + modname
return install_cmd
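# Illustrative only: on Linux get_install_cmd('pandas') returns 'sudo pip install pandas',
# while on win32 it points at the bundled _scripts\win32bootstrap.py helper script.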
# Order is important here
modlist = [
'patsy',
'pandas',
'statsmodels',
'simplejson',
]
for modname in modlist:
try:
level = 0
module = __import__(modname, globals(), locals(), fromlist=[], level=level)
except ImportError as ex:
install_cmd = get_install_cmd(modname)
        print('Please run the following instruction and then rerun fix_common_issues.py: ')
print(install_cmd)
import utool as ut
ut.cmd(install_cmd, shell=True)
sys.exit(0)
"""
References:
http://superuser.com/questions/345719/how-to-chmod-and-chown-hidden-files-in-linux
cd /opt/ibeis
shopt -s dotglob
sudo chown -R jasonp:ibeis *
sudo chmod -R 775 *
shopt -u dotglob
"""
| apache-2.0 |
tiw51/DeepPurple | Stock_Programs/findStockTrends.py | 1 | 3781 | #this will preform sum basic statistical analysis of data as specified as user input
#this should give a starting date
import datetime as dt
import numpy
#this allows us to make plots
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
#makes the graphs look better:
from datetime import timedelta
from matplotlib import style
#good data analysis library
import pandas as pd
#imports more libraries
#this makes it easy to grab data from the yahoo finance api
#will return a "pandas data frame"
import pandas_datareader.data as web
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
company = 'GOOG'
current_t=dt.date.today()
##current_time_array=[endY,endM,endD]
##time_in_year=timedelta(weeks=40, days=85)
def makeDatePretty(dateArray):
year=str(dateArray[0])
month=str(dateArray[1])
day=str(dateArray[2])
slash='/'
string=month+slash+day+slash+year
return string
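# For illustration: makeDatePretty([2017, 3, 5]) returns '3/5/2017' (month/day/year).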
def findStockTrends(comp=company, years=10, months=0, days=0):
current_time=dt.date.today()
endY=current_time.year
endM=current_time.month
endD=current_time.day
current_time_array=[endY,endM,endD]
x=makeDatePretty(current_time_array)
startDate=[endY-years,endM-months,endD-days]
    # calendar months and days are 1-based, so borrow whenever the value drops below 1
    if startDate[1]<1:
        while (startDate[1]<1):
            startDate[0]-=1
            startDate[1]+=12
    if startDate[2]<1:
        while (startDate[2]<1):
            startDate[1]-=1
            startDate[2]+=30
style.use('ggplot')
    #start the data the requested number of years/months/days before today
start=dt.datetime(startDate[0], startDate[1], startDate[2])
end=dt.datetime(current_time_array[0],current_time_array[1],current_time_array[2])
print(comp)
#dataframe
df=web.DataReader(comp, 'google', start, end)
#converting the data to a csv
#df.to_csv('google.csv')
'''
user_response=input('Do you want to create a file for the data? (Y/N)')
if user_response.lower() =='y':
#reading in a csv
name_of_file=company + '.csv'
df=pd.read_csv(name_of_file, parse_dates=True, index_col=0)
print(df.head(10))
'''
print(df.head(10))
##df['Low'].plot()
##plt.show()
##df['High'].plot()
##plt.show()
#adding a rolling average
df['100 M.Avg']=df['Close'].rolling(window=100, min_periods=0).mean()
df.dropna(inplace=True)
#print(df.head())
plt.figure(1)
#using matplotlib to plot
ax1= plt.subplot2grid((12,1), (0,0), rowspan=5, colspan=1)
x=makeDatePretty(current_time_array)
y=makeDatePretty(startDate)
title_of_graph=' Stock Price of ' +comp+' from ' +y+' to ' +x
plt.title(title_of_graph)
ax2= plt.subplot2grid((12,1), (5,0), rowspan=5, colspan=1, sharex=ax1)
ax1.plot(df.index, df['100 M.Avg'])
ax1.plot(df.index, df['Close'])
ax2.bar(df.index, df['Volume'])
    #resample into 10-day OHLC bars (open/high/low/close) with summed volume
df_ohlc=df['Close'].resample('10D').ohlc()
df_volume =df['Volume'].resample('10D').sum()
df_ohlc.reset_index(inplace=True)
df_ohlc['Date']=df_ohlc['Date'].map(mdates.date2num)
#print(df_ohlc.head())
ax3= plt.subplot2grid((12,1), (6,0), rowspan=5, colspan=1, sharex=ax1)
ax4= plt.subplot2grid((12,1), (11,0), rowspan=5, colspan=1, sharex=ax1)
ax3.xaxis_date()
candlestick_ohlc(ax3, df_ohlc.values, width=2, colorup='g')
ax4.fill_between(df_volume.index.map(mdates.date2num), df_volume.values, 0)
plt.show()
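# Example usage (sketch):
#   findStockTrends()                           # ten years of GOOG, the defaults above
#   findStockTrends('AAPL', years=2, months=6)  # shorter custom window for another ticker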
| apache-2.0 |
dnolivieri/RFVextract | rfVextract/VsRFtrain03.py | 1 | 14112 | #!/usr/bin/env python
"""
dnolivieri: (started: 23 september 2014)
- doing multiple binary trainings.
"""
import pylab as pl
import numpy as np
import sys
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.cross_validation import cross_val_score
from numpy import genfromtxt, savetxt
import time
import itertools
import cPickle as pickle
import timeit
import pybedtools as pb
from Bio import SeqIO
from Bio.Seq import Seq
from scipy import *
import struct
import re
from propy import PyPro
from propy.GetProteinFromUniprot import GetProteinSequence
import featureHeatmap01 as fHM
import json
AA = {1:'A', 2:'R',3:'N',4:'D',5:'C', 6:'Q', 7:'E', 8:'G', 9:'H', 10:'I',11:'L', 12:'K', 13:'M', 14:'F', 15:'P', 16:'S', 17:'T', 18:'W', 19:'Y', 20:'V', 21:'B', 22:'Z', 23:'X'}
rno = {'A':0,'R':1,'N':2,'D':3,'C':4,'Q':5,'E':6,'G':7,'H':8,'I':9,'L':10,'K':11,'M':12,'F':13,'P':14,'S':15,'T':16,'W':17,'Y':18,'V':19}
n_classes = 2
n_estimators = 1000
RANDOM_SEED = 13
def save_object(obj, filename):
with open(filename, 'wb') as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
class TrainingPipeline:
def __init__(self, S, loci_classes, desc_method, bckgnd_method, pos_exists=False, neg_exists=False, do_training=False):
self.normI=self.normalized_AAindx()
self.S = S
self.loci_classes = loci_classes
self.desc_method = desc_method
self.bckgnd_method = bckgnd_method
self.pos_exists= pos_exists
self.neg_exists= neg_exists
self.posFile = 'train_posSignal'
self.bckgndFile = 'train_bckgndSignal'
self.plot_featurevecs = False
#self.form_pos_featurevecs()
#self.form_neg_featurevecs()
train_signals, train_vals = self.form_featurevecs()
#train_signals, train_vals = self.form_featurevecs_fromfiles()
npzsignals_out = "train_signals.npz"
np.savez(npzsignals_out, dp=train_signals )
npzvals_out = "train_vals.npz"
np.savez(npzvals_out, dp=train_vals )
do_training=True
if do_training:
A=TrainClassifier( train_signals, train_vals, 1000 )
def form_pos_featurevecs(self):
zP=self.get_positive_signals()
def form_neg_featurevecs(self):
zB=self.get_background()
def form_featurevecs_fromfiles(self):
dataP = np.load( self.posFile + '.npz')
zP = dataP['dp']
dataB = np.load( self.bckgndFile + '.npz')
zB = dataB['dp']
xMat=[]
xMat.append(zP)
xMat.append(zB)
sigLabels = ['P','B']
valP = np.ones( [zP.shape[0],1] )
valB = np.zeros( [zB.shape[0],1] )
train_signals = np.vstack([zP, zB])
train_vals = np.vstack([valP, valB])
return train_signals, train_vals
def add_positive_signals(self, fname):
D = self.descriptors_from_fasta(fname)
return D
def form_featurevecs(self):
if self.neg_exists:
zB=self.get_existing_neg_background()
else:
zB=self.get_background()
npzoutfile=self.bckgndFile+".npz"
np.savez(npzoutfile, dp=np.array(zB) )
valB = np.zeros( [zB.shape[0],1] )
print "-----Done with Background -----"
if self.pos_exists:
zP=self.get_existing_pos_signals()
else:
zP = np.array([])
valP = np.array([])
lcnt=1
for loci in self.loci_classes:
sig_fname = self.S['All'][loci]
zprm=self.add_positive_signals(sig_fname)
valprm = lcnt * np.ones( [zprm.shape[0],1] )
if lcnt==1:
zP = zprm
valP = valprm
else:
zP = np.vstack([zP, zprm])
valP = np.vstack([valP, valprm])
lcnt+=1
npzoutfile=self.posFile+".npz"
np.savez(npzoutfile, dp=np.array(zP) )
if self.plot_featurevecs:
xMat=[]
xMat.append(zP)
xMat.append(zB)
sigLabels = ['P','B']
#xbarMat=np.log(xMat)
#H = fHM.heatMap(xbarMat, sigLabels)
H = fHM.heatMap(xMat, sigLabels)
train_signals = np.vstack([zP, zB])
train_vals = np.vstack([valP, valB])
return train_signals, train_vals
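        # Label scheme built above: background rows are labelled 0 and each locus in
        # loci_classes gets the integer 1..N in the order supplied (ighv -> 1, iglv -> 2, ...),
        # so the resulting training set is multiclass, not binary.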
def get_existing_pos_signals(self):
data_posSignal = self.posFile+'.npz'
dataP = np.load( data_posSignal)
zP = dataP['dp']
return zP
def get_existing_neg_background(self):
"""
data_bkgSignal1 = './train_randbackgrnd.npz'
data_bkgSignal2 = './train_extrctBkg.npz'
dataB1 = np.load( data_bkgSignal1)
dataB2 = np.load( data_bkgSignal2)
zB1 = dataB1['dp']
zB2 = dataB2['dp']
zB = np.vstack([zB1, zB2])
"""
data_bkgSignal = self.bckgndFile + '.npz'
dataB = np.load( data_bkgSignal)
zB = dataB['dp']
return zB
def descriptors_from_fasta(self, infile):
# other methods available:
# 'GetAAComp', 'GetAAindex1', 'GetAAindex23', 'GetALL', 'GetAPAAC', 'GetCTD',
# 'GetDPComp', 'GetGearyAuto', 'GetGearyAutop', 'GetMoranAuto', 'GetMoranAutop',
# 'GetMoreauBrotoAuto', 'GetMoreauBrotoAutop', 'GetPAAC', 'GetPAACp', 'GetQSO',
# 'GetQSOp', 'GetSOCN', 'GetSOCNp', 'GetSubSeq', 'GetTPComp',
qbar=[]
cnt=0
for record in SeqIO.parse(infile, "fasta"):
descObject=PyPro.GetProDes(record.seq.tostring())
if ('X' not in record.seq.tostring()) and ('Z' not in record.seq.tostring()) and ('B' not in record.seq.tostring()):
if self.desc_method=='AAComp':
T = descObject.GetAAComp()
elif self.desc_method=='GestQSO':
T = descObject.GetQSO()
elif self.desc_method=='GetGearyAuto':
T = descObject.GetMoranAuto()
elif self.desc_method=='GetCTD':
T=descObject.GetCTD()
elif self.desc_method=='GetPAAC':
T =descObject.GetPAAC()
elif self.desc_method=='PDT':
T = self.getPDT3(record.seq.tostring())
else:
T=descObject.GetCTD()
Tx = [ T[x] for x in T.iterkeys() ]
#print Tx
#raw_input('press to continue')
print cnt, Tx[0:5]
qbar.append(Tx)
cnt+=1
if cnt>1e9:
break
return np.array(qbar)
def get_positive_signals(self):
infile = './dataVgeneDB/All_mammals_Vs.fasta'
D = self.descriptors_from_fasta(infile)
npzoutfile=self.posFile+".npz"
np.savez(npzoutfile, dp=np.array(D) )
return D
def get_background(self):
        infile='./dataVgeneDB/bkg.fasta'
        if self.bckgnd_method=='Mine':
            D = self.descriptors_from_fasta(infile)
            #npzoutfile=self.bckgndFile+".npz"
            #np.savez(npzoutfile, dp=np.array(D) )
        if self.bckgnd_method=='Mine_with_mut':
            D= self.fasta_with_mutations(infile)
npzoutfile=self.bckgndFile+".npz"
np.savez(npzoutfile, dp=np.array(D) )
elif self.bckgnd_method=='Total_random':
D=self.total_random_backgrnd()
npzoutfile=self.bckgndFile+".npz"
np.savez(npzoutfile, dp=np.array(D) )
return D
    def fasta_with_mutations(self, infile):
        qbar=[]
        for record in SeqIO.parse(infile, "fasta"):
sbar = record.seq.tostring()
sbarL= list(sbar)
posSeq = list(np.random.randint(1,len(sbar), int(0.95*len(sbar))) )
print posSeq
for p in posSeq:
aindx=np.random.randint(1,21,1)[0]
sbarL[p] = AA[aindx]
rbar = ''.join(sbarL)
descObject=PyPro.GetProDes(rbar)
print record.seq
print rbar
T =descObject.GetPAAC()
Tx = [ T[x] for x in T.iterkeys() ]
print Tx
qbar.append(Tx)
return np.array(qbar)
def total_random_backgrnd(self):
qbar=[]
for i in range(50):
indx = list(np.random.randint(1,21, 90))
seqList = [ AA[j] for j in indx ]
#print seqList
seq =''
rbar = seq.join(seqList)
print rbar
descObject=PyPro.GetProDes(rbar)
if self.desc_method=='AAComp':
T = descObject.GetAAComp()
elif self.desc_method=='GestQSO':
T = descObject.GetQSO()
elif self.desc_method=='GetGearyAuto':
T = descObject.GetMoreauBrotoAuto()
elif self.desc_method=='GetCTD':
T=descObject.GetCTD()
elif self.desc_method=='GetPAAC':
T =descObject.GetPAAC()
elif self.desc_method=='PDT':
T = self.getPDT3(rbar)
else:
T=descObject.GetCTD()
Tx = [ T[x] for x in T.iterkeys() ]
print i, Tx[0:5]
#raw_input('press to continue')
qbar.append(Tx)
return np.array(qbar)
def get_AAdescriptors(self):
pass
def normalized_AAindx(self):
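        # Sketch of the normalization below: each AAindex row q is z-scored over its 20
        # residue values, q'_k = (q_k - mean(q)) / sqrt(sum_j (q_j - mean(q))^2 / 20),
        # so every physicochemical scale is centered and unit-scaled before the PDT step.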
fp = open('aaindex.txt','r')
D=[]
for lk in fp:
q=[ float(i) for i in lk.split() ]
D.append(q)
Dvec=[]
normI = []
for j in D:
q= np.sum(np.array( j ))/20.
denom=0.0
for kp in j:
denom+= (kp - q)*(kp - q)
denom = np.sqrt(denom/20.)
abar=[]
for kp in j:
abar.append( (kp - q)/denom )
normI.append(abar)
save_object(normI, r'normalizedAA_Matrix.pkl')
return normI
def getPDT3(self, seq):
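        # PDT (lag-3) sketch: for each normalized AAindex property q the feature value is
        #     D_q = (1/(L-3)) * sum_{i=1..L-3} ( q[s_i] - q[s_{i+3}] )^2
        # where s is the amino-acid sequence of length L, giving one feature per AAindex row.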
Dvec={}
cnt=0
for q in self.normI:
sumDseq=0.0
for i in range(len(seq)-3):
sumDseq+= (q[rno[seq[i]]] - q[rno[seq[i+3]]])*(q[rno[seq[i]]] - q[rno[seq[i+3]]])
sumDseq = sumDseq/np.float(len(seq)-3)
Dvec.update( {str(cnt): sumDseq} )
cnt+=1
return Dvec
# ------------------
class TrainEachLoci:
def __init__(self, S, loci_classes, desc_method, Nestimators):
self.S = S
self.desc_method = desc_method
self.Nestimators = Nestimators
self.make_training_matrices()
def get_existing_signals(self, infile):
data = np.load( infile )
z = data['dp']
return z
def make_training_matrices(self):
# background
zB1 = self.get_existing_signals(self.S['Bkgnd']['bkg1'] )
print "zB1.shape=", zB1.shape
zB2 = self.get_existing_signals(self.S['Bkgnd']['bkg2'] )
        print "zB2.shape=", zB2.shape
zB = np.vstack([zB1, zB2])
valB = np.zeros( [zB.shape[0],1] )
for loci in self.S['Loci'].iterkeys():
zP = self.get_existing_signals(self.S['Loci'][loci] )
valP = np.ones( [zP.shape[0],1] )
print loci, "zP.shape=", zP.shape
train_signals = np.vstack([zP, zB])
train_vals = np.vstack([valP, valB])
outfile = "trainMat_"+ loci + ".pkl"
A=TrainClassifier( train_signals, train_vals, self.Nestimators, outfile)
# ------------------
class TrainClassifier:
    def __init__(self, train_signals, train_vals, Nestimators, outfile=None):
        self.train_signals = train_signals
        #self.train_vals = train_vals
        self.Nestimators = Nestimators
        self.outfile = outfile if outfile is not None else "test.pkl"
        #self.train_vals= train_vals.reshape( [train_vals.shape[0], 1])
self.train_vals= np.ravel(train_vals)
do_training=False
if do_training:
print "do training"
start_time = timeit.default_timer()
rf=self.do_training()
#save_object(rf, r'train_Matrix.pkl')
save_object(rf, self.outfile)
elapsed = timeit.default_timer() - start_time
print "ELAPSED=", elapsed
count_vectors=True
if count_vectors:
            self.obtain_training_set_size()
def do_training(self):
rf = RandomForestClassifier(n_estimators=self.Nestimators, oob_score=True)
rf.fit(self.train_signals, self.train_vals)
return rf
    def obtain_training_set_size(self):
        print np.count_nonzero(self.train_vals)
        sbar=set(np.ravel(self.train_vals).astype(int))
        for k in list(sbar):
            print k, np.extract( self.train_vals==k, self.train_vals).size
## ---------------MAIN ----------------------------------
if __name__ == '__main__':
Vs_Loci = 'Vs_Loci_npz.json'
json_data=open( Vs_Loci )
S = json.load(json_data)
json_data.close()
method="PDT"
loci_classes=[ 'ighv', 'iglv', 'igkv', 'trav','trbv','trgv', 'trdv']
"""
if method=="PDT":
T = TrainingPipeline( S, loci_classes, desc_method='PDT', bckgnd_method='Mine', pos_exists=False, neg_exists=True, do_training=False)
elif method=="PAAC":
T = TrainingPipeline( S, loci_classes, desc_method='GetPAAC', bckgnd_method='Mine', pos_exists=False, neg_exists=False, do_training=True)
"""
#if method=="PDT":
# T= TrainEachLoci(S, loci_classes, desc_method='PDT', Nestimators=50)
do_training=True
if do_training:
npzsingals_in = "train_signals.npz"
npzvals_in = "train_vals.npz"
signals = np.load(npzsingals_in)
vals = np.load( npzvals_in )
train_signals = signals['dp']
train_vals = vals['dp']
n_classifiers=5000
print "n_classifiers=", n_classifiers
A=TrainClassifier( train_signals, train_vals, 5000 )
| bsd-3-clause |
vossman/ctfeval | appionlib/apCtf/ctfdisplay.py | 1 | 43159 | #!/usr/bin/env python
import os
import sys
import math
import numpy
import time
import random
from pyami import imagefun
from pyami import ellipse
from pyami import mrc
from appionlib import apDisplay
#from appionlib import lowess
from appionlib.apImage import imagefile
from appionlib.apImage import imagefilter
from appionlib.apImage import imagestat
from matplotlib import use
use('Agg')
from matplotlib import pyplot
from matplotlib.patches import Ellipse
from appionlib.apCtf import ctfnoise
from appionlib.apCtf import ctftools
from appionlib.apCtf import genctf
from appionlib.apCtf import ctfres
from PIL import Image
from PIL import ImageDraw
from scipy import ndimage
import scipy.stats
class CtfDisplay(object):
#====================
#====================
def __init__(self):
### global params that do NOT change with image
self.ringwidth = 1.0
self.debug = False
return
#====================
#====================
def funcrad(self, r, rdata=None, zdata=None):
return numpy.interp(r, rdata, zdata)
#====================
#====================
	def Array1dintoArray2d(self, array1d, rdata, shape):
		array2d = imagefun.fromRadialFunction(self.funcrad, shape, rdata=rdata, zdata=array1d)
		return array2d
#====================
#====================
def normalizeCtf(self, zdata2d, twod=True):
"""
inner cut radius - radius for number of pixels to clip in the center of image
"""
###
### PART 1: SETUP PARAMETERS AND ELLIPTICAL AVERAGE
###
apDisplay.printColor("PART 1: SETUP PARAMETERS AND ELLIPTICAL AVERAGE", "magenta")
meandefocus = math.sqrt(self.defocus1*self.defocus2)
if meandefocus < 0.6e-6:
self.ringwidth = 3.0
elif meandefocus < 1.0e-6:
self.ringwidth = 2.0
elif meandefocus > 5.0e-6:
self.ringwidth = 0.5
### get all peak (not valley)
peak = ctftools.getCtfExtrema(meandefocus, self.trimfreq*1e10, self.cs, self.volts,
self.ampcontrast, numzeros=250, zerotype="peak")
apDisplay.printMsg("Number of available peaks is %d"%(len(peak)))
if len(peak) < 6:
apDisplay.printWarning("Too few peaks to work with, probably bad defocus estimate")
return None
firstpeak = peak[0]
peakradii = numpy.array(peak, dtype=numpy.float64)*self.trimfreq
### get all valley (not peak)
valley = ctftools.getCtfExtrema(meandefocus, self.trimfreq*1e10, self.cs, self.volts,
self.ampcontrast, numzeros=250, zerotype="valley")
firstvalley = valley[0]
valleyradii = numpy.array(valley, dtype=numpy.float64)*self.trimfreq
### do the elliptical average
if self.ellipratio is None:
return None
#imagestat.printImageInfo(zdata2d)
pixelrdata, rotdata = ctftools.ellipticalAverage(zdata2d, self.ellipratio, self.angle,
self.ringwidth, firstpeak, full=False)
raddata = pixelrdata*self.trimfreq
if self.debug is True:
print "Elliptical CTF limits %.1f A -->> %.1fA"%(1./raddata.min(), 1./raddata.max())
apDisplay.printMsg("Determine and subtract noise model")
CtfNoise = ctfnoise.CtfNoise()
###
### PART 2: BACKGROUND NOISE SUBTRACTION
###
apDisplay.printColor("PART 2: BACKGROUND NOISE SUBTRACTION", "magenta")
### split the function up in first 3/5 and last 3/5 of data with 1/5 overlap
firstvalleyindex = numpy.searchsorted(raddata, self.trimfreq*firstvalley)
numpoints = len(raddata) - firstvalleyindex
# require at least 10 points past first peak of CTF to perform estimation
if numpoints < 10:
apDisplay.printWarning("Not enough points past first peak (n=%d < 10) to do background subtraction"
%(numpoints))
return None
npart1start = firstvalleyindex
npart1end = int(firstvalleyindex + numpoints*6/10.)
npart2start = int(firstvalleyindex + numpoints*5/10.)
npart2end = int(firstvalleyindex + numpoints*9/10.)
npart3start = int(firstvalleyindex + numpoints*8/10.)
npart3end = len(raddata)
svalleydata = ctfnoise.peakExtender(raddata, rotdata, valleyradii, "below")
### fit function below log(CTF), i.e., noise model
## first part data
noisefitparams1 = CtfNoise.modelCTFNoise(raddata[npart1start:npart1end],
svalleydata[npart1start:npart1end], "below")
noisedata1 = CtfNoise.noiseModel(noisefitparams1, raddata)
## second part data
noisefitparams2 = CtfNoise.modelCTFNoise(raddata[npart2start:npart2end],
rotdata[npart2start:npart2end], "below")
noisedata2 = CtfNoise.noiseModel(noisefitparams2, raddata)
## third part data
#noisefitparams3 = CtfNoise.modelCTFNoise(raddata[npart3start:npart3end],
# svalleydata[npart3start:npart3end], "below")
noisefitparams3 = CtfNoise.modelCTFNoise(raddata[npart3start:npart3end],
rotdata[npart3start:npart3end], "below")
noisedata3 = CtfNoise.noiseModel(noisefitparams3, raddata)
## debug only
singlenoisefitparams = CtfNoise.modelCTFNoise(raddata[npart1start:npart3end],
svalleydata[npart1start:npart3end], "below")
singlenoisedata = CtfNoise.noiseModel(singlenoisefitparams, raddata)
## merge data
scale = numpy.arange(npart1end-npart2start, dtype=numpy.float32)
scale /= scale.max()
overlapdata1 = noisedata1[npart2start:npart1end]*(1-scale) + noisedata2[npart2start:npart1end]*scale
scale = numpy.arange(npart2end-npart3start, dtype=numpy.float32)
scale /= scale.max()
overlapdata2 = noisedata2[npart3start:npart2end]*(1-scale) + noisedata3[npart3start:npart2end]*scale
mergedata = numpy.hstack((noisedata1[:npart2start], overlapdata1,
noisedata2[npart1end:npart3start], overlapdata2,
noisedata3[npart2end:]))
noisedata = mergedata
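		### The three piecewise fits are blended with a linear cross-fade over each overlap
		### window: merged(i) = (1 - w_i)*fitA(i) + w_i*fitB(i), with w ramping from 0 to 1.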
### DO THE SUBTRACTION
normexprotdata = numpy.exp(rotdata) - numpy.exp(noisedata)
### CUT OUT ANY NEGATIVE VALUES FOR DISPLAY AND FITTING PURPOSES ONLY
minval = -1
mindata = ndimage.maximum_filter(normexprotdata, 2)
count = 0
while minval < 3 and count < 10:
count += 1
mindata = ndimage.maximum_filter(mindata, 2)
minval = mindata.min()
if self.debug is True:
apDisplay.printMsg("Minimum value for normalization: %.3f"%(minval))
if minval < 3:
minval = 3
normlogrotdata = numpy.log(numpy.where(normexprotdata<minval, minval, normexprotdata))
		if numpy.isnan(normlogrotdata).any():
apDisplay.printError("Error in log normalization of CTF data")
###
### PART 3: ENVELOPE NORMALIZATION
###
apDisplay.printColor("PART 3: ENVELOPE NORMALIZATION", "magenta")
### split the function up in first 3/5 and last 3/5 of data with 1/5 overlap
firstpeakindex = numpy.searchsorted(raddata, firstpeak*self.trimfreq)
numpoints = len(raddata) - firstpeakindex
epart1start = firstpeakindex
epart1end = int(firstpeakindex + numpoints*6/10.)
epart2start = int(firstpeakindex + numpoints*5/10.)
epart2end = int(firstpeakindex + numpoints*9/10.)
epart3start = int(firstpeakindex + numpoints*8/10.)
epart3end = len(raddata)
peakdata = ctfnoise.peakExtender(raddata, normlogrotdata, peakradii, "above")
## first part data
envelopfitparams1 = CtfNoise.modelCTFNoise(raddata[epart1start:epart1end],
peakdata[epart1start:epart1end], "above")
envelopdata1 = CtfNoise.noiseModel(envelopfitparams1, raddata)
## second part data
envelopfitparams2 = CtfNoise.modelCTFNoise(raddata[epart2start:epart2end],
peakdata[epart2start:epart2end], "above")
envelopdata2 = CtfNoise.noiseModel(envelopfitparams2, raddata)
## third part data
envelopfitparams3 = CtfNoise.modelCTFNoise(raddata[epart3start:epart3end],
peakdata[epart3start:epart3end], "above")
envelopdata3 = CtfNoise.noiseModel(envelopfitparams3, raddata)
## merge data
scale = numpy.arange(epart1end-epart2start, dtype=numpy.float32)
scale /= scale.max()
overlapdata1 = envelopdata1[epart2start:epart1end]*(1-scale) + envelopdata2[epart2start:epart1end]*scale
scale = numpy.arange(epart2end-epart3start, dtype=numpy.float32)
scale /= scale.max()
overlapdata2 = envelopdata2[epart3start:epart2end]*(1-scale) + envelopdata3[epart3start:epart2end]*scale
mergedata = numpy.hstack((envelopdata1[:epart2start], overlapdata1,
envelopdata2[epart1end:epart3start], overlapdata2,
envelopdata3[epart2end:]))
envelopdata = mergedata
normnormexprotdata = normexprotdata / numpy.exp(envelopdata)
###
### PART 4: PEAK EXTENSION
###
apDisplay.printColor("PART 4: PEAK EXTENSION", "magenta")
### Subtract fit valley locations
valleydata = ctfnoise.peakExtender(raddata, normnormexprotdata, valleyradii, "below")
valleydata = ndimage.gaussian_filter1d(valleydata, 1)
normvalleydata = normnormexprotdata - valleydata
### Normalize fit peak locations
peakdata = ctfnoise.peakExtender(raddata, normvalleydata, peakradii, "above")
peakdata = ndimage.gaussian_filter1d(peakdata, 1)
normpeakdata = normvalleydata / peakdata
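		### Net effect of parts 2-4: after noise subtraction, envelope division, valley
		### subtraction and peak division, the 1D profile lies roughly in [0, 1] and can be
		### compared directly against the generated CTF fit curve in part 5.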
###
### PART 5: CTF FIT AND CONFIDENCE
###
apDisplay.printColor("PART 5: CTF FIT AND CONFIDENCE", "magenta")
### everything in mks units, because rdata is 1/A multiply be 1e10 to get 1/m
ctffitdata = genctf.generateCTF1d(raddata*1e10, focus=meandefocus, cs=self.cs,
volts=self.volts, ampconst=self.ampcontrast, failParams=False)
#ctffitdata2 = genctf.generateCTF1dACE2(raddata*1e10, focus=meandefocus, cs=self.cs,
# volts=self.volts, ampconst=self.ampcontrast, failParams=False)
overctffitdata = genctf.generateCTF1d(raddata*1e10, focus=meandefocus, cs=self.cs,
volts=self.volts, ampconst=self.ampcontrast, failParams=False, overfocus=True)
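		### The confidence scores below are Pearson correlations between the normalized 1D
		### profile and the generated CTF curve, taken over the 1/30A - 1/10A band (conf3010)
		### or over the first five Thon-ring peaks (conf5peak).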
ind30 = numpy.searchsorted(raddata, 1/30.)
ind10 = numpy.searchsorted(raddata, 1/10.)
self.conf3010 = scipy.stats.pearsonr(normpeakdata[ind30:ind10], ctffitdata[ind30:ind10])[0]
self.overconf3010 = scipy.stats.pearsonr(normpeakdata[ind30:ind10], overctffitdata[ind30:ind10])[0]
apDisplay.printColor("1/30A - 1/10A confidence is %.3f (overfocus %.3f)"%(self.conf3010, self.overconf3010), "green")
if self.overconf3010 > self.conf3010*1.1:
apDisplay.printWarning("Image is possibly over-focused")
ind5peak1 = numpy.searchsorted(raddata, peakradii[0])
ind5peak2 = numpy.searchsorted(raddata, peakradii[5])
self.conf5peak = scipy.stats.pearsonr(normpeakdata[ind5peak1:ind5peak2], ctffitdata[ind5peak1:ind5peak2])[0]
self.overconf5peak = scipy.stats.pearsonr(normpeakdata[ind5peak1:ind5peak2], overctffitdata[ind5peak1:ind5peak2])[0]
apDisplay.printColor("5 peak confidence is %.3f (overfocus %.3f)"%(self.conf5peak, self.overconf5peak), "green")
if self.overconf5peak > self.conf5peak*1.1:
apDisplay.printWarning("Image is possibly over-focused")
###
### PART 6: CTF RESOLUTION LIMITS
###
apDisplay.printColor("PART 6: CTF RESOLUTION LIMITS", "magenta")
confraddata, confdata = ctfres.getCorrelationProfile(raddata,
normpeakdata, ctffitdata, peak, self.trimfreq)
overconfraddata, overconfdata = ctfres.getCorrelationProfile(raddata,
normpeakdata, overctffitdata, peak, self.trimfreq)
self.res80 = ctfres.getResolutionFromConf(confraddata, confdata, limit=0.8)
if self.res80 is None:
self.res80 = 100.0
self.overres80 = ctfres.getResolutionFromConf(overconfraddata, overconfdata, limit=0.8)
if self.overres80 is None:
self.overres80 = 100.0
self.res50 = ctfres.getResolutionFromConf(confraddata, confdata, limit=0.5)
if self.res50 is None:
self.res50 = 100.0
res50max = min(raddata.max(), 1/10.)
elif self.res50 > 15.0:
res50max = min(raddata.max(), 1/10.)
else:
res50max = min(raddata.max(), 1.5/self.res50)
self.overres50 = ctfres.getResolutionFromConf(overconfraddata, overconfdata, limit=0.5)
if self.overres50 is None:
self.overres50 = 100.0
apDisplay.printColor("Resolution limit is %.2f at 0.8 and %.2f at 0.5"
%(self.res80, self.res50), "green")
###
### PART 7: MAKE 1D PLOT SUMMARY FIGURE
###
apDisplay.printColor("PART 7: MAKE 1D PLOT SUMMARY FIGURE", "magenta")
titlefontsize=8
axisfontsize=7
raddatasq = raddata**2
confraddatasq = confraddata**2
valleyradiisq = valleyradii**2
peakradiisq = peakradii**2
fpi = firstpeakindex
pyplot.clf()
if 'subplot2grid' in dir(pyplot):
pyplot.subplot2grid((3,2), (0,0))
else:
pyplot.subplot(2,2,1) # 2 rows, 2 columns, plot 1
pyplot.title("Background Noise Subtraction", fontsize=titlefontsize)
pyplot.ylabel("Log(PSD)", fontsize=axisfontsize)
pyplot.plot(raddata[fpi:], rotdata[fpi:],
'-', color="blue", alpha=0.5, linewidth=0.5)
pyplot.plot(raddata[fpi:], rotdata[fpi:],
'.', color="blue", alpha=0.75, markersize=2.0)
pyplot.plot(raddata[npart1start:npart1end], noisedata1[npart1start:npart1end],
'-', color="magenta", alpha=0.5, linewidth=2)
pyplot.plot(raddata[npart2start:npart2end], noisedata2[npart2start:npart2end],
'-', color="red", alpha=0.5, linewidth=2)
pyplot.plot(raddata[npart3start:npart3end], noisedata3[npart3start:npart3end],
'-', color="orange", alpha=0.5, linewidth=2)
pyplot.plot(raddata[fpi:], noisedata[fpi:],
'--', color="purple", alpha=1.0, linewidth=1)
self.setPyPlotXLabels(raddata, valleyradii=valleyradii, maxloc=res50max)
pyplot.ylim(ymin=noisedata.min())
if 'subplot2grid' in dir(pyplot):
pyplot.subplot2grid((3,2), (0,1))
else:
pyplot.subplot(2,2,2) # 2 rows, 2 columns, plot 2
pyplot.title("Envelope Normalization", fontsize=titlefontsize)
pyplot.ylabel("Log(PSD-Noise)", fontsize=axisfontsize)
pyplot.plot(raddata[fpi:], normlogrotdata[fpi:],
'-', color="blue", alpha=0.5, linewidth=0.5)
pyplot.plot(raddata[fpi:], normlogrotdata[fpi:],
'.', color="blue", alpha=0.75, markersize=2.0)
pyplot.plot(raddata[epart1start:epart1end], envelopdata1[epart1start:epart1end],
'-', color="magenta", alpha=0.5, linewidth=2)
pyplot.plot(raddata[epart2start:epart2end], envelopdata2[epart2start:epart2end],
'-', color="red", alpha=0.5, linewidth=2)
pyplot.plot(raddata[epart3start:epart3end], envelopdata3[epart3start:epart3end],
'-', color="orange", alpha=0.5, linewidth=2)
pyplot.plot(raddata[fpi:], envelopdata[fpi:],
'--', color="purple", alpha=1.0, linewidth=1)
self.setPyPlotXLabels(raddata, peakradii=peakradii, maxloc=res50max)
pyplot.ylim(ymax=envelopdata.max())
if 'subplot2grid' in dir(pyplot):
pyplot.subplot2grid((3,2), (1,0), colspan=2)
else:
pyplot.subplot(2,2,3) # 2 rows, 2 columns, plot 3
pyplot.title("Fit of CTF data (30-10A %.3f / 5-peak %.3f) Def1= %.3e / Def2= %.3e"
%(self.conf3010, self.conf5peak, self.defocus1, self.defocus2), fontsize=titlefontsize)
pyplot.ylabel("Norm PSD", fontsize=titlefontsize)
pyplot.plot(raddatasq[fpi:], ctffitdata[fpi:],
'-', color="black", alpha=0.5, linewidth=1)
#pyplot.plot(raddatasq[fpi:], overctffitdata[fpi:],
# '-', color="red", alpha=0.75, linewidth=1)
pyplot.plot(raddatasq[fpi:], normpeakdata[fpi:],
'-', color="blue", alpha=0.5, linewidth=0.5)
pyplot.plot(raddatasq[fpi:], normpeakdata[fpi:],
'.', color="blue", alpha=0.75, markersize=2.0)
self.setPyPlotXLabels(raddatasq, maxloc=1/5.**2, square=True)
pyplot.grid(True, linestyle=':', )
pyplot.ylim(-0.05, 1.05)
"""
pyplot.subplot2grid((3,2), (1,1))
tenangindex = numpy.searchsorted(raddata, 1/10.)-1
pyplot.title("Defocus1= %.3e / Defocus2= %.3e"
%(self.defocus1, self.defocus2), fontsize=titlefontsize)
pyplot.ylabel("Norm PSD", fontsize=titlefontsize)
pyplot.plot(raddatasq[tenangindex:], ctffitdata[tenangindex:],
'-', color="black", alpha=0.5, linewidth=1)
pyplot.plot(raddatasq[tenangindex:], normpeakdata[tenangindex:],
'-', color="blue", alpha=0.5, linewidth=0.5)
pyplot.plot(raddatasq[tenangindex:], normpeakdata[tenangindex:],
'.', color="blue", alpha=0.75, markersize=2.0)
self.setPyPlotXLabels(raddatasq[tenangindex:], maxloc=1/7.**2, square=True)
pyplot.grid(True, linestyle=':', )
pyplot.ylim(-0.05, 1.05)
"""
if 'subplot2grid' in dir(pyplot):
pyplot.subplot2grid((3,2), (2,0), colspan=2)
else:
pyplot.subplot(2,2,4) # 2 rows, 2 columns, plot 4
pyplot.title("Resolution limits: %.2fA at 0.8 and %.2fA at 0.5"
%(self.res80, self.res50), fontsize=titlefontsize)
pyplot.ylabel("Correlation", fontsize=titlefontsize)
pyplot.plot(raddata[fpi:], ctffitdata[fpi:],
'-', color="black", alpha=0.2, linewidth=1)
pyplot.plot(raddata[fpi:], normpeakdata[fpi:],
'-', color="blue", alpha=0.2, linewidth=1)
#pyplot.plot(raddata[fpi:], normpeakdata[fpi:],
# '.', color="black", alpha=0.25, markersize=1.0)
pyplot.axvline(x=1.0/self.res80, linewidth=2, color="gold", alpha=0.95, ymin=0, ymax=0.8)
pyplot.axvline(x=1.0/self.res50, linewidth=2, color="red", alpha=0.95, ymin=0, ymax=0.5)
res80index = numpy.searchsorted(confraddata, 1.0/self.res80)
pyplot.plot(confraddata[:res80index+1], confdata[:res80index+1],
'-', color="green", alpha=1, linewidth=2)
res50index = numpy.searchsorted(confraddata, 1.0/self.res50)
pyplot.plot(confraddata[res80index-1:res50index+1], confdata[res80index-1:res50index+1],
'-', color="orange", alpha=1, linewidth=2)
pyplot.plot(confraddata[res50index-1:], confdata[res50index-1:],
'-', color="red", alpha=1, linewidth=2)
self.setPyPlotXLabels(raddata, maxloc=res50max)
pyplot.grid(True, linestyle=':', )
if self.res80 < 99:
pyplot.ylim(-0.05, 1.05)
elif self.res50 < 99:
pyplot.ylim(-0.25, 1.05)
else:
pyplot.ylim(-0.55, 1.05)
pyplot.subplots_adjust(wspace=0.22, hspace=0.50,
bottom=0.08, left=0.07, top=0.95, right=0.965, )
self.plotsfile = apDisplay.short(self.imgname)+"-plots.png"
apDisplay.printMsg("Saving 1D graph to file %s"%(self.plotsfile))
pyplot.savefig(self.plotsfile, format="png", dpi=300, orientation='landscape', pad_inches=0.0)
if self.debug is True:
### write a 1d profile dat files
f = open(apDisplay.short(self.imgname)+"-noise_fit.dat", "w")
for i in range(npart1start, npart3end):
f.write("%.16f\t%.16f\t%.16f\t%.16f\n"%(raddata[i], rotdata[i], singlenoisedata[i], noisedata[i]))
f.write("&\n")
for i in range(npart1start, npart1end):
f.write("%.16f\t%.16f\n"%(raddata[i], noisedata1[i]))
f.write("&\n")
for i in range(npart2start, npart2end):
f.write("%.16f\t%.16f\n"%(raddata[i], noisedata2[i]))
f.write("&\n")
for i in range(npart3start, npart3end):
f.write("%.16f\t%.16f\n"%(raddata[i], noisedata3[i]))
f.write("&\n")
f.close()
#smallrotdata = numpy.where(rotdata-singlenoisedata>0.19, 0.19, rotdata-singlenoisedata)
noiseexp = numpy.exp(singlenoisedata)
smallrotdata = numpy.exp(rotdata) - noiseexp
minval = 3
smallrotdata = numpy.log(numpy.where(smallrotdata<minval, minval, smallrotdata))
smallnoise = numpy.exp(noisedata) - noiseexp
smallnoise = numpy.log(numpy.where(smallnoise<minval, minval, smallnoise))
smallnoise1 = numpy.exp(noisedata1) - noiseexp
smallnoise1 = numpy.log(numpy.where(smallnoise1<minval, minval, smallnoise1))
smallnoise2 = numpy.exp(noisedata2) - noiseexp
smallnoise2 = numpy.log(numpy.where(smallnoise2<minval, minval, smallnoise2))
smallnoise3 = numpy.exp(noisedata3) - noiseexp
smallnoise3 = numpy.log(numpy.where(smallnoise3<minval, minval, smallnoise3))
f = open(apDisplay.short(self.imgname)+"-noisesubt_fit.dat", "w")
for i in range(len(ctffitdata)):
f.write("%.16f\t%.16f\n"%(raddata[i], smallrotdata[i]))
f.write("&\n")
for i in range(npart1start, npart3end):
f.write("%.16f\t%.16f\t%.16f\t%.16f\n"%(raddata[i], smallrotdata[i], smallnoise[i], 0))
f.write("&\n")
for i in range(npart1start, npart1end):
f.write("%.16f\t%.16f\n"%(raddata[i], smallnoise1[i]))
f.write("&\n")
for i in range(npart2start, npart2end):
f.write("%.16f\t%.16f\n"%(raddata[i], smallnoise2[i]))
f.write("&\n")
for i in range(npart3start, npart3end):
f.write("%.16f\t%.16f\n"%(raddata[i], smallnoise3[i]))
f.write("&\n")
f.close()
f = open(apDisplay.short(self.imgname)+"-ctf_fit.dat", "w")
for i in range(len(ctffitdata)):
f.write("%.16f\t%.16f\t%.16f\n"%(raddata[i], normpeakdata[i], ctffitdata[i]))
f.close()
#sys.exit(1)
if self.debug is True:
print "Showing results"
#pyplot.show()
#plotspng = Image.open(self.plotsfile)
#plotspng.show()
pyplot.clf()
if twod is False:
return zdata2d
###
### PART 8: NORMALIZE THE 2D IMAGE
###
apDisplay.printColor("PART 8: NORMALIZE THE 2D IMAGE", "magenta")
### Convert 1D array into 2D array by un-elliptical average
noise2d = ctftools.unEllipticalAverage(pixelrdata, noisedata,
self.ellipratio, self.angle, zdata2d.shape)
envelop2d = ctftools.unEllipticalAverage(pixelrdata, envelopdata,
self.ellipratio, self.angle, zdata2d.shape)
valley2d = ctftools.unEllipticalAverage(pixelrdata, valleydata,
self.ellipratio, self.angle, zdata2d.shape)
peak2d = ctftools.unEllipticalAverage(pixelrdata, peakdata,
self.ellipratio, self.angle, zdata2d.shape)
### Do the normalization on the 2d data
#blur2d = ndimage.gaussian_filter(zdata2d, 2)
normal2d = numpy.exp(zdata2d) - numpy.exp(noise2d)
normal2d = normal2d / numpy.exp(envelop2d)
normal2d = normal2d - valley2d
normal2d = normal2d / peak2d
normal2d = numpy.where(normal2d < -0.2, -0.2, normal2d)
normal2d = numpy.where(normal2d > 1.2, 1.2, normal2d)
return normal2d
#====================
#====================
def trimDataToExtrema(self, xdata, rawdata, extrema):
trimxdata = []
trimrawdata = []
for i in range(len(extrema)):
exvalue = extrema[i]
index = numpy.searchsorted(xdata, exvalue)
trimxdata.extend(xdata[index-10:index+10])
trimrawdata.extend(rawdata[index-10:index+10])
return numpy.array(trimxdata), numpy.array(trimrawdata)
#====================
#====================
def setPyPlotXLabels(self, xdata, peakradii=None, valleyradii=None, square=False, maxloc=None):
"""
assumes xdata is in units of 1/Angstroms
"""
minloc = xdata.min()
if maxloc is None:
maxloc = xdata.max()
xstd = xdata.std()/2.
pyplot.xlim(xmin=minloc, xmax=maxloc)
locs, labels = pyplot.xticks()
### assumes that x values are 1/Angstroms^2, which give the best plot
newlocs = []
newlabels = []
#print "maxloc=", maxloc
for loc in locs:
if loc < minloc + xstd/4:
continue
if square is True:
origres = 1.0/math.sqrt(loc)
else:
origres = 1.0/loc
if origres > 50:
trueres = round(origres/10.0)*10
if origres > 25:
trueres = round(origres/5.0)*5
elif origres > 12:
trueres = round(origres/2.0)*2
elif origres > 7.5:
trueres = round(origres)
else:
trueres = round(origres*2)/2.0
if square is True:
trueloc = 1.0/trueres**2
else:
trueloc = 1.0/trueres
#print ("Loc=%.4f, Res=%.2f, TrueRes=%.1f, TrueLoc=%.4f"
# %(loc, origres, trueres, trueloc))
if trueloc > maxloc - xstd:
continue
if trueres < 10 and (trueres*2)%2 == 1:
label = "1/%.1fA"%(trueres)
else:
label = "1/%dA"%(trueres)
if not label in newlabels:
newlabels.append(label)
newlocs.append(trueloc)
#add final value
newlocs.append(minloc)
if square is True:
minres = 1.0/math.sqrt(minloc)
else:
minres = 1.0/minloc
label = "1/%dA"%(minres)
newlabels.append(label)
newlocs.append(maxloc)
if square is True:
maxres = 1.0/math.sqrt(maxloc)
else:
maxres = 1.0/maxloc
label = "1/%.1fA"%(maxres)
newlabels.append(label)
# set the labels
pyplot.yticks(fontsize=8)
pyplot.xticks(newlocs, newlabels, fontsize=7)
if square is True:
pyplot.xlabel("Resolution (s^2)", fontsize=9)
else:
pyplot.xlabel("Resolution (s)", fontsize=9)
if peakradii is not None:
for i, rad in enumerate(peakradii):
if rad < minloc:
continue
elif rad > maxloc:
break
else:
pyplot.axvline(x=rad, linewidth=0.5, color="cyan", alpha=0.5)
if valleyradii is not None:
for i, rad in enumerate(valleyradii):
if rad < minloc:
continue
elif rad > maxloc:
break
else:
pyplot.axvline(x=rad, linewidth=0.5, color="gold", alpha=0.5)
return
#====================
#====================
def drawPowerSpecImage(self, origpowerspec, maxsize=1200, outerresolution=7.7):
### would be nice to have a more intelligent way to set 'outerresolution'
### based on defocus and measured CTF resolution
origpowerspec = ctftools.trimPowerSpectraToOuterResolution(origpowerspec, outerresolution, self.trimfreq)
if self.debug is True:
print "origpowerspec shape", origpowerspec.shape
#compute elliptical average and merge with original image
pixelrdata, rotdata = ctftools.ellipticalAverage(origpowerspec, self.ellipratio, self.angle,
self.ringwidth*3, 1, full=True)
ellipavgpowerspec = ctftools.unEllipticalAverage(pixelrdata, rotdata,
self.ellipratio, self.angle, origpowerspec.shape)
halfshape = origpowerspec.shape[1]/2
halfpowerspec = numpy.hstack( (origpowerspec[:,:halfshape] , ellipavgpowerspec[:,halfshape:] ) )
if halfpowerspec.shape != origpowerspec.shape:
apDisplay.printError("Error in power spectra creation")
if max(halfpowerspec.shape) > maxsize:
scale = maxsize/float(max(halfpowerspec.shape))
#scale = math.sqrt((random.random()+random.random()+random.random())/3.0)
apDisplay.printMsg( "Scaling final powerspec image by %.3f"%(scale))
powerspec = imagefilter.scaleImage(halfpowerspec, scale)
else:
scale = 1280./float(max(halfpowerspec.shape))
powerspec = imagefilter.scaleImage(halfpowerspec, scale)
#scale = 1.0
#powerspec = halfpowerspec.copy()
self.scaleapix = self.trimapix
self.scalefreq = self.trimfreq/scale
if self.debug is True:
print "orig pixel", self.apix
print "trim pixel", self.trimapix
print "scale pixel", self.scaleapix
numzeros = 13
radii1 = ctftools.getCtfExtrema(self.defocus1, self.scalefreq*1e10,
self.cs, self.volts, self.ampcontrast, numzeros=numzeros, zerotype="valley")
radii2 = ctftools.getCtfExtrema(self.defocus2, self.scalefreq*1e10,
self.cs, self.volts, self.ampcontrast, numzeros=numzeros, zerotype="valley")
#smallest of two defocii
firstpeak = radii2[0]
###
### PART 9: DRAW THE 2D POWERSPEC IMAGE
###
center = numpy.array(powerspec.shape, dtype=numpy.float)/2.0
foundzeros = min(len(radii1), len(radii2))
"""
pyplot.clf()
ax = pyplot.subplot(1,1,1)
pyplot.xticks([], [])
pyplot.yticks([], [])
pyplot.imshow(powerspec)
pyplot.gray()
for i in range(foundzeros):
# because |def1| < |def2| ==> firstzero1 > firstzero2
major = radii1[i]*2
minor = radii2[i]*2
ell = Ellipse(xy=center, width=major, height=minor, angle=self.angle+90,
fill=False, edgecolor="yellow", antialiased=True, linewidth=0.5)
ax.add_artist(ell)
pyplot.subplots_adjust(wspace=0, hspace=0, bottom=0, left=0, top=1, right=1, )
self.newpowerspecfile = apDisplay.short(self.imgname)+"-powerspec-new.png"
pyplot.savefig(self.newpowerspecfile, format="png", dpi=150, pad_inches=0.0)
"""
###
### PART 9: DRAW THE 2D POWERSPEC IMAGE
###
apDisplay.printColor("PART 9: DRAW THE 2D POWERSPEC IMAGE", "magenta")
center = numpy.array(powerspec.shape, dtype=numpy.float)/2.0
originalimage = imagefile.arrayToImage(powerspec)
originalimage = originalimage.convert("RGB")
pilimage = originalimage.copy()
draw = ImageDraw.Draw(pilimage)
#########
## draw astig axis line, if astig > 5%
#########
perdiff = 2*abs(self.defocus1-self.defocus2)/abs(self.defocus1+self.defocus2)
if self.debug is True:
print "Percent Difference %.1f"%(perdiff*100)
if perdiff > 0.05:
#print self.angle, radii2[0], center
x = 1*firstpeak*math.cos(math.radians(self.angle))
y = firstpeak*math.sin(math.radians(self.angle))
#print x,y
xy = (x+center[0], y+center[1], -x+center[0], -y+center[1])
#print xy
draw.line(xy, fill="#f23d3d", width=10)
elif perdiff > 1e-6:
#print self.angle, radii2[0], center
x = 1*firstpeak*math.cos(math.radians(self.angle))
y = firstpeak*math.sin(math.radians(self.angle))
#print x,y
xy = (x+center[0], y+center[1], -x+center[0], -y+center[1])
#print xy
draw.line(xy, fill="#f23d3d", width=2)
#########
## draw colored CTF Thon rings
#########
foundzeros = min(len(radii1), len(radii2))
#color="#3d3dd2" #blue
color="#ffd700" #gold
for i in range(foundzeros):
# because |def1| < |def2| ==> firstzero1 > firstzero2
major = radii1[i]
minor = radii2[i]
if self.debug is True:
print "major=%.1f, minor=%.1f, angle=%.1f"%(major, minor, self.angle)
if minor > powerspec.shape[0]/math.sqrt(3):
# this limits how far we draw out the ellipses sqrt(3) to corner, just 2 inside line
break
width = int(math.ceil(math.sqrt(numzeros - i)))*2
### determine color of circle
currentres = 1.0/(major*self.scalefreq)
if currentres > self.res80:
ringcolor = "green"
elif currentres > self.res50:
ringcolor = "gold"
else:
ringcolor = "red"
### determine number of points to use to draw ellipse, minimize distance btw points
#isoceles triangle, b: radius ot CTF ring, a: distance btw points
#a = 2 * b sin (theta/2)
#a / 2b = sin(theta/2)
#theta = 2 * asin (a/2b)
#numpoints = 2 pi / theta
## define a to be 5 pixels
a = 40
theta = 2.0 * math.asin (a/(2.0*major))
skipfactor = 2
numpoints = int(math.ceil(2.0*math.pi/theta/skipfactor))*skipfactor + 1
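			# e.g. (illustrative numbers) a = 40 px on a ring with major = 400 px gives
			# theta = 2*asin(40/800) ~ 0.100 rad, i.e. ~63 raw steps, which becomes 65
			# points after rounding up to a multiple of skipfactor and closing the loop.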
#print "numpoints", numpoints
points = ellipse.generate_ellipse(major, minor,
math.radians(self.angle), center, numpoints, None, "step", True)
x = points[:,0]
y = points[:,1]
## wrap around to end
x = numpy.hstack((x, [x[0],]))
y = numpy.hstack((y, [y[0],]))
## convert image
numsteps = int(math.floor((len(x)-2)/skipfactor))
for j in range(numsteps):
k = j*skipfactor
xy = (x[k], y[k], x[k+1], y[k+1])
draw.line(xy, fill=ringcolor, width=width)
#########
## draw blue resolution ring
#########
# 1/res = freq * pixrad => pixrad = 1/(res*freq)
maxrad = (max(powerspec.shape)-1)/2.0 - 3
maxres = 1.0/(self.scalefreq*maxrad)
bestres = math.ceil(maxres)
pixrad = 1.0/(self.scalefreq*bestres)
if self.debug is True:
print "bestres %d Angstroms (max: %.3f)"%(bestres, maxres)
print "pixrad %d (max: %.3f)"%(pixrad, maxrad)
if pixrad > maxrad:
apDisplay.printError("Too big of outer radius to draw")
outpixrad = math.ceil(pixrad)+1
inpixrad = math.floor(pixrad)-1
for i in numpy.arange(-4.0,4.01,0.01):
r = pixrad + i
blackxy = numpy.array((center[0]-r,center[1]-r,
center[0]+r,center[1]+r), dtype=numpy.float64)
draw.ellipse(tuple(blackxy), outline="black")
for i in numpy.arange(-1.50,1.51,0.01):
r = pixrad + i
whitexy = numpy.array((center[0]-r,center[1]-r,
center[0]+r,center[1]+r), dtype=numpy.float64)
draw.ellipse(tuple(whitexy), outline="#0BB5FF")
#########
## setup font to add text
#########
fontpath = "/usr/share/fonts/liberation/LiberationSans-Regular.ttf"
from PIL import ImageFont
if os.path.isfile(fontpath):
fontsize = int(math.ceil( 48/2. * min(powerspec.shape)/float(maxsize))*2)
font = ImageFont.truetype(fontpath, fontsize)
else:
font = ImageFont.load_default()
#########
## add resolution ring text
#########
angrad = maxrad/math.sqrt(2) + 1
coord = (angrad+maxrad, angrad+maxrad)
for i in [-2,2]:
for j in [-2,2]:
draw.text((coord[0]+i,coord[1]+j), "%.1f A"%(bestres), font=font, fill="black")
draw.text(coord, "%.1f A"%(bestres), font=font, fill="#0BB5FF")
#########
## add defocus value text
#########
meandef = abs(self.defocus1+self.defocus2)/2.0
deftext = "%.2f um"%(meandef*1e6)
tsize = draw.textsize(deftext, font=font)
coord = (powerspec.shape[0]-4-tsize[0], powerspec.shape[0]-4-tsize[1])
for i in [-2,2]:
for j in [-2,2]:
draw.text((coord[0]+i,coord[1]+j), deftext, font=font, fill="black")
draw.text(coord, deftext, font=font, fill="#AB82FF")
#########
## add text about what sides of powerspec are:
## left - raw data; right - elliptical average data
#########
leftcoord = (4, 4)
for i in [-3, -1, 0, 1, 3]:
for j in [-3, -1, 0, 1, 3]:
draw.text((leftcoord[0]+i,leftcoord[1]+j) , "Raw CTF Data", font=font, fill="black")
draw.text(leftcoord, "Raw CTF Data", font=font, fill="#00BFFF")
tsize = draw.textsize("Elliptical Average", font=font)
xdist = powerspec.shape[0] - 4 - tsize[0]
rightcoord = (xdist, 4)
for i in [-2,2]:
for j in [-2,2]:
draw.text((rightcoord[0]+i,rightcoord[1]+j), "Elliptical Average", font=font, fill="black")
draw.text(rightcoord, "Elliptical Average", font=font, fill="#00BFFF")
#########
## create an alpha blend effect
#########
originalimage = Image.blend(originalimage, pilimage, 0.95)
apDisplay.printMsg("Saving 2D powerspectra to file: %s"%(self.powerspecfile))
#pilimage.save(self.powerspecfile, "JPEG", quality=85)
originalimage.save(self.powerspecfile, "JPEG", quality=85)
if not os.path.isfile(self.powerspecfile):
apDisplay.printWarning("power spec file not created")
if self.debug is True:
#powerspecjpg = Image.open(self.powerspecfile)
#powerspecjpg.show()
pass
return
#=====================
def printCtfData(self, ctfvalue):
if ctfvalue is None:
return
defocusratio = ctfvalue['defocus2']/ctfvalue['defocus1']
if 'acerun' in ctfvalue:
method = getCtfMethod(ctfvalue)
runname = ctfvalue['acerun']['name']
sys.stderr.write("[%s] method: %s | runname %s\n"%
(apDisplay.colorString("CTF run", "blue"), method, runname))
sys.stderr.write("[%s] def1: %.2e | def2: %.2e | angle: %.1f | ampcontr %.2f | defratio %.3f\n"%
(apDisplay.colorString("CTF param", "blue"), ctfvalue['defocus1'],
ctfvalue['defocus2'], ctfvalue['angle_astigmatism'],
ctfvalue['amplitude_contrast'], defocusratio))
if 'resolution_80_percent' in ctfvalue.keys() and ctfvalue['resolution_80_percent'] is not None:
sys.stderr.write("[%s] conf_30-10: %s | conf_5peak: %s | res_0.8: %.1fA | res_0.5 %.1fA\n"%
(apDisplay.colorString("CTF stats", "blue"),
apDisplay.colorProb(ctfvalue['confidence_30_10']),
apDisplay.colorProb(ctfvalue['confidence_5_peak']),
ctfvalue['resolution_80_percent'], ctfvalue['resolution_50_percent']))
#sys.stderr.write("[%s] conf: %s | conf_d: %s\n"%
# (apDisplay.colorString("CTF stats", "blue"), apDisplay.colorProb(ctfvalue['confidence']),
# apDisplay.colorProb(ctfvalue['confidence_d'])))
#apDisplay.colorProb(numlist[i])
#time.sleep(3)
return
#====================
#====================
def convertDefociToConvention(self, ctfdata):
self.printCtfData(ctfdata)
initdefocusratio = ctfdata['defocus2']/ctfdata['defocus1']
# program specific corrections?
self.angle = ctfdata['angle_astigmatism']
#angle = round(self.angle/2.5,0)*2.5
#by convention: abs(ctfdata['defocus1']) < abs(ctfdata['defocus2'])
if abs(ctfdata['defocus1']) > abs(ctfdata['defocus2']):
# incorrect, need to shift angle by 90 degrees
apDisplay.printWarning("|def1| > |def2|, flipping defocus axes")
self.defocus1 = ctfdata['defocus2']
self.defocus2 = ctfdata['defocus1']
self.angle += 90
else:
# correct, ratio > 1
self.defocus1 = ctfdata['defocus1']
self.defocus2 = ctfdata['defocus2']
if self.defocus1 < 0 and self.defocus2 < 0:
apDisplay.printWarning("Negative defocus values, taking absolute value")
self.defocus1 = abs(self.defocus1)
self.defocus2 = abs(self.defocus2)
self.defdiff = self.defocus1 - self.defocus2
#elliptical ratio is ratio of zero locations NOT defocii
self.defocusratio = self.defocus2/self.defocus1
self.ellipratio = ctftools.defocusRatioToEllipseRatio(self.defocus1, self.defocus2,
self.initfreq, self.cs, self.volts, self.ampcontrast)
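		# Rough intuition (ignoring Cs): CTF zero radii scale as 1/sqrt(defocus), so the
		# ellipse axis ratio is close to sqrt(defocus2/defocus1) rather than the defocus
		# ratio itself; defocusRatioToEllipseRatio evaluates the full expression.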
# get angle within range -90 < angle <= 90
while self.angle > 90:
self.angle -= 180
while self.angle < -90:
self.angle += 180
apDisplay.printColor("Final params: def1: %.2e | def2: %.2e | angle: %.1f | defratio %.2f"%
(self.defocus1, self.defocus2, self.angle, self.defocusratio), "cyan")
perdiff = abs(self.defocus1-self.defocus2)/abs(self.defocus1+self.defocus2)
apDisplay.printMsg("Defocus Astig Percent Diff %.2f -- %.3e, %.3e"
%(perdiff*100,self.defocus1,self.defocus2))
return
#====================
#====================
def CTFpowerspec(self, imgdata, ctfdata, fftpath=None, fftfreq=None, outerbound=5e-10, twod=True):
"""
		Make a nice looking power spectrum with lines marking the locations of the Thon rings
		inputs:
			imgdata - sinedon AcquisitionImage table row
			ctfdata - sinedon apCtfData table row
				amplitude contrast - ( a cos + sqrt(1-a^2) sin format)
				defocus1 > defocus2
				angle - in degrees, positive x-axis is zero
			outerbound - outer resolution limit, 5 Angstroms by default (value given in meters, i.e. 5e-10)
				outside this radius is trimmed away
"""
### setup initial parameters for image
#outerbound = outerbound * 2*math.sqrt(random.random())
self.imgname = imgdata['filename']
if self.debug is True:
print apDisplay.short(self.imgname)
self.powerspecfile = apDisplay.short(self.imgname)+"-powerspec.jpg"
### get peak of CTF
self.cs = ctfdata['cs']*1e-3
self.volts = ctfdata['volts']
self.ampcontrast = ctfdata['amplitude_contrast']
### process power spectra
self.apix = ctfdata['apix']
if self.debug is True:
print "Pixelsize (A/pix)", self.apix
apDisplay.printMsg("Reading image...")
image = imgdata['image']
#imagestat.printImageInfo(image)
self.initfreq = 1./(self.apix * image.shape[0])
self.origimageshape = image.shape
### get correct data
self.convertDefociToConvention(ctfdata)
if self.debug is True:
for key in ctfdata.keys():
if ctfdata[key] is not None and not isinstance(ctfdata[key], dict):
print " ", key, "--", ctfdata[key]
if fftpath is not None and fftfreq is not None and os.path.isfile(fftpath):
powerspec = mrc.read(fftpath).astype(numpy.float64)
self.trimfreq = fftfreq
else:
powerspec, self.trimfreq = ctftools.powerSpectraToOuterResolution(image,
outerbound, self.apix)
self.trimapix = 1.0/(self.trimfreq * powerspec.shape[0])
#imagestat.printImageInfo(powerspec)
if max(powerspec.shape) < 33:
apDisplay.printError("Something went wrong the fft image is too small")
#print "Median filter image..."
#powerspec = ndimage.median_filter(powerspec, 2)
apDisplay.printMsg("Preform a rotational average and remove spikes...")
rotfftarray = ctftools.rotationalAverage2D(powerspec)
stdev = rotfftarray.std()
rotplus = rotfftarray + stdev*4
powerspec = numpy.where(powerspec > rotplus, rotfftarray, powerspec)
#print "Light Gaussian blur image..."
#powerspec = ndimage.gaussian_filter(powerspec, 3)
if self.debug is True:
print "\torig pixel %.3f freq %.3e"%(self.apix, self.initfreq)
print "\ttrim pixel %.3f freq %.3e"%(self.trimapix, self.trimfreq)
### more processing
normpowerspec = self.normalizeCtf(powerspec, twod=twod)
if normpowerspec is None:
return None
if twod is True:
self.drawPowerSpecImage(normpowerspec)
ctfdisplaydict = {
'powerspecfile': self.powerspecfile,
'plotsfile': self.plotsfile,
'conf3010': self.conf3010,
'conf5peak': self.conf5peak,
'overconf3010': self.overconf3010,
'overconf5peak': self.overconf5peak,
'res80': self.res80,
'res50': self.res50,
'overres80': self.overres80,
'overres50': self.overres50,
}
return ctfdisplaydict
#====================
#====================
#====================
#====================
if __name__ == "__main__":
import glob
import sinedon
from appionlib import apProject
imagelist = []
#=====================
### CNV data
#imagelist.extend(glob.glob("/data01/leginon/10apr19a/rawdata/10apr19a_10apr19a_*en_1.mrc"))
imagelist.extend(glob.glob("/data01/leginon/10apr19a/rawdata/10apr19a_10apr19a_*23gr*10sq*02hl*17en_1.mrc"))
### Pick-wei images with lots of rings
#imagelist.extend(glob.glob("/data01/leginon/09sep20a/rawdata/09*en.mrc"))
### Something else, ice data
#imagelist.extend(glob.glob("/data01/leginon/09feb20d/rawdata/09*en.mrc"))
### OK groEL ice data
#imagelist.extend(glob.glob("/data01/leginon/05may19a/rawdata/05*en*.mrc"))
### 30S ribosome in stain
#imagelist.extend(glob.glob("/data01/leginon/12jun06h52a/rawdata/12*en*.mrc"))
imagelist.extend(glob.glob("/data01/leginon/12jun06h52a/rawdata/12jun06h52a_09oct22c*04sq*19hl*2en*.mrc"))
### images of Hassan with 1.45/1.65 astig at various angles
#imagelist.extend(glob.glob("/data01/leginon/12jun12a/rawdata/12jun12a_ctf_image_ang*.mrc"))
### rectangular images
#imagelist.extend(glob.glob("/data01/leginon/12may08eD1/rawdata/*.mrc"))
#=====================
apDisplay.printMsg("# of images: %d"%(len(imagelist)))
#imagelist.sort()
#imagelist.reverse()
random.shuffle(imagelist)
#imagelist = imagelist[:30]
random.shuffle(imagelist)
for imgfile in imagelist:
apDisplay.printMsg(apDisplay.short(os.path.basename(imgfile)))
count = 0
for imgfile in imagelist:
count += 1
imagename = os.path.basename(imgfile)
imagename = imagename.replace(".mrc", "")
imgdata = apDatabase.getImageData(imagename)
### change project
projid = apProject.getProjectIdFromImageData(imgdata)
newdbname = apProject.getAppionDBFromProjectId(projid)
sinedon.setConfig('appiondata', db=newdbname)
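		# point sinedon's appiondata connection at the database belonging to this image's project
		# before any further appiondata queries are made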
powerspecfile = apDisplay.short(imagename)+"-powerspec.jpg"
if os.path.isfile(powerspecfile):
apDisplay.printColor("Skipping image %s, already complete"%(apDisplay.short(imagename)), "cyan")
continue
ctfdata = ctfdb.getBestCtfByResolution(imgdata)
#ctfdata, bestconf = ctfdb.getBestCtfValueForImage(imgdata, method="ctffind")
#ctfdata, bestconf = ctfdb.getBestCtfValueForImage(imgdata, method="ace2")
if ctfdata is None:
apDisplay.printColor("Skipping image %s, no CTF data"%(apDisplay.short(imagename)), "red")
continue
#print ctfdata
if ctfdata['confidence_30_10'] < 0.88:
apDisplay.printColor("Skipping image %s, poor confidence"%(apDisplay.short(imagename)), "red")
continue
"""
if ctfdata['resolution_50_percent'] > 10 or ctfdata['resolution_50_percent'] < 7.5:
apDisplay.printColor("Skipping image %s, not right 50per resolution"%(apDisplay.short(imagename)), "red")
continue
if ctfdata['resolution_80_percent'] > 13 or ctfdata['resolution_80_percent'] < 8.5:
apDisplay.printColor("Skipping image %s, not right 80per resolution"%(apDisplay.short(imagename)), "red")
continue
if ctfdata['defocus1'] > 2.0e-6:
apDisplay.printColor("Skipping image %s, too high defocus"%(apDisplay.short(imagename)), "red")
continue
"""
print ""
print "**********************************"
print "IMAGE: %s (%d of %d)"%(apDisplay.short(imagename), count, len(imagelist))
print "**********************************"
a = CtfDisplay()
ctfdisplaydict = a.CTFpowerspec(imgdata, ctfdata)
print "**********************************"
#if count > 8:
# sys.exit(1)
#====================
#====================
#====================
def makeCtfImages(imgdata, ctfdata, fftpath=None, fftfreq=None, twod=True):
a = CtfDisplay()
apix = apDatabase.getPixelSize(imgdata)
ctfdata['apix'] = apix
ctfdata['volts'] = imgdata['scope']['high tension']
ctfdisplaydict = a.CTFpowerspec(imgdata, ctfdata, fftpath, fftfreq, twod=twod)
return ctfdisplaydict
| apache-2.0 |
detrout/debian-statsmodels | statsmodels/examples/tut_ols_rlm_short.py | 34 | 1649 | '''Examples: comparing OLS and RLM
robust estimators and outliers
RLM is less influenced by outliers than OLS: its estimated slope stays
closer to the true slope and is not tilted the way the OLS fit is.
Note: plt.show() at the end displays the graphs
'''
from __future__ import print_function
import numpy as np
#from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
#fix a seed for these examples
np.random.seed(98765789)
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.c_[x1, np.ones(nsample)]
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [0.5, 5.]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# Example: estimate linear function (true is linear)
plt.figure()
plt.plot(x1, y2, 'o', x1, y_true2, 'b-')
res2 = sm.OLS(y2, X).fit()
print("OLS: parameter estimates: slope, constant")
print(res2.params)
print("standard deviation of parameter estimates")
print(res2.bse)
prstd, iv_l, iv_u = wls_prediction_std(res2)
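# iv_l and iv_u are the lower/upper prediction-interval bounds for each observation,
# drawn below as dashed red lines around the fitted OLS line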
plt.plot(x1, res2.fittedvalues, 'r-')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
#compare with robust estimator
resrlm2 = sm.RLM(y2, X).fit()
print("\nRLM: parameter estimates: slope, constant")
print(resrlm2.params)
print("standard deviation of parameter estimates")
print(resrlm2.bse)
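# the IRLS weights of the robust fit (resrlm2.weights) should be close to zero for the
# injected outliers, which is why the green RLM line stays near the true line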
plt.plot(x1, resrlm2.fittedvalues, 'g.-')
plt.title('Data with Outliers; blue: true, red: OLS, green: RLM')
# see also help(sm.RLM.fit) for more options and
# module sm.robust.scale for scale options
plt.show()
| bsd-3-clause |
manashmndl/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
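    # the same random matrix is returned both as a dense ndarray and as a CSR copy,
    # so the caller can benchmark either input layout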
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
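    # johnson_lindenstrauss_min_dim gives the smallest embedding dimension that preserves
    # pairwise distances within a factor (1 +/- eps), per the Johnson-Lindenstrauss lemma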
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
lcharleux/numerical_analysis | doc/Traitement_images/Example_code/europe_comptage.py | 1 | 1998 | #----------------------------------------------------
# Counting islands and continents
#----------------------------------------------------
# PACKAGES
from PIL import Image                   # Load the Python Imaging Library
import numpy as np                      # Load Numpy
from matplotlib import pyplot as plt    # Load pyplot (a Matplotlib submodule) and rename it plt
from scipy.ndimage import morphology    # Scipy mathematical morphology module
from scipy.ndimage import measurements  # Scipy labeling / measurement module
# IMAGE PROCESSING
im = Image.open('europe.tif')           # PIL can read every common image format
Nx, Ny = im.size                        # Image size, used to reduce the resolution below
im = im.resize((Nx/5, Ny/5), Image.ANTIALIAS)
Z = np.array(im).astype(np.float64)     # Convert the image to an array
max_altitude = 1000.                    # Maximum altitude in meters; this value is somewhat questionable (to be confirmed)
Z = Z / Z.max() * max_altitude          # Rescale the altitudes
Z = np.where(Z > 0., 1., 0.)            # np.where applies a boolean test to each pixel and sets the corresponding output value
# EROSION / DILATION
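# An erosion followed by a dilation with the same structuring element is a morphological
# opening: land features smaller than the 10x10 element are removed before counting.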
structure = np.ones([10,10])                                   # Define the structuring element
Z = morphology.binary_erosion(Z, structure = structure) + 0.   # Apply the erosion
Z = morphology.binary_dilation(Z, structure = structure) + 0.  # Apply the dilation
# COUNTING
Zl, nombre = measurements.label(Z)                             # Label and count the connected zones
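# measurements.label gives each connected component of land pixels a distinct integer label;
# 'nombre' is the number of components (islands/continents) found.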
# ISOLATING ONE ZONE
zone = 5                                                       # Label of the zone to isolate
Zli = np.where(Zl == zone, 1, np.nan)                          # Mask everything outside the selected zone (it becomes NaN)
# DISPLAY
fig = plt.figure(0)
plt.clf()
fig.add_subplot(131)
plt.title('Initial image')
plt.imshow(Z, interpolation = 'nearest')
fig.add_subplot(132)
plt.title('Count: {0} zones'.format(nombre))
plt.imshow(np.where(Zl == 0, np.nan, Zl), interpolation = 'nearest')
fig.add_subplot(133)
plt.title('Zone {0} isolated'.format(zone))
plt.imshow(Zli, interpolation = 'nearest')
plt.show()
| gpl-2.0 |
TomAugspurger/pandas | pandas/tests/indexes/datetimes/test_date_range.py | 2 | 36774 | """
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing a overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = pd.DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = pd.DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = pd.DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = pd.DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = pd.DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(
start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
)
assert dr[0] == Timestamp("2013-01-31")
assert dr[1] == Timestamp("2014-01-30")
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start, end, periods=10, freq="s")
def test_date_range_convenience_periods(self):
# GH 20808
result = date_range("2018-04-24", "2018-04-27", periods=3)
expected = DatetimeIndex(
["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
freq=None,
)
tm.assert_index_equal(result, expected)
# Test if spacing remains linear if tz changes to dst in range
result = date_range(
"2018-04-01 01:00:00",
"2018-04-01 04:00:00",
tz="Australia/Sydney",
periods=3,
)
expected = DatetimeIndex(
[
Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start,end,result_tz",
[
["20180101", "20180103", "US/Eastern"],
[datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
[Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
"US/Eastern",
],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
None,
],
],
)
def test_date_range_linspacing_tz(self, start, end, result_tz):
# GH 20983
result = date_range(start, end, periods=3, tz=result_tz)
expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_date_range_businesshour(self):
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
def test_range_misspecified(self):
# GH #1095
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(periods=10)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(periods=10, freq="H")
with pytest.raises(ValueError, match=msg):
date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
# replace should take ints/longs for compat
result = date_range(
Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
)
assert len(result) == 76
def test_catch_infinite_loop(self):
offset = offsets.DateOffset(minute=5)
# blow up, don't loop forever
msg = "Offset <DateOffset: minute=5> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)
@pytest.mark.parametrize("periods", (1, 2))
def test_wom_len(self, periods):
# https://github.com/pandas-dev/pandas/issues/20517
res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
assert len(res) == periods
def test_construct_over_dst(self):
# GH 20854
pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=True
)
pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=False
)
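        # When DST ends, the 01:00 wall-clock hour occurs twice in US/Pacific:
        # ambiguous=True picks the first (DST) occurrence, ambiguous=False the second (standard time).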
expect_data = [
Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
pre_dst,
pst_dst,
]
expected = DatetimeIndex(expect_data, freq="H")
result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
tm.assert_index_equal(result, expected)
def test_construct_with_different_start_end_string_format(self):
# GH 12064
result = date_range(
"2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H"
)
expected = DatetimeIndex(
[
Timestamp("2013-01-01 00:00:00+09:00"),
Timestamp("2013-01-01 01:00:00+09:00"),
Timestamp("2013-01-01 02:00:00+09:00"),
],
freq="H",
)
tm.assert_index_equal(result, expected)
def test_error_with_zero_monthends(self):
msg = r"Offset <0 \* MonthEnds> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))
def test_range_bug(self):
# GH #770
offset = DateOffset(months=3)
result = date_range("2011-1-1", "2012-1-31", freq=offset)
start = datetime(2011, 1, 1)
expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)
tm.assert_index_equal(result, expected)
def test_range_tz_pytz(self):
# see gh-2906
tz = timezone("US/Eastern")
start = tz.localize(datetime(2011, 1, 1))
end = tz.localize(datetime(2011, 1, 3))
dr = date_range(start=start, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize(
"start, end",
[
[
Timestamp(datetime(2014, 3, 6), tz="US/Eastern"),
Timestamp(datetime(2014, 3, 12), tz="US/Eastern"),
],
[
Timestamp(datetime(2013, 11, 1), tz="US/Eastern"),
Timestamp(datetime(2013, 11, 6), tz="US/Eastern"),
],
],
)
def test_range_tz_dst_straddle_pytz(self, start, end):
dr = date_range(start, end, freq="D")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(start, end, freq="D", tz="US/Eastern")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(
start.replace(tzinfo=None),
end.replace(tzinfo=None),
freq="D",
tz="US/Eastern",
)
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
def test_range_tz_dateutil(self):
# see gh-2906
# Use maybe_get_tz to fix filename in tz under dateutil.
from pandas._libs.tslibs.timezones import maybe_get_tz
tz = lambda x: maybe_get_tz("dateutil/" + x)
start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern"))
end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern"))
dr = date_range(start=start, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"])
def test_range_closed(self, freq):
begin = datetime(2011, 1, 1)
end = datetime(2014, 1, 1)
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
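        # If an endpoint lands exactly on the frequency grid, closed="left" drops the end
        # point and closed="right" drops the start point.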
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
def test_range_closed_with_tz_aware_start_end(self):
# GH12409, GH12684
begin = Timestamp("2011/1/1", tz="US/Eastern")
end = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
begin = Timestamp("2011/1/1")
end = Timestamp("2014/1/1")
begintz = Timestamp("2011/1/1", tz="US/Eastern")
endtz = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq, tz="US/Eastern")
left = date_range(begin, end, closed="left", freq=freq, tz="US/Eastern")
right = date_range(begin, end, closed="right", freq=freq, tz="US/Eastern")
expected_left = left
expected_right = right
if endtz == closed[-1]:
expected_left = closed[:-1]
if begintz == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
@pytest.mark.parametrize("closed", ["right", "left", None])
def test_range_closed_boundary(self, closed):
# GH#11804
right_boundary = date_range(
"2015-09-12", "2015-12-01", freq="QS-MAR", closed=closed
)
left_boundary = date_range(
"2015-09-01", "2015-09-12", freq="QS-MAR", closed=closed
)
both_boundary = date_range(
"2015-09-01", "2015-12-01", freq="QS-MAR", closed=closed
)
expected_right = expected_left = expected_both = both_boundary
if closed == "right":
expected_left = both_boundary[1:]
if closed == "left":
expected_right = both_boundary[:-1]
if closed is None:
expected_right = both_boundary[1:]
expected_left = both_boundary[:-1]
tm.assert_index_equal(right_boundary, expected_right)
tm.assert_index_equal(left_boundary, expected_left)
tm.assert_index_equal(both_boundary, expected_both)
def test_years_only(self):
# GH 6961
dr = date_range("2014", "2015", freq="M")
assert dr[0] == datetime(2014, 1, 31)
assert dr[-1] == datetime(2014, 12, 31)
def test_freq_divides_end_in_nanos(self):
# GH 10885
result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345min")
result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345min")
expected_1 = DatetimeIndex(
["2005-01-12 10:00:00", "2005-01-12 15:45:00"],
dtype="datetime64[ns]",
freq="345T",
tz=None,
)
expected_2 = DatetimeIndex(
["2005-01-13 10:00:00", "2005-01-13 15:45:00"],
dtype="datetime64[ns]",
freq="345T",
tz=None,
)
tm.assert_index_equal(result_1, expected_1)
tm.assert_index_equal(result_2, expected_2)
def test_cached_range_bug(self):
rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6))
assert len(rng) == 50
assert rng[0] == datetime(2010, 9, 1, 5)
    def test_timezone_comparison_bug(self):
# smoke test
start = Timestamp("20130220 10:00", tz="US/Eastern")
result = date_range(start, periods=2, tz="US/Eastern")
assert len(result) == 2
    def test_timezone_comparison_assert(self):
start = Timestamp("20130220 10:00", tz="US/Eastern")
msg = "Inferred time zone not equal to passed time zone"
with pytest.raises(AssertionError, match=msg):
date_range(start, periods=2, tz="Europe/Berlin")
def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture):
# GH 23270
tz = tz_aware_fixture
result = pd.date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz)
expected = pd.date_range(
end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz
)[::-1]
tm.assert_index_equal(result, expected)
class TestDateRangeTZ:
"""Tests for date_range with timezones"""
def test_hongkong_tz_convert(self):
# GH#1673 smoke test
dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong")
# it works!
dr.hour
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_span_dst_transition(self, tzstr):
# GH#1778
# Standard -> Daylight Savings Time
dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern")
assert (dr.hour == 0).all()
dr = date_range("2012-11-02", periods=10, tz=tzstr)
result = dr.hour
expected = pd.Index([0] * 10)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_timezone_str_argument(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
result = date_range("1/1/2000", periods=10, tz=tzstr)
expected = date_range("1/1/2000", periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_date_range_with_fixedoffset_noname(self):
from pandas.tests.indexes.datetimes.test_timezones import fixed_off_no_name
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
idx = pd.Index([start, end])
assert off == idx.tz
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_with_tz(self, tzstr):
stamp = Timestamp("3/11/2012 05:00", tz=tzstr)
assert stamp.hour == 5
rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr)
assert stamp == rng[1]
class TestGenRangeGeneration:
def test_generate(self):
rng1 = list(generate_range(START, END, offset=BDay()))
rng2 = list(generate_range(START, END, offset="B"))
assert rng1 == rng2
def test_generate_cday(self):
rng1 = list(generate_range(START, END, offset=CDay()))
rng2 = list(generate_range(START, END, offset="C"))
assert rng1 == rng2
def test_1(self):
rng = list(generate_range(start=datetime(2009, 3, 25), periods=2))
expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)]
assert rng == expected
def test_2(self):
rng = list(generate_range(start=datetime(2008, 1, 1), end=datetime(2008, 1, 3)))
expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)]
assert rng == expected
def test_3(self):
rng = list(generate_range(start=datetime(2008, 1, 5), end=datetime(2008, 1, 6)))
expected = []
assert rng == expected
def test_precision_finer_than_offset(self):
# GH#9907
result1 = pd.date_range(
start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q"
)
result2 = pd.date_range(
start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W"
)
expected1_list = [
"2015-06-30 00:00:03",
"2015-09-30 00:00:03",
"2015-12-31 00:00:03",
"2016-03-31 00:00:03",
]
expected2_list = [
"2015-04-19 00:00:03",
"2015-04-26 00:00:03",
"2015-05-03 00:00:03",
"2015-05-10 00:00:03",
"2015-05-17 00:00:03",
"2015-05-24 00:00:03",
"2015-05-31 00:00:03",
"2015-06-07 00:00:03",
"2015-06-14 00:00:03",
"2015-06-21 00:00:03",
]
expected1 = DatetimeIndex(
expected1_list, dtype="datetime64[ns]", freq="Q-DEC", tz=None
)
expected2 = DatetimeIndex(
expected2_list, dtype="datetime64[ns]", freq="W-SUN", tz=None
)
tm.assert_index_equal(result1, expected1)
tm.assert_index_equal(result2, expected2)
dt1, dt2 = "2017-01-01", "2017-01-01"
tz1, tz2 = "US/Eastern", "Europe/London"
@pytest.mark.parametrize(
"start,end",
[
(pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2)),
(pd.Timestamp(dt1), pd.Timestamp(dt2, tz=tz2)),
(pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2, tz=tz2)),
(pd.Timestamp(dt1, tz=tz2), pd.Timestamp(dt2, tz=tz1)),
],
)
def test_mismatching_tz_raises_err(self, start, end):
# issue 18488
msg = "Start and end cannot both be tz-aware with different timezones"
with pytest.raises(TypeError, match=msg):
pd.date_range(start, end)
with pytest.raises(TypeError, match=msg):
pd.date_range(start, end, freq=BDay())
class TestBusinessDateRange:
def test_constructor(self):
bdate_range(START, END, freq=BDay())
bdate_range(START, periods=20, freq=BDay())
bdate_range(end=START, periods=20, freq=BDay())
msg = "periods must be a number, got B"
with pytest.raises(TypeError, match=msg):
date_range("2011-1-1", "2012-1-1", "B")
with pytest.raises(TypeError, match=msg):
bdate_range("2011-1-1", "2012-1-1", "B")
msg = "freq must be specified for bdate_range; use date_range instead"
with pytest.raises(TypeError, match=msg):
bdate_range(START, END, periods=10, freq=None)
def test_misc(self):
end = datetime(2009, 5, 13)
dr = bdate_range(end=end, periods=20)
firstDate = end - 19 * BDay()
assert len(dr) == 20
assert dr[0] == firstDate
assert dr[-1] == end
def test_date_parse_failure(self):
badly_formed_date = "2007/100/1"
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
Timestamp(badly_formed_date)
with pytest.raises(ValueError, match=msg):
bdate_range(start=badly_formed_date, periods=10)
with pytest.raises(ValueError, match=msg):
bdate_range(end=badly_formed_date, periods=10)
with pytest.raises(ValueError, match=msg):
bdate_range(badly_formed_date, badly_formed_date)
def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range("12/5/2011", "12/5/2011")
rng2 = bdate_range("12/2/2011", "12/5/2011")
assert rng2._data.freq == BDay()
result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
@pytest.mark.parametrize("closed", ["left", "right"])
def test_bdays_and_open_boundaries(self, closed):
# GH 6673
start = "2018-07-21" # Saturday
end = "2018-07-29" # Sunday
result = pd.date_range(start, end, freq="B", closed=closed)
bday_start = "2018-07-23" # Monday
bday_end = "2018-07-27" # Friday
expected = pd.date_range(bday_start, bday_end, freq="D")
tm.assert_index_equal(result, expected)
# Note: we do _not_ expect the freqs to match here
def test_bday_near_overflow(self):
# GH#24252 avoid doing unnecessary addition that _would_ overflow
start = pd.Timestamp.max.floor("D").to_pydatetime()
rng = pd.date_range(start, end=None, periods=1, freq="B")
expected = pd.DatetimeIndex([start], freq="B")
tm.assert_index_equal(rng, expected)
def test_bday_overflow_error(self):
# GH#24252 check that we get OutOfBoundsDatetime and not OverflowError
msg = "Out of bounds nanosecond timestamp"
start = pd.Timestamp.max.floor("D").to_pydatetime()
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.date_range(start, periods=2, freq="B")
class TestCustomDateRange:
def test_constructor(self):
bdate_range(START, END, freq=CDay())
bdate_range(START, periods=20, freq=CDay())
bdate_range(end=START, periods=20, freq=CDay())
msg = "periods must be a number, got C"
with pytest.raises(TypeError, match=msg):
date_range("2011-1-1", "2012-1-1", "C")
with pytest.raises(TypeError, match=msg):
bdate_range("2011-1-1", "2012-1-1", "C")
def test_misc(self):
end = datetime(2009, 5, 13)
dr = bdate_range(end=end, periods=20, freq="C")
firstDate = end - 19 * CDay()
assert len(dr) == 20
assert dr[0] == firstDate
assert dr[-1] == end
def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range("12/5/2011", "12/5/2011", freq="C")
rng2 = bdate_range("12/2/2011", "12/5/2011", freq="C")
assert rng2._data.freq == CDay()
result = rng1.union(rng2)
assert isinstance(result, DatetimeIndex)
def test_cdaterange(self):
result = bdate_range("2013-05-01", periods=3, freq="C")
expected = DatetimeIndex(["2013-05-01", "2013-05-02", "2013-05-03"], freq="C")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_cdaterange_weekmask(self):
result = bdate_range(
"2013-05-01", periods=3, freq="C", weekmask="Sun Mon Tue Wed Thu"
)
expected = DatetimeIndex(
["2013-05-01", "2013-05-02", "2013-05-05"], freq=result.freq
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
# raise with non-custom freq
msg = (
"a custom frequency string is required when holidays or "
"weekmask are passed, got frequency B"
)
with pytest.raises(ValueError, match=msg):
bdate_range("2013-05-01", periods=3, weekmask="Sun Mon Tue Wed Thu")
def test_cdaterange_holidays(self):
result = bdate_range("2013-05-01", periods=3, freq="C", holidays=["2013-05-01"])
expected = DatetimeIndex(
["2013-05-02", "2013-05-03", "2013-05-06"], freq=result.freq
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
# raise with non-custom freq
msg = (
"a custom frequency string is required when holidays or "
"weekmask are passed, got frequency B"
)
with pytest.raises(ValueError, match=msg):
bdate_range("2013-05-01", periods=3, holidays=["2013-05-01"])
def test_cdaterange_weekmask_and_holidays(self):
result = bdate_range(
"2013-05-01",
periods=3,
freq="C",
weekmask="Sun Mon Tue Wed Thu",
holidays=["2013-05-01"],
)
expected = DatetimeIndex(
["2013-05-02", "2013-05-05", "2013-05-06"], freq=result.freq
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
# raise with non-custom freq
msg = (
"a custom frequency string is required when holidays or "
"weekmask are passed, got frequency B"
)
with pytest.raises(ValueError, match=msg):
bdate_range(
"2013-05-01",
periods=3,
weekmask="Sun Mon Tue Wed Thu",
holidays=["2013-05-01"],
)
@pytest.mark.parametrize(
"freq", [freq for freq in prefix_mapping if freq.startswith("C")]
)
def test_all_custom_freq(self, freq):
# should not raise
bdate_range(
START, END, freq=freq, weekmask="Mon Wed Fri", holidays=["2009-03-14"]
)
bad_freq = freq + "FOO"
msg = f"invalid custom frequency string: {bad_freq}"
with pytest.raises(ValueError, match=msg):
bdate_range(START, END, freq=bad_freq)
@pytest.mark.parametrize(
"start_end",
[
("2018-01-01T00:00:01.000Z", "2018-01-03T00:00:01.000Z"),
("2018-01-01T00:00:00.010Z", "2018-01-03T00:00:00.010Z"),
("2001-01-01T00:00:00.010Z", "2001-01-03T00:00:00.010Z"),
],
)
def test_range_with_millisecond_resolution(self, start_end):
# https://github.com/pandas-dev/pandas/issues/24110
start, end = start_end
result = pd.date_range(start=start, end=end, periods=2, closed="left")
expected = DatetimeIndex([start])
tm.assert_index_equal(result, expected)
def test_date_range_with_custom_holidays():
# GH 30593
freq = pd.offsets.CustomBusinessHour(start="15:00", holidays=["2020-11-26"])
result = pd.date_range(start="2020-11-25 15:00", periods=4, freq=freq)
expected = pd.DatetimeIndex(
[
"2020-11-25 15:00:00",
"2020-11-25 16:00:00",
"2020-11-27 15:00:00",
"2020-11-27 16:00:00",
],
freq=freq,
)
tm.assert_index_equal(result, expected)
| bsd-3-clause |
TinyOS-Camp/DDEA-DEV | Archive/[14_09_12] DDEA_example_code/df_data_analysis_ver6.py | 3 | 101804 | # coding: utf-8
"""
======================================================================
Learning and Visualizing the BMS sensor-time-weather data structure
======================================================================
This example employs several unsupervised learning techniques to extract
the energy data structure from variations in Building Automation System (BAS)
and historical weather data.
The fundamental timelet for analysis is 15 min, referred to as Q.
** currently using H (Hour) as the fundamental timelet, needs to change later **
The following analysis steps are designed to be executed.
Data Pre-processing
--------------------------
- Data Retrieval and Standardization
- Outlier Detection
- Interpolation
Data Summarization
--------------------------
- Data Transformation
- Sensor Clustering
Model Discovery Bayesian Network
--------------------------
- Automatic State Classification
- Structure Discovery and Analysis
"""
#print(__doc__)
# Author: Deokwooo Jung [email protected]
##################################################################
# General Moduels
from __future__ import division # To force floating point division
import os
import sys
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
import uuid
import pylab as pl
from scipy import signal
from scipy import stats
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from multiprocessing import Pool
#from datetime import datetime
import datetime as dt
from dateutil import tz
import shlex, subprocess
import mytool as mt
import time
import retrieve_weather as rw
import itertools
import calendar
import random
from matplotlib.collections import LineCollection
from stackedBarGraph import StackedBarGrapher
import pprint
import radar_chart
##################################################################
# Machine Learning Modules
from sklearn import cluster, covariance, manifold # Machine Learning Package
from sklearn import metrics
from sklearn import mixture
from sklearn.cluster import Ward
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
##################################################################
# Custom library
from data_tools import *
from data_retrieval import *
from pack_cluster import *
from data_preprocess import *
from shared_constants import *
from pre_bn_state_processing import *
##################################################################
##################################################################
# Processing Configuration Settings
##################################################################
# This option let you use data_dict object saved in hard-disk from the recent execution
IS_USING_SAVED_DICT=-1
# File selection method
Using_LoopUp=0
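# 0: grep the raw .bin file names directly, 1: look up sensor IDs in the description table (see the two branches below)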
# Analysis period
ANS_START_T=dt.datetime(2013,7,1,0)
ANS_END_T=dt.datetime(2013,9,30,0)
# Interval of timelet, currently set to 15 minutes (the 1-hour setting is commented out)
#TIMELET_INV=dt.timedelta(hours=1)
TIMELET_INV=dt.timedelta(minutes=15)
# Interactive mode for plotting
plt.ion()
##################################################################
input_files=[]
###############################################################################
# This directly searches for files by their bin file names
if (Using_LoopUp==0) and (IS_USING_SAVED_DICT==0):
temp4 = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep _ACTIVE_POWER_|grep GW2", shell=True)
#temp = subprocess.check_output("ls "+data_dir+"*.bin |grep '_POWER_\|TK.*VAK'", shell=True)
ha_ = subprocess.check_output("ls "+DATA_DIR+"*.bin |grep '\.HA.._'", shell=True)
ha1_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep '\.HA1_'", shell=True)
ha2_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep '\.HA2_'", shell=True)
power_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep _POWER_", shell=True)
#ventilation
iv_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep IV_", shell=True)
# Solar
aurinko_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep AURINKO_", shell=True)
# weather
saa_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep '\.SAA'", shell=True)
# cooling
jaah_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep JAAH", shell=True)
# ground heat
mlp_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep MLP", shell=True)
# GW1 GEO Thermal
gw1geo_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep GW1.GEO", shell=True)
# GW2 GEO Thermal
gw2geo_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep GW2.GEO", shell=True)
# VAK1 GEO Thermal
vak1geo_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep VAK1.GEO", shell=True)
# VAK2 GEO Thermal
vak2geo_ = subprocess.check_output("ls "+DATA_DIR+"*"+FL_EXT+" |grep VAK2.GEO", shell=True)
temp=power_+iv_+aurinko_+mlp_+gw1geo_ +gw2geo_+vak1geo_+vak2geo_+ha1_+ha2_
#temp=temp4
input_files =shlex.split(temp)
# Get rid of duplicated files
input_files=list(set(input_files))
print 'The total number of sensors selected for analysis is ', len(input_files),'......'
###############################################################################
# This looks up the ID description tables and finds the relevant bin files.
elif (Using_LoopUp==1) and (IS_USING_SAVED_DICT==0):
id_dict=get_id_dict('grep kW')
for id_name in id_dict.keys():
binfile_name=id_name+'.bin'
input_files.append(binfile_name)
else:
print 'Search data_dict.bin....'
###############################################################################
# Analysis script starts here ....
###############################################################################
if IS_USING_SAVED_DICT==0:
start__dictproc_t=time.time()
# IS_USING_PARALLEL_OPT
data_dict=construct_data_dict(input_files,ANS_START_T,ANS_END_T,TIMELET_INV,binfilename='data_dict', IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
end__dictproc_t=time.time()
    print 'time to construct data_dict.bin: ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
elif IS_USING_SAVED_DICT==1:
print 'Loading data dictionary......'
start__dictproc_t=time.time()
data_dict = mt.loadObjectBinaryFast('data_dict.bin')
end__dictproc_t=time.time()
    print 'time to load data_dict.bin: ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
else:
print 'Skip data dict'
if IS_USING_SAVED_DICT>0:
# Copy related variables
time_slots=data_dict['time_slots'][:]
Conditions_dict=data_dict['Conditions_dict'].copy()
Events_dict=data_dict['Events_dict'].copy()
sensor_list=data_dict['sensor_list'][:]
weather_list=data_dict['weather_list'][:]
weather_list_used = [data_dict['weather_list'][i] for i in [1,2,3,10,11]]
    # data_used is the list of reference names for all measurements from now on.
data_used=sensor_list+weather_list_used
# This is a global ID for data_used measurement
data_used_idx=range(len(data_used))
sensor_idx=range(len(sensor_list))
weather_idx=range(len(sensor_list),len(data_used))
# [data_used[i] for i in sensor_idx]
# [data_used[i] for i in weather_idx]
# Verify there is no [] or N/A in the list
CHECK_DATA_FORMAT=0
if CHECK_DATA_FORMAT==1:
list_of_wrong_data_format=verify_data_format(data_used,data_dict,time_slots)
if len(list_of_wrong_data_format)>0:
print 'Measurement list below'
print '----------------------------------------'
print list_of_wrong_data_format
raise NameError('Errors in data format')
EVENT_RETRIEVAL=0
if EVENT_RETRIEVAL==1:
    # sensor_list --> split into float or int --> cluster each type --> pick exemplars
    # exemplars of float sensors are quantized into states; int sensors are used as states directly
    # weather_list --> split into float or int, handled the same way
####################################
# Regular Event Extraction
####################################
    # Build the feature matrix with data interpolation for both sensor and weather data
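    # max_num_succ_idx_for_itpl caps how many consecutive missing timelets may be filled by
    # interpolation (here roughly 5% of the timeline); longer gaps are presumably left as missing.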
X_Feature,X_Time,X_names\
,X_zero_var_list, X_zero_var_val\
,X_int_type_list,X_int_type_idx\
,X_float_type_list,X_float_type_idx\
,X_weather_type_idx,X_sensor_type_idx\
=build_feature_matrix(data_dict,sensor_list,weather_list_used\
,time_slots,DO_INTERPOLATE=1\
,max_num_succ_idx_for_itpl=int(len(time_slots)*0.05))
if len(X_names+X_zero_var_list)!=len(data_used):
raise NameError('Missing name is found in X_names or X_zero_var_list')
else:
zero_var_idx=[data_used.index(name_str) for name_str in X_zero_var_list]
nzero_var_idx=list(set(data_used_idx)-set(zero_var_idx))
    # From here on, all indices are referenced to X_Feature
sf_idx=list(set(X_sensor_type_idx)&set(X_float_type_idx));
# Equivalent to np.array(data_used)[np.array(nzero_var_idx)[sf_idx]]
sf_name=list(np.array(X_names)[sf_idx])
si_idx=list(set(X_sensor_type_idx)&set(X_int_type_idx));
si_name=list(np.array(X_names)[si_idx])
wf_idx=list(set(X_weather_type_idx)&set(X_float_type_idx));
wf_name=list(np.array(X_names)[wf_idx])
wi_idx=list(set(X_weather_type_idx)&set(X_int_type_idx));
wi_name=list(np.array(X_names)[wi_idx])
    # Euclidean distance matrix of float-type data only (weather float + sensor float)
float_idx=list(set(sf_idx)| set(wf_idx))
int_idx=list(set(si_idx)| set(wi_idx))
# Float Type Measurement Clustering
X_Feature_sfe,sf_exemplars_dict,exemplars_,labels_\
=cluster_measurement_points(X_Feature[:,sf_idx],sf_name,corr_bnd=[0.1,0.9],alg='pack')
sfe_idx=list(np.array(sf_idx)[exemplars_])
    # Int Type Measurement Clustering
X_Feature_sie,si_exemplars_dict,exemplars_,labels_\
=cluster_measurement_points(X_Feature[:,si_idx],si_name,corr_bnd=[0.0,0.9],alg='pack')
sie_idx=list(np.array(si_idx)[exemplars_])
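    # Float-type exemplar sensors are quantized into discrete states by X_INPUT_to_states (their
    # correlation values are kept); integer-type sensor and weather readings are already discrete
    # and are used as states directly.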
sfe_state,sfe_corr_val=X_INPUT_to_states(X_Feature_sfe,CORR_VAL_OUT=1) # sensor -float type
sie_state=X_Feature_sie # sensor -integer type
wf_state,wf_corr_val=X_INPUT_to_states(X_Feature[:,wf_idx],CORR_VAL_OUT=1) # weather -float type
wi_state=X_Feature[:,wi_idx] # weather -integer type
empty_states=np.array([[] for i in range(len(X_Time))])
if len(sfe_state)==0: sfe_state=empty_states
if len(sie_state)==0: sie_state=empty_states
if len(wf_state)==0: wf_state=empty_states
if len(wi_state)==0: wi_state=empty_states
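    # Hedged sketch of measurement-to-state discretization: the exact rule inside
    # X_INPUT_to_states is project-specific, so this demo simply bins each column
    # into three states (-1, 0, 1) by its empirical 33%/66% quantiles. Disabled
    # by default; illustrative only.
    DEMO_STATE_DISCRETIZATION=0
    if DEMO_STATE_DISCRETIZATION==1:
        import numpy as np
        def demo_to_states(X):
            X = np.asarray(X, dtype=float)
            S = np.zeros(X.shape, dtype=int)
            for j in range(X.shape[1]):
                lo, hi = np.percentile(X[:, j], [33.0, 66.0])
                S[X[:, j] <= lo, j] = -1
                S[X[:, j] > hi, j] = 1
            return S
        demo_X = np.random.randn(100, 3)
        print demo_to_states(demo_X)[:5]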
# Exemplar sensor only
X_Sensor_STATE=np.append(sfe_state,sie_state, axis=1)
X_Sensor_STATE=X_Sensor_STATE.astype(int)
X_Sensor_NAMES=list(np.array(X_names)[sfe_idx])+list(np.array(X_names)[sie_idx])
X_Weather_STATE=np.append(wf_state,wi_state, axis=1)
X_Weather_STATE=X_Weather_STATE.astype(int)
X_Weather_NAMES=list(np.array(X_names)[wf_idx])+list(np.array(X_names)[wi_idx])
    # Months of a year, days of a week, and hours of a day
    # (Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday) = (0, 1, 2, 3, 4, 5, 6)
X_Time_STATE_temp=build_time_states(X_Time)
X_Time_NAMES_temp=['MTH','WD','HR']
X_Time_STATE=[]
X_Time_NAMES=[]
for xt_col,xt_name in zip(X_Time_STATE_temp.T,X_Time_NAMES_temp):
if len(set(xt_col))>1:
X_Time_STATE.append(xt_col)
X_Time_NAMES.append(xt_name)
X_Time_STATE=np.array(X_Time_STATE).T
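    # Hedged sketch of what build_time_states presumably produces: one integer
    # column per time factor (month of year, day of week, hour of day) taken from
    # a list of datetime objects. The helper name is hypothetical, and whether the
    # original code indexes months from 0 or 1 is an assumption left open here.
    DEMO_TIME_STATES=0
    if DEMO_TIME_STATES==1:
        import datetime as demo_dt
        import numpy as np
        def demo_build_time_states(dt_list):
            # weekday(): Monday=0 ... Sunday=6, matching the comment above
            return np.array([[t.month, t.weekday(), t.hour] for t in dt_list])
        demo_t = [demo_dt.datetime(2014, 1, 19, h) for h in range(24)]
        print demo_build_time_states(demo_t)[:3]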
DO_PLOTTING=0
if DO_PLOTTING==1:
sensor_name_temp=['VAK1.HA1_SM_EP_KM','VAK1.HA1_SM_KAM','GW1.HA1_TE16_AH2_M']
plot_compare_sensors(sensor_name_temp,X_Time,X_Feature,X_names)
plot_compare_states(sensor_name_temp[0],data_dict,X_Time,X_Feature,X_names)
#################################################
    # FORMATTED DATA FOR REGULAR EVENTS
    #################################################
    #DO_PROB_EST=1 ** Save these variables ***
#avgdata_mat = np.hstack([X_Sensor_STATE,X_Weather_STATE,X_Time_STATE])
#avgdata_names = X_Sensor_NAMES+X_Weather_NAMES+X_Time_NAMES
avgdata_exemplar=dict(sf_exemplars_dict.items()+si_exemplars_dict.items())
avgdata_zvar=X_zero_var_list
avgdata_dict={}
#avgdata_dict.update({'avgdata_mat':avgdata_mat})
avgdata_dict.update({'avgdata_state_mat':X_Sensor_STATE})
avgdata_dict.update({'avgdata_weather_mat':X_Weather_STATE})
avgdata_dict.update({'avgdata_time_mat':X_Time_STATE})
avgdata_dict.update({'avg_time_slot':X_Time})
#avgdata_dict.update({'avgdata_names':avgdata_names})
avgdata_dict.update({'avgdata_exemplar':avgdata_exemplar})
avgdata_dict.update({'avgdata_zvar':avgdata_zvar})
avgdata_dict.update({'sensor_names':X_Sensor_NAMES})
avgdata_dict.update({'weather_names':X_Weather_NAMES})
avgdata_dict.update({'time_names':X_Time_NAMES})
mt.saveObjectBinary(avgdata_dict,'avgdata_dict.bin')
####################################
# Irregular Event Extraction
####################################
    # Interpolation with outlier removal. Here we exclude weather data from the irregular event analysis
    # since weather data normally changes slowly in time, so we do not expect any meaningful diff values.
measurement_point_set,num_type_set\
=interpolation_measurement(data_dict,sensor_list,err_rate=1,sgm_bnd=20)
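    # Hedged sketch of the sigma-bound outlier removal presumably applied inside
    # interpolation_measurement (sgm_bnd=20): samples farther than sgm_bnd standard
    # deviations from the mean are treated as outliers and dropped. The helper
    # below is hypothetical and disabled by default.
    DEMO_SIGMA_OUTLIER_REMOVAL=0
    if DEMO_SIGMA_OUTLIER_REMOVAL==1:
        import numpy as np
        def demo_remove_outliers(t, v, sgm_bnd=20):
            t = np.asarray(t); v = np.asarray(v, dtype=float)
            mu, sigma = v.mean(), v.std()
            keep = np.abs(v - mu) <= sgm_bnd * sigma
            return t[keep], v[keep]
        demo_t = np.arange(10)
        demo_v = np.array([1.0] * 9 + [1e6])   # one extreme spike
        print demo_remove_outliers(demo_t, demo_v, sgm_bnd=2)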
    # Irregular (diff) matrix
Xdiff_Mat,Xdiff_Time,Xdiff_Names\
,Xdiff_zero_var_list, Xdiff_zero_var_val\
,Xdiff_int_type_list,Xdiff_int_type_idx\
,Xdiff_float_type_list,Xdiff_float_type_idx\
=build_diff_matrix(measurement_point_set,time_slots,num_type_set,sensor_list,PARALLEL=IS_USING_PARALLEL_OPT)
#==============================================================================
    # This code fixes the dimension mismatch between the diff sensor matrix and the weather matrix.
    # WARNING: this is just a quick fix. A more elegant solution should be implemented.
#==============================================================================
time_slots_array = np.sort(np.array(list(set(Xdiff_Time) & set(X_Time))))
# Extract subset of X_Weather_STATE
removed_idx_list = []
for ridx,slot in enumerate(X_Time):
slot_idx = np.where(time_slots_array==slot)[0]
if len(slot_idx) == 0: # slot not in common time slots
removed_idx_list.append(ridx)
XDIFF_Weather_STATE = np.delete(X_Weather_STATE, removed_idx_list,axis=0)
# Extract subset of Xdiff_Mat
removed_idx_list = []
for ridx,slot in enumerate(Xdiff_Time):
slot_idx = np.where(time_slots_array==slot)[0]
if len(slot_idx) == 0: # slot not in common time slots
removed_idx_list.append(ridx)
Xdiff_Mat = np.delete(Xdiff_Mat,removed_idx_list,axis=0)
# Update Xdiff_Time
Xdiff_Time = time_slots_array
XDIFF_Weather_STATE = np.array(XDIFF_Weather_STATE)
#==============================================================================
# End of fix
#==============================================================================
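    # A more compact alternative to the quick fix above, shown on toy arrays:
    # np.in1d builds a boolean mask of the rows whose time slot belongs to the
    # common set, which replaces the explicit Python loops. Disabled by default;
    # illustrative only.
    DEMO_VECTORIZED_ALIGNMENT=0
    if DEMO_VECTORIZED_ALIGNMENT==1:
        import numpy as np
        demo_t_a = np.array([0, 1, 2, 3, 4])        # stands in for X_Time
        demo_t_b = np.array([1, 2, 4, 5])           # stands in for Xdiff_Time
        demo_common = np.intersect1d(demo_t_a, demo_t_b)
        mask_a = np.in1d(demo_t_a, demo_common)     # rows of X_Weather_STATE to keep
        mask_b = np.in1d(demo_t_b, demo_common)     # rows of Xdiff_Mat to keep
        print demo_common, mask_a, mask_b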
    # From here on, all indices refer to columns of Xdiff_Mat
xdiff_sf_idx=Xdiff_float_type_idx;
xdiff_sf_name=Xdiff_float_type_list;
xdiff_si_idx=Xdiff_int_type_idx;
xdiff_si_name=Xdiff_int_type_list
# Float Type Measurement Clustering
X_Diff_sfe,sf_diff_exemplars_dict,exemplars_,labels_\
=cluster_measurement_points(Xdiff_Mat[:,xdiff_sf_idx],xdiff_sf_name,corr_bnd=[0.1,0.9])
xdiff_sfe_idx=list(np.array(xdiff_sf_idx)[exemplars_])
    # Int Type Measurement Clustering
X_Diff_sie,si_diff_exemplars_dict,exemplars_,labels_\
=cluster_measurement_points(Xdiff_Mat[:,xdiff_si_idx],xdiff_si_name,corr_bnd=[0.1,0.9])
xdiff_sie_idx=list(np.array(xdiff_si_idx)[exemplars_])
xdiff_sfe_state,xdiff_sfe_corr_val\
=X_INPUT_to_states(X_Diff_sfe,CORR_VAL_OUT=1,PARALLEL =IS_USING_PARALLEL_OPT) # sensor -float type
xdiff_sie_state=X_Diff_sie # sensor -integer type
empty_states=np.array([[] for i in range(len(Xdiff_Time))])
if len(xdiff_sfe_state)==0: xdiff_sfe_state=empty_states
if len(xdiff_sie_state)==0: xdiff_sie_state=empty_states
if len(wf_state)==0: wf_state=empty_states
if len(wi_state)==0: wi_state=empty_states
# Exemplar sensor only
XDIFF_Sensor_STATE=np.append(xdiff_sfe_state,xdiff_sie_state, axis=1)
XDIFF_Sensor_STATE=XDIFF_Sensor_STATE.astype(int)
XDIFF_Sensor_NAMES=list(np.array(Xdiff_Names)[xdiff_sfe_idx])+list(np.array(Xdiff_Names)[xdiff_sie_idx])
    # Months of a year, days of a week, and hours of a day
    # (Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday) = (0, 1, 2, 3, 4, 5, 6)
XDIFF_Time_STATE_temp=build_time_states(Xdiff_Time)
XDIFF_Time_NAMES_temp=['MTH','WD','HR']
XDIFF_Time_STATE=[]
XDIFF_Time_NAMES=[]
for xt_col,xt_name in zip(XDIFF_Time_STATE_temp.T,XDIFF_Time_NAMES_temp):
if len(set(xt_col))>1:
XDIFF_Time_STATE.append(xt_col)
XDIFF_Time_NAMES.append(xt_name)
XDIFF_Time_STATE=np.array(XDIFF_Time_STATE).T
#################################################
    # FORMATTED DATA FOR IRREGULAR EVENTS
    #################################################
    #** Save these variables ***
#diffdata_mat = np.hstack([XDIFF_Sensor_STATE,X_Weather_STATE,XDIFF_Time_STATE])
#diffdata_names = XDIFF_Sensor_NAMES+X_Weather_NAMES+XDIFF_Time_NAMES
diffdata_exemplar=dict(sf_diff_exemplars_dict.items()+si_diff_exemplars_dict.items())
diffdata_zvar=Xdiff_zero_var_list
diffdata_dict={}
#diffdata_dict.update({'diffdata_mat':diffdata_mat})
diffdata_dict.update({'diffdata_state_mat':XDIFF_Sensor_STATE})
#diffdata_dict.update({'diffdata_weather_mat':X_Weather_STATE})
diffdata_dict.update({'diffdata_weather_mat':XDIFF_Weather_STATE})
diffdata_dict.update({'diffdata_time_mat':XDIFF_Time_STATE})
diffdata_dict.update({'diff_time_slot':Xdiff_Time})
#diffdata_dict.update({'diffdata_names':diffdata_names})
diffdata_dict.update({'diffdata_exemplar':diffdata_exemplar})
diffdata_dict.update({'diffdata_zvar':diffdata_zvar})
diffdata_dict.update({'sensor_names':XDIFF_Sensor_NAMES})
diffdata_dict.update({'weather_names':X_Weather_NAMES})
diffdata_dict.update({'time_names':X_Time_NAMES})
mt.saveObjectBinary(diffdata_dict,'diffdata_dict.bin')
EVENT_ANALYSIS=0
if EVENT_ANALYSIS==1:
# 0-nb distance analysis
####################################################
    # Probability Computation
#---------------------------------------------------
# - Ranking output.
# - Effect Prob Analysis
# - Causal Prob Analysis
####################################################
diffdata_dict = mt.loadObjectBinary('diffdata_dict.bin')
avgdata_dict = mt.loadObjectBinary('avgdata_dict.bin')
    # Irregular Events
diffdata_state_mat=diffdata_dict['diffdata_state_mat']
diffdata_weather_mat=diffdata_dict['diffdata_weather_mat']
diffdata_time_mat=diffdata_dict['diffdata_time_mat']
diff_time_slot=diffdata_dict['diff_time_slot']
diffdata_exemplar=diffdata_dict['diffdata_exemplar']
diffdata_zvar=diffdata_dict['diffdata_zvar']
diffsensor_names=diffdata_dict['sensor_names']
diffweather_names=diffdata_dict['weather_names']
difftime_names=diffdata_dict['time_names']
    # Regular Events
avgdata_state_mat=avgdata_dict['avgdata_state_mat']
avgdata_weather_mat=avgdata_dict['avgdata_weather_mat']
avgdata_time_mat=avgdata_dict['avgdata_time_mat']
avg_time_slot=avgdata_dict['avg_time_slot']
avgdata_exemplar=avgdata_dict['avgdata_exemplar']
avgdata_zvar=avgdata_dict['avgdata_zvar']
avgsensor_names=avgdata_dict['sensor_names']
avgweather_names=avgdata_dict['weather_names']
avgtime_names=avgdata_dict['time_names']
###############################################################################################
    # Regular Event Analysis
#avgdata_state_mat,avgdata_weather_mat, avgdata_time_mat, avg_time_slot
#avgdata_exemplar, avgdata_zvar, avgsensor_names, avgweather_names, avgtime_names
###############################################################################################
#****** Complete Analysis Script***** #
######################################################################
    #1. Effect probability - time dependency analysis
######################################################################
# Temporary for correcting month change
######################################################################
# Use this for special cases
######################################################################
"""
monthly_variability,monthly_structure_score\
=time_effect_analysis_all(data_mat,data_name,avgtime_names,avgsensor_names)
start_t=time.time()
s_name=avgsensor_names[0]
state_list,s_prob_log,time_effect_mat_dist,score_in_structure,valid_mon_list,state_list=\
time_effect_analysis(data_mat,data_name,avgtime_names,avgsensor_names[0],DO_PLOT=True)
end_t=time.time()
print 'Total-- ',end_t-start_t, 'secs'
plot_time_effect(s_name,state_list,valid_mon_list,s_prob_log)
wf_tuple=wf_tuple_t
plot_weather_sensitivity(wf_tuple[0],wf_tuple[1],wf_tuple[2],wf_tuple[3],wf_tuple[4],\
avgsensor_names,Conditions_dict,Events_dict,sort_opt='desc',num_of_picks=9)
"""
Conditions_dict=data_dict['Conditions_dict'].copy()
Events_dict=data_dict['Events_dict'].copy()
data_state_mat=avgdata_state_mat
data_time_mat=avgdata_time_mat
data_weather_mat=avgdata_weather_mat
sensor_names=avgsensor_names
time_names=avgtime_names
weather_names=avgweather_names
bldg_tag='VAK_' # building tag
trf_tag='avg_' # transformation tag
dst_t='h'
vak_avg_wtf_tuple,vak_avg_weather_dict=wt_sensitivity_analysis(data_state_mat,data_time_mat,data_weather_mat,sensor_names,time_names,\
Conditions_dict,Events_dict,bldg_tag,trf_tag,weather_names,dst_t='h')
Conditions_dict=data_dict['Conditions_dict'].copy()
Events_dict=data_dict['Events_dict'].copy()
data_state_mat=diffdata_state_mat
data_time_mat=diffdata_time_mat
data_weather_mat=diffdata_weather_mat
sensor_names=diffsensor_names
time_names=difftime_names
weather_names=diffweather_names
bldg_tag='VAK_' # building tag
trf_tag='diff_' # transformation tag
dst_t='h'
vak_diff_wtf_tuple,vak_diff_weather_dict=wt_sensitivity_analysis(data_state_mat,data_time_mat,data_weather_mat,sensor_names,time_names,\
Conditions_dict,Events_dict,bldg_tag,trf_tag,weather_names,dst_t='h')
###############################################################################################
    # Irregular Event Analysis
#avgdata_state_mat,avgdata_weather_mat, avgdata_time_mat, avg_time_slot
#avgdata_exemplar, avgdata_zvar, avgsensor_names, avgweather_names, avgtime_names
###############################################################################################
#########################################################################
    # Computes the maximum skewness of the state distribution of each sensor:
    # max_{i,j} |p_i - p_j| / (p_i * p_j)  such that p_i, p_j != 0
#########################################################################
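    # Hedged sketch of the skewness metric described above, computed for a single
    # column of discrete states: estimate the state probabilities p_i and take the
    # maximum over pairs of |p_i - p_j| / (p_i * p_j), restricted to non-zero
    # probabilities. Disabled by default; illustrative only.
    DEMO_SKEWNESS_METRIC=0
    if DEMO_SKEWNESS_METRIC==1:
        import numpy as np
        def demo_skewness_metric(state_col):
            s = np.asarray(state_col)
            n = float(len(s))
            p = np.array([np.sum(s == v) / n for v in np.unique(s)])
            p = p[p > 0]
            best = 0.0
            for i in range(len(p)):
                for j in range(len(p)):
                    best = max(best, abs(p[i] - p[j]) / (p[i] * p[j]))
            return best
        print demo_skewness_metric([0] * 99 + [1])   # a rare state gives a large score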
#plot(irr_state_mat[:,skewness_metric_sort_idx[12]],'-s')
num_of_picks=10
rare_event_sensors=list(np.array(diffsensor_names)[skewness_metric_sort_idx[0:num_of_picks]])
rare_event_sensors_scores=list(skewness_metric_sort[0:num_of_picks])
pprint.pprint(np.array([rare_event_sensors, rare_event_sensors_scores]).T)
data_mat = np.hstack([diffdata_state_mat,diffdata_time_mat])
# Temporary for correcting month change
#data_mat[:,-3]=data_mat[:,-3]-1
data_name = diffsensor_names+difftime_names
dst_t='h'
mth_prob_map,mth_state_map, mth_sensitivity,mth_list\
= param_sensitivity(data_mat,data_name,diffsensor_names,'MTH',dst_type=dst_t)
wday_prob_map,wday_state_map,wday_sensitivity,wday_list\
= param_sensitivity(data_mat,data_name,diffsensor_names,'WD',dst_type=dst_t)
dhr_prob_map,dhr_state_map,dhr_sensitivity,dhr_list\
= param_sensitivity(data_mat,data_name,diffsensor_names,'HR',dst_type=dst_t)
tf_tuple_mth=('MTH',mth_prob_map,mth_state_map,mth_sensitivity,mth_list)
tf_tuple_wday=('WD',wday_prob_map,wday_state_map,wday_sensitivity,wday_list)
tf_tuple_dhr=('HR',dhr_prob_map,dhr_state_map,dhr_sensitivity,dhr_list)
#tf_tuple=tf_tuple_mth
##########################################################################################
    # Generalize this plotting
#plot_xxx_sensitivity(tf_tuple[0],tf_tuple[1],tf_tuple[2],tf_tuple[3],tf_tuple[4],\
# avgsensor_names,Conditions_dict,Events_dict,sort_opt='desc',num_of_picks=9)
##########################################################################################
tf_sstv_tuple=np.array([tf_tuple_mth[3],tf_tuple_wday[3],tf_tuple_dhr[3]])
max_tf_sstv=tf_sstv_tuple[tf_sstv_tuple<np.inf].max()*2
tf_sstv_tuple[tf_sstv_tuple==np.inf]=max_tf_sstv
tf_sstv_total=np.sum(tf_sstv_tuple,0)
arg_idx_s=argsort(tf_sstv_total)[::-1]
arg_idx_is=argsort(tf_sstv_total)
num_of_picks=9
print 'Most time sensitive sensors'
print '---------------------------------------------'
Time_Sensitive_Sensors=list(np.array(diffsensor_names)[arg_idx_s[0:num_of_picks]])
pprint.pprint(Time_Sensitive_Sensors)
print 'Least time sensitive sensors'
print '---------------------------------------------'
Time_Insensitive_Sensors=list(np.array(diffsensor_names)[arg_idx_is[0:num_of_picks]])
pprint.pprint(Time_Insensitive_Sensors)
####################################################################
    ## Radar plotting for Time_Sensitive_Sensors
####################################################################
sensor_no = len(diffsensor_names)
    # convert 'inf' to max_tf_sstv
sen_mth = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_mth[3]]
sen_wday = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_wday[3]]
sen_dhr = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_dhr[3]]
SEN = [[sen_mth[i], sen_wday[i], sen_dhr[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-6:] # Best 6 sensors
spoke_labels = ["Month", "Day", "Hour"]
data = [SEN[i] for i in idx]
sensor_labels = [diffsensor_names[i] for i in idx]
#import radar_chart
radar_chart.plot(data, spoke_labels, sensor_labels, saveto="time_radar.png")
import pdb;pdb.set_trace()
"""
diffdata_state_mat=diffdata_dict['diffdata_state_mat']
diffdata_weather_mat=diffdata_dict['diffdata_weather_mat']
diffdata_time_mat=diffdata_dict['diffdata_time_mat']
diff_time_slot=diffdata_dict['diff_time_slot']
diffdata_exemplar=diffdata_dict['diffdata_exemplar']
diffdata_zvar=diffdata_dict['diffdata_zvar']
diffsensor_names=diffdata_dict['sensor_names']
diffweather_names=diffdata_dict['weather_names']
difftime_names=diffdata_dict['time_names']
"""
do_sampling_interval_plot=1
if do_sampling_interval_plot==1:
num_of_picks=5
fig=figure('sampling interval')
for k in range(num_of_picks):
            ax=subplot(num_of_picks,1,k+1)
m_idx=skewness_metric_sort_idx[k]
sensor_name_=diffdata_names[m_idx]
t_=unix_to_dtime(data_dict[sensor_name_][2][0])
plot(t_[1:],abs(diff(data_dict[sensor_name_][2][0])))
plt.title(sensor_name_,fontsize=14,y=0.8)
ylabel('Sampling Intervals')
fig.savefig(fig_dir+'sampling_intervals.png')
do_rare_event_compare_plot=1
if do_rare_event_compare_plot==1:
num_of_picks=3
for k in range(num_of_picks):
            fig=figure('irregular event compare'+str(k))
m_idx=skewness_metric_sort_idx[k]
sensor_name_=diffdata_names[m_idx]
irr_idx=irr_data_name.index(sensor_name_)
t_=unix_to_dtime(data_dict[sensor_name_][2][0])
val_=data_dict[sensor_name_][2][1]
subplot(4,1,1)
plt.title(sensor_name_+' samples',fontsize=14,y=0.8)
plot(t_,val_)
subplot(4,1,2)
plt.title(sensor_name_+' differential',fontsize=14,y=0.8)
plot(t_[1:],abs(diff(val_)))
subplot(4,1,3)
plot(measurement_point_set[irr_idx][0],measurement_point_set[irr_idx][1])
subplot(4,1,4)
plt.title(sensor_name_+' irregular states',fontsize=14,y=0.8)
plot(diff_time_slot,irr_state_mat[:,m_idx])
plt.get_current_fig_manager().window.showMaximized()
fig.savefig(fig_dir+'irr_event_compare'+str(k)+'.png')
BLDG_ANALYSIS=1
if BLDG_ANALYSIS==1:
#########################################################################
# Case by Case Analysis.
#########################################################################
##############################
# VTT VTT_POWER data
##############################
VTT_LOAD=0
if VTT_LOAD==1:
print 'VTT_POWER data loading ...'
# VTT_POWER data loading ...
avgdata_dict = mt.loadObjectBinaryFast('./VTT_POWER/avgdata_dict.bin')
avgdata_dict=obj(avgdata_dict)
gw2_power=mt.loadObjectBinaryFast('./VTT_POWER/GW2.CG_PHASE1_ACTIVE_POWER_M.bin')
X_Feature=mt.loadObjectBinaryFast('./VTT_POWER/X_Feature.bin')
X_names=mt.loadObjectBinaryFast('./VTT_POWER/X_names.bin')
X_Time=mt.loadObjectBinaryFast('./VTT_POWER/X_Time.bin')
Xdiff_Mat=mt.loadObjectBinaryFast('./VTT_POWER/Xdiff_Mat.bin')
Xdiff_Names=mt.loadObjectBinaryFast('./VTT_POWER/Xdiff_Names.bin')
Xdiff_Time=mt.loadObjectBinaryFast('./VTT_POWER/Xdiff_Time.bin')
#########################################################################
    ### Load all buildings' bin files.
#########################################################################
bldg_dict={}
RUN_VTT_BLDG=0
if RUN_VTT_BLDG==1:
sig_tag_set=['avg','diff']
bldg_tag_set=['GW1_','GW2_','VAK1_','VAK2_']
dict_dir_set=['./GW1_results/','./GW2_results/','./VAK1_results/','./VAK2_results/']
pname_key='POWER'
for dict_dir,bldg_tag in zip(dict_dir_set,bldg_tag_set):
bldg_dict.update({bldg_tag:create_bldg_obj(dict_dir,bldg_tag,pname_key)})
bldg_=obj(bldg_dict)
else:
bldg_dict={'GW1_':mt.loadObjectBinaryFast('GW1_.bin'),'GW2_':mt.loadObjectBinaryFast('GW2_.bin')\
,'VAK1_':mt.loadObjectBinaryFast('VAK1_.bin'),'VAK2_':mt.loadObjectBinaryFast('VAK2_.bin')}
bldg_=obj(bldg_dict)
RUN_GSBC_BLDG=1
if RUN_GSBC_BLDG==1:
#gsbc_dict_dir_set=['./GSBC/allsensors/','./GSBC/seleceted/']
#gsbc_dict_dir_set=['./GSBC/allsensors/']
gsbc_dict_dir_set=['./GSBC/selected/']
bldg_tag_set=['GSBC_']
print 'Building for ',bldg_tag_set, '....'
gsbc_hcw_pname_key='3003....' # Hot and Cold water
        gsbc_main_1_pname_key='300401..' # Main Building F1
        gsbc_main_2_pname_key='300402..' # Main Building F2
gsbc_hvac_pname_key='3006....' # HVAC
for dict_dir,bldg_tag in zip(gsbc_dict_dir_set,bldg_tag_set):
bldg_dict.update({bldg_tag:create_bldg_obj(dict_dir,bldg_tag,gsbc_hcw_pname_key)})
bldg_=obj(bldg_dict)
import pdb;pdb.set_trace()
PLOTTING_LH=0
if PLOTTING_LH==1:
plotting_bldg_lh(bldg_,attr_class='sensor',num_picks=30)
        plotting_bldg_lh(bldg_,attr_class='time',num_picks=30)
plotting_bldg_lh(bldg_,attr_class='weather',num_picks=30)
###################################################################################################################
###################################################################################################################
###################################################################################################################
###################################################################################################################
###################################################################################################################
###################################################################################################################
import lib_bnlearn as rbn
###########################
# BLDG = GW2_ Analysis
###########################
#plotting_bldg_lh(bldg_,bldg_key='GW2_',attr_class='sensor',num_picks=30)
#import pdb;pdb.set_trace()
"""
###############################################################
    # 1. Regular Events
###############################################################
def bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag='avg',num_picks_bn=15,learning_alg='hc'):
cmd_str_='s_names=bldg_obj.'+sig_tag+'.sensor_names'
exec(cmd_str_)
p_idx=s_names.index(p_name)
cmd_str_='data_state_mat=bldg_obj.'+sig_tag+'.data_state_mat'
exec(cmd_str_)
if not (attr=='all') :
cmd_str_='optprob_set=bldg_obj.analysis.'+sig_tag+'.__dict__[p_name].'+attr+'.optprob_set'
exec(cmd_str_)
            cmd_str_='optstate_set=bldg_obj.analysis.'+sig_tag+'.__dict__[p_name].'+attr+'.optstate_set'
            exec(cmd_str_)
sort_idx=np.argsort(optprob_set)[::-1]
if (attr=='sensor') :
print 'power - sensors...'
cmd_str_='s_names=bldg_obj.'+sig_tag+'.sensor_names'
exec(cmd_str_)
idx_select=[p_idx]+ list(sort_idx[:num_picks_bn])
cmd_str_='bndata_mat=bldg_obj.'+sig_tag+'.data_state_mat[:,idx_select]'
exec(cmd_str_)
cols=[s_names[k] for k in idx_select]
elif (attr=='weather'):
print 'power - weather...'
cmd_str_='w_names=bldg_obj.'+sig_tag+'.weather_names'
exec(cmd_str_)
cmd_str_='bndata_mat=np.vstack((bldg_obj.'+sig_tag+\
'.data_state_mat[:,p_idx].T,bldg_obj.'+sig_tag+'.data_weather_mat_.T)).T'
exec(cmd_str_)
cols=[p_name]+[w_name for w_name in w_names]
elif (attr=='time'):
print 'power - time...'
cmd_str_='t_names=bldg_obj.'+sig_tag+'.time_names'
exec(cmd_str_)
cmd_str_='bndata_mat=np.vstack((bldg_obj.'+sig_tag+\
'.data_state_mat[:,p_idx].T,bldg_obj.'+sig_tag+'.data_time_mat.T)).T'
exec(cmd_str_)
cols=[p_name]+[t_name for t_name in t_names]
elif (attr=='all'):
print 'power - sensors + weather + time ...'
s_cause_label,s_labels,s_hc=\
bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag=sig_tag,num_picks_bn=num_picks_bn,learning_alg=learning_alg)
t_cause_label,t_labels,t_hc=\
bn_anaylsis(bldg_obj,p_name,attr='time',sig_tag=sig_tag,num_picks_bn=num_picks_bn,learning_alg=learning_alg)
w_cause_label,w_labels,w_hc=\
bn_anaylsis(bldg_obj,p_name,attr='weather',sig_tag=sig_tag,num_picks_bn=num_picks_bn,learning_alg=learning_alg)
#s_cause_label=s_labels; w_cause_label=w_labels;t_cause_label=t_labels
cmd_str_='s_cause_idx=[bldg_obj.'+sig_tag+'.sensor_names.index(name_) for name_ in s_cause_label]'
exec(cmd_str_)
cmd_str_='t_cause_idx=[bldg_obj.'+sig_tag+'.time_names.index(name_) for name_ in t_cause_label]'
exec(cmd_str_)
cmd_str_='w_cause_idx=[bldg_obj.'+sig_tag+'.weather_names.index(name_) for name_ in w_cause_label]'
exec(cmd_str_)
cmd_str_='bndata_mat=np.vstack((bldg_obj.'+sig_tag+'.data_state_mat[:,p_idx].T,\
bldg_obj.'+sig_tag+'.data_state_mat[:,s_cause_idx].T, \
bldg_obj.'+sig_tag+'.data_weather_mat_[:,w_cause_idx].T, \
bldg_obj.'+sig_tag+'.data_time_mat[:,t_cause_idx].T)).T'
exec(cmd_str_)
cmd_str_='cols=[name_ for name_ in [p_name]+s_cause_label+w_cause_label+t_cause_label]'
exec(cmd_str_)
else:
print 'error'
return 0
if (attr=='all'):
b_arc_list = pair_in_idx([p_name],s_cause_label+ w_cause_label+t_cause_label)+\
pair_in_idx(s_cause_label,w_cause_label+t_cause_label)+\
pair_in_idx(w_cause_label,t_cause_label)+\
pair_in_idx(t_cause_label,t_cause_label)
#import pdb;pdb.set_trace()
else:
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
if learning_alg=='tabu':
hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
elif learning_alg=='mmhc':
hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
else:
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
cause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in cause_label]
return cause_label,cols,hc_b
"""
bldg_obj=bldg_.GW2_
p_name=bldg_obj.analysis.avg.__dict__.keys()[0]
s_cause_label,s_labels,s_hc=bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag='avg',num_picks_bn=15)
t_cause_label,t_labels,t_hc=bn_anaylsis(bldg_obj,p_name,attr='time',sig_tag='avg',num_picks_bn=15)
w_cause_label,w_labels,w_hc=bn_anaylsis(bldg_obj,p_name,attr='weather',sig_tag='avg',num_picks_bn=15)
all_cause_label,all_labels,all_hc=bn_anaylsis(bldg_obj,p_name,attr='all',sig_tag='avg',num_picks_bn=15)
import pdb;pdb.set_trace()
bldg_obj=bldg_.GSBC_
p_name=bldg_obj.analysis.avg.__dict__.keys()[0]
s_cause_label,s_labels,s_hc=bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag='avg',num_picks_bn=15)
t_cause_label,t_labels,t_hc=bn_anaylsis(bldg_obj,p_name,attr='time',sig_tag='avg',num_picks_bn=15)
w_cause_label,w_labels,w_hc=bn_anaylsis(bldg_obj,p_name,attr='weather',sig_tag='avg',num_picks_bn=15)
all_cause_label,all_labels,all_hc=bn_anaylsis(bldg_obj,p_name,attr='all',sig_tag='avg',num_picks_bn=15)
# Plotting....
#plt.ioff()
fig1=rbn.nx_plot(s_hc,s_labels)
fig2=rbn.nx_plot(t_hc,t_labels)
fig3=rbn.nx_plot(w_hc,w_labels)
fig4=rbn.nx_plot(all_hc,all_labels)
p_name=bldg_obj.analysis.avg.__dict__.keys()[0]
all_cause_label,all_labels,all_hc=bn_anaylsis(bldg_obj,p_name,attr='all',sig_tag='avg',num_picks_bn=20)
fig=rbn.nx_plot(all_hc,all_labels)
p_name=bldg_obj.analysis.diff.__dict__.keys()[0]
all_cause_label,all_labels,all_hc=bn_anaylsis(bldg_obj,p_name,attr='all',sig_tag='diff',num_picks_bn=20)
fig=rbn.nx_plot(all_hc,all_labels)
png_name=str(uuid.uuid4().get_hex().upper()[0:2])
plt.savefig(fig_dir+p_name+'_'+sig_tag+'_bn_sensors_'+png_name+'.png', bbox_inches='tight')
plt.close()
plt.ion()
import pdb;pdb.set_trace()
#fig=figure(figsize=(10,10))
plt.ioff()
fig=figure()
for k in range(len(cause_idx)):
effect_idx=cols.index(p_name)
peak_state_0, peak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[PEAK]])
lowpeak_state_0, lowpeak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_0)
sort_idx2=argsort(lowpeak_state_0)
subplot(1,len(cause_idx),k+1)
plot(sort(peak_state_0), np.array(peak_prob_0)[sort_idx1],'-^')
plot(sort(lowpeak_state_0), np.array(lowpeak_prob_0)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
if k==0:
plt.ylabel('Probability of Peak Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.xlabel(s_cause_label[k])
if len(peak_state_0)==len(stateDict.keys()):
if sum(abs(sort(stateDict.keys())-sort(peak_state_0)))==0:
plt.xticks(stateDict.keys(),stateDict.values(),rotation=0, fontsize=12)
png_name=str(uuid.uuid4().get_hex().upper()[0:2])
plt.savefig(fig_dir+p_name+'_'+sig_tag+'_bn_sensors_lh_out'+png_name+'.png', bbox_inches='tight')
plt.close()
plt.ion()
    #############################################
    # plotting the BN analysis result
    #############################################
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fcause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
start_t=datetime.datetime(2014, 1, 19, 0, 0, 0)
end_t=datetime.datetime(2014, 1, 25, 0, 0, 0)
data_2=get_data_set(fcause_label+effect_label,start_t,end_t)
# data_x=get_data_set([cause_label[1]]+[cause_label[3]]+effect_label,start_t,end_t)
png_namex=plot_data_x(data_2,stype='raw',smark='-')
#png_namex=plot_data_x(data_x,stype='diff',smark='-^')
# Check the probability
plt.plot(peak_state_temp,peak_prob_temp,'-^')
plt.plot(lowpeak_state_temp,lowpeak_prob_temp,'-v')
plt.title(cause_label)
plt.xlabel('Measurements')
plt.ylabel('Probability of State of Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
    plt.savefig(fig_dir+p_name+'_'+sig_tag+'_cause_prob.png', bbox_inches='tight')
data_1=get_data_set(cause_label+effect_label)
avg_png_name=plot_data_x(data_1)
###################################################################################################################
###################################################################################################################
###################################################################################################################
###################################################################################################################
###################################################################################################################
###################################################################################################################
###############################################################
    # 1. Regular Events for GW2.CG_SYSTEM_ACTIVE_POWER_M
###############################################################
num_picks=30
sig_tag='avg'
optprob_set=GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Sensor.optprob_set
optstate_set=GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Sensor.optstate_set
s_names=GW2_.avgsensor_names
p_name='GW2.CG_SYSTEM_ACTIVE_POWER_M'
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
sort_label= list(np.array(s_names)[sort_idx[:num_picks]])
data_state_mat=GW2_.avgdata_state_mat
lh_threshold=0.9
cause_idx=list(np.nonzero(optprob_set>lh_threshold)[0])
cause_label=[GW2_.avgsensor_names[idx] for idx in cause_idx]
effect_idx=GW2_.avgsensor_names.index(p_name)
effect_label=[p_name]
# For PEAK Demand
obs_state=PEAK
peak_state_temp, peak_prob_temp=compute_cause_likelihood(data_state_mat,cause_idx,[[effect_idx]],[[obs_state]])
# For LOW PEAK Demand
obs_state=LOW_PEAK
lowpeak_state_temp,lowpeak_prob_temp=compute_cause_likelihood(data_state_mat,cause_idx,[[effect_idx]],[[obs_state]])
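    # Hedged sketch of the conditional-likelihood estimate that
    # compute_cause_likelihood presumably performs: for each observed value of the
    # cause column, estimate P(effect == obs_state | cause == value) by frequency
    # counting over the state matrix. Helper and variable names here are demo-only
    # and the block is disabled by default.
    DEMO_CAUSE_LIKELIHOOD=0
    if DEMO_CAUSE_LIKELIHOOD==1:
        import numpy as np
        def demo_cause_likelihood(state_mat, cause_col, effect_col, obs_state):
            S = np.asarray(state_mat)
            cause_vals = np.unique(S[:, cause_col])
            probs = []
            for v in cause_vals:
                rows = S[:, cause_col] == v
                probs.append(np.mean(S[rows, effect_col] == obs_state))
            return list(cause_vals), probs
        demo_S = np.array([[1, 0], [1, 1], [0, 0], [0, 0], [1, 1]])
        print demo_cause_likelihood(demo_S, cause_col=1, effect_col=0, obs_state=1)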
# Check the probability
plt.plot(peak_state_temp,peak_prob_temp,'-^')
plt.plot(lowpeak_state_temp,lowpeak_prob_temp,'-v')
plt.title(cause_label)
plt.xlabel('Measurements')
plt.ylabel('Probability of State of Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
    plt.savefig(fig_dir+p_name+'_'+sig_tag+'_cause_prob.png', bbox_inches='tight')
data_1=get_data_set(cause_label+effect_label)
avg_png_name=plot_data_x(data_1)
import lib_bnlearn as rbn
num_picks=10
p_idx=GW2_.avgsensor_names.index(p_name)
idx_select=[p_idx]+ list(sort_idx[:num_picks])
bndata_mat=GW2_.avgdata_state_mat[:,idx_select]
# File name format - allowing dot
cols_fnames=[GW2_.avgsensor_names[k] for k in idx_select]
# Variable name format - replacing dot with underscore
cols=[remove_dot(GW2_.avgsensor_names[k]) for k in idx_select]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
fig=rbn.nx_plot(hc_b,cols)
amat = rbn.py_get_amat(hc_b)
plt.savefig(fig_dir+p_name+'_'+sig_tag+'bn_sensors.png', bbox_inches='tight')
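    # Hedged note on the blacklist above: pair_in_idx([cols[0]],cols[1:]) is assumed
    # to enumerate all directed arcs from the target power variable to every other
    # variable; blacklisting them forces the structure learner to orient edges
    # toward the power node. A self-contained illustration of that enumeration,
    # disabled by default:
    DEMO_BLACKLIST_PAIRS=0
    if DEMO_BLACKLIST_PAIRS==1:
        import itertools
        demo_cols = ['POWER', 'SENSOR_A', 'SENSOR_B']
        demo_black_arcs = list(itertools.product([demo_cols[0]], demo_cols[1:]))
        print demo_black_arcs   # [('POWER', 'SENSOR_A'), ('POWER', 'SENSOR_B')]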
s_cause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in s_cause_label]
#fig=figure(figsize=(10,10))
fig=figure()
for k in range(len(cause_idx)):
effect_idx=cols_fnames.index(p_name)
peak_state_0, peak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[PEAK]])
lowpeak_state_0, lowpeak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_0)
sort_idx2=argsort(lowpeak_state_0)
subplot(1,len(cause_idx),k+1)
plot(sort(peak_state_0), np.array(peak_prob_0)[sort_idx1],'-^')
plot(sort(lowpeak_state_0), np.array(lowpeak_prob_0)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
if k==0:
plt.ylabel('Probability of Peak Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
        plt.xlabel(s_cause_label[k])
if len(peak_state_0)==len(stateDict.keys()):
if sum(abs(sort(stateDict.keys())-sort(peak_state_0)))==0:
plt.xticks(stateDict.keys(),stateDict.values(),rotation=0, fontsize=12)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fcause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
start_t=datetime.datetime(2014, 1, 19, 0, 0, 0)
end_t=datetime.datetime(2014, 1, 25, 0, 0, 0)
data_2=get_data_set(fcause_label+effect_label,start_t,end_t)
# data_x=get_data_set([cause_label[1]]+[cause_label[3]]+effect_label,start_t,end_t)
png_namex=plot_data_x(data_2,stype='raw',smark='-')
#png_namex=plot_data_x(data_x,stype='diff',smark='-^')
###############################################################
    # 2. Irregular Events for GW2.CG_SYSTEM_ACTIVE_POWER_M
###############################################################
bldg_tag='GW2_'
sig_tag='diff'
p_name='GW2.CG_PHASE2_ACTIVE_POWER_M'
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name)+'.Sensor.optprob_set'
exec(cmd_str_)
cmd_str_='optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name)+'.Sensor.optstate_set'
exec(cmd_str_)
    cmd_str_='s_names='+bldg_tag+'.'+sig_tag+'sensor_names'
exec(cmd_str_)
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
sort_label= list(np.array(s_names)[sort_idx[:num_picks]])
# BN Network Learning
import lib_bnlearn as rbn
num_picks=15
p_idx=GW2_.diffsensor_names.index(p_name)
idx_select=[p_idx]+ list(sort_idx[:num_picks])
bndata_mat=GW2_.diffdata_state_mat[:,idx_select]
# File name format - allowing dot
cols_fnames=[GW2_.diffsensor_names[k] for k in idx_select]
# Variable name format - replacing dot with underscore
cols=[remove_dot(GW2_.diffsensor_names[k]) for k in idx_select]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
#fit = rbn.py_bn_fit(hc_b,data_frame)
#index_temp=2
#prob_dimnames,prob_factors,prob_mat = rbn.py_get_node_cond_mat(fit,index_temp)
data_state_mat=GW2_.diffdata_state_mat
cause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[GW2_.diffsensor_names.index(label_) for label_ in cause_label]
effect_idx=GW2_.diffsensor_names.index(p_name)
effect_label=[p_name]
obs_state=PEAK
peak_state_temp, peak_prob_temp=compute_cause_likelihood(data_state_mat,cause_idx,[[effect_idx]],[[obs_state]])
obs_state=LOW_PEAK
lowpeak_state_temp, lowpeak_prob_temp=compute_cause_likelihood(data_state_mat,cause_idx,[[effect_idx]],[[obs_state]])
plt.plot(peak_state_temp,peak_prob_temp,'-^')
plt.plot(lowpeak_state_temp,lowpeak_prob_temp,'-v')
plt.title(cause_label,fontsize='large')
plt.xlabel('Measurements',fontsize='large')
plt.ylabel('Probability of State of Power Demand Variation',fontsize='large')
plt.xticks(fontsize='large')
plt.yticks(fontsize='large')
plt.grid()
plt.legend(('High Variation', 'No Variation'),prop={'size':18})
    plt.savefig(fig_dir+p_name+'_'+sig_tag+'_variation_cause_prob.png', bbox_inches='tight')
data_2=get_data_set(cause_label+effect_label)
    diff_png_name=plot_data_x(data_2,stype='diff')
#sensors_=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
###############################################################
    # 3. Time and Weather Dependency Analysis
# Weather data dependency
# BN Network Learning
###############################################################
fig1=figure()
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat[:,-1])
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat_[:,-1],'*r')
ylabel(GW2_.avgweather_names[-1])
plt.legend(('measurements','classified states'))
mn_=min(GW2_.avgdata_weather_mat[:,-1])
mx_=max(GW2_.avgdata_weather_mat[:,-1])
ylim([mn_-0.1*abs(mn_),mx_+0.1*abs(mx_)])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig2=figure()
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat[:,-2])
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat_[:,-2],'*r')
plt.legend(('measurements','classified states'))
ylabel(GW2_.avgweather_names[-2])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig2.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig3=figure()
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat[:,-3])
plot(GW2_.avg_time_slot,GW2_.avgdata_weather_mat_[:,-3],'*r')
plt.legend(('measurements','classified states'))
ylabel(GW2_.avgweather_names[-3])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig3.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Likelihood of weather factors
optprob_set=GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.weather.optprob_set
w_names=GW2_.avg.weather_names
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx].T
sort_state=optstate_set[sort_idx].T
figw=figure(figsize=(15.0,10.0))
#figw=figure()
plt.subplot(2,1,1)
plt.plot(sort_lh,'-s')
x_label= list(np.array(w_names)[sort_idx])
x_ticks=range(len(x_label))
#plt.xticks(x_ticks,x_label, fontsize="small")
plt.xticks(x_ticks,x_label,rotation=30, fontsize=12)
plt.tick_params(labelsize='large')
plt.ylabel('Likelihood (From 0 to 1)',fontsize=18)
#plt.get_current_fig_manager().window.showMaximized()
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
figw.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
    # regular event
sig_tag='avg'
p_name='GW2_CG_SYSTEM_ACTIVE_POWER_M'
p_idx=GW2_.avg.sensor_names.index(p_name)
bndata_mat=np.vstack((GW2_.avg.data_state_mat[:,p_idx].T,GW2_.avg.data_weather_mat_.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols=[p_name]+[w_name for w_name in GW2_.avg.weather_names]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
wcause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in wcause_label]
    effect_idx=cols.index(p_name)
peak_state_0, peak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[0]],[[effect_idx]],[[PEAK]])
lowpeak_state_0, lowpeak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[0]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_0)
sort_idx2=argsort(lowpeak_state_0)
fig0=figure()
plot(sort(peak_state_0), np.array(peak_prob_0)[sort_idx1],'-^')
plot(sort(lowpeak_state_0), np.array(lowpeak_prob_0)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
plt.ylabel('Probability of State of Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.xlabel(wcause_label[0])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig0.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
    # Irregular event
sig_tag='diff'
p_name='GW2.CG_PHASE2_ACTIVE_POWER_M'
p_idx=GW2_.diffsensor_names.index(p_name)
bndata_mat=np.vstack((GW2_.diffdata_state_mat[:,p_idx].T,GW2_.diffdata_weather_mat_.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in GW2_.diffweather_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in GW2_.diffweather_names]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
wcause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in wcause_label]
effect_idx=cols_fnames.index(p_name)
peak_state_0, peak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[0]],[[effect_idx]],[[PEAK]])
lowpeak_state_0, lowpeak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[0]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_0)
sort_idx2=argsort(lowpeak_state_0)
fig0=figure()
plot(sort(peak_state_0), np.array(peak_prob_0)[sort_idx1],'-^')
plot(sort(lowpeak_state_0), np.array(lowpeak_prob_0)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
plt.ylabel('Probability of State of Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.xlabel(wcause_label[0])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig0.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
"""
peak_state_1, peak_prob_1=compute_cause_likelihood(bndata_mat,[cause_idx[1]],[[effect_idx]],[[PEAK]])
lowpeak_state_1, lowpeak_prob_1=compute_cause_likelihood(bndata_mat,[cause_idx[1]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_1)
sort_idx2=argsort(lowpeak_state_1)
fig1=figure()
plot(sort(peak_state_1), np.array(peak_prob_1)[sort_idx1],'-^')
plot(sort(lowpeak_state_1), np.array(lowpeak_prob_1)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
plt.ylabel('Probability of State of Power Demand')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.xlabel(wcause_label[1])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
"""
# Time data dependency - Likelihood of time factors
# BN Network Learning
    # Regular event
state_map=np.array(GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.state_map)
prob_map=np.array(GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.prob_map)
t_name_set=GW2_.avgtime_names
    # ['MTH', 'WD', 'HR']
sig_tag='avg'
p_name='GW2.CG_SYSTEM_ACTIVE_POWER_M'
p_idx=GW2_.avgsensor_names.index(p_name)
bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_time_mat.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in GW2_.avgtime_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in GW2_.avgtime_names]
effect_idx=cols_fnames.index(p_name)
time_high_peak_liklihood_set=[]
time_low_peak_liklihood_set=[]
for t_name in t_name_set:
idx_t=cols.index(t_name)
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[PEAK]])
time_high_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[LOW_PEAK]])
time_low_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
fig=figure()
subplot(3,1,1)
plot(time_high_peak_liklihood_set[0][0],time_high_peak_liklihood_set[0][1],'-^')
plot(time_low_peak_liklihood_set[0][0],time_low_peak_liklihood_set[0][1],'-v')
plt.xticks(monthDict.keys(),monthDict.values())
plt.xlabel('Months of a year',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,2)
plot(time_high_peak_liklihood_set[1][0],time_high_peak_liklihood_set[1][1],'-^')
plot(time_low_peak_liklihood_set[1][0],time_low_peak_liklihood_set[1][1],'-v')
plt.xticks(weekDict.keys(),weekDict.values())
plt.xlabel('Days of a Week',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,3)
plot(time_high_peak_liklihood_set[2][0],time_high_peak_liklihood_set[2][1],'-^')
plot(time_low_peak_liklihood_set[2][0],time_low_peak_liklihood_set[2][1],'-v')
plt.xticks(hourDict.keys(),hourDict.values())
plt.xlabel('Hours of a day',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'))
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
    # BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
#hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
    # Irregular event
state_map=np.array(GW2_.analysis.diff.GW2_CG_PHASE2_ACTIVE_POWER_M.Time.state_map)
prob_map=np.array(GW2_.analysis.diff.GW2_CG_PHASE2_ACTIVE_POWER_M.Time.prob_map)
t_name_set=GW2_.avgtime_names
    # ['MTH', 'WD', 'HR']
sig_tag='diff'
p_name='GW2.CG_PHASE2_ACTIVE_POWER_M'
p_idx=GW2_.diffsensor_names.index(p_name)
bndata_mat=np.vstack((GW2_.diffdata_state_mat[:,p_idx].T,GW2_.diffdata_time_mat.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in GW2_.difftime_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in GW2_.difftime_names]
effect_idx=cols_fnames.index(p_name)
time_high_peak_liklihood_set=[]
time_low_peak_liklihood_set=[]
for t_name in t_name_set:
idx_t=cols.index(t_name)
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[PEAK]])
time_high_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[LOW_PEAK]])
time_low_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
fig=figure()
subplot(3,1,1)
plot(time_high_peak_liklihood_set[0][0],time_high_peak_liklihood_set[0][1],'-^')
plot(time_low_peak_liklihood_set[0][0],time_low_peak_liklihood_set[0][1],'-v')
plt.xticks(monthDict.keys(),monthDict.values())
plt.xlabel('Months of a year',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
    plt.legend(('High Variation', 'Low Variation'))
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,2)
plot(time_high_peak_liklihood_set[1][0],time_high_peak_liklihood_set[1][1],'-^')
plot(time_low_peak_liklihood_set[1][0],time_low_peak_liklihood_set[1][1],'-v')
plt.xticks(weekDict.keys(),weekDict.values())
plt.xlabel('Days of a Week',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
    plt.legend(('High Variation', 'Low Variation'))
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,3)
plot(time_high_peak_liklihood_set[2][0],time_high_peak_liklihood_set[2][1],'-^')
plot(time_low_peak_liklihood_set[2][0],time_low_peak_liklihood_set[2][1],'-v')
plt.xticks(hourDict.keys(),hourDict.values())
plt.xlabel('Hours of a day',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
    plt.legend(('High Variation', 'Low Variation'))
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
    # BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
#hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
###############################################################
    # 4. Sensor, Weather, and Time Dependency Analysis
# BN Network Learning
###############################################################
    # For regular events.
state_map=np.array(GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.state_map)
prob_map=np.array(GW2_.analysis.avg.GW2_CG_SYSTEM_ACTIVE_POWER_M.Time.prob_map)
t_name_set=GW2_.avgtime_names
    # ['MTH', 'WD', 'HR']
sig_tag='avg'
p_name=['GW2.CG_SYSTEM_ACTIVE_POWER_M']
sensor_cause_label=['GW2.SAA_UV_INDEX_M','GW2.HA49_AS_TE_KH_FM']
weather_cause_label=['Humidity']
time_cause_label=['MTH', 'HR']
p_idx=[GW2_.avgsensor_names.index(temp) for temp in p_name]
s_idx=[GW2_.avgsensor_names.index(temp) for temp in sensor_cause_label]
w_idx=[GW2_.avgweather_names.index(temp) for temp in weather_cause_label]
t_idx=[GW2_.avgtime_names.index(temp) for temp in time_cause_label]
bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,\
GW2_.avgdata_state_mat[:,s_idx].T, \
GW2_.avgdata_weather_mat_[:,w_idx].T, \
GW2_.avgdata_time_mat[:,t_idx].T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[name_ for name_ in p_name+sensor_cause_label+weather_cause_label+time_cause_label]
cols=[remove_dot(name_) for name_ in p_name+sensor_cause_label+weather_cause_label+time_cause_label]
    # BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])+pair_in_idx([cols[1]],cols[2:])+pair_in_idx([cols[2]],cols[3:])+pair_in_idx([cols[3]],cols[4:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
cause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols_fnames.index(label_) for label_ in cause_label]
effect_idx=[cols_fnames.index(label_) for label_ in p_name]
effect_label=p_name
obs_state=PEAK
peak_state_temp, peak_prob_temp=compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[obs_state]])
obs_state=LOW_PEAK
lowpeak_state_temp, lowpeak_prob_temp=compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[obs_state]])
peak_state=np.array(peak_state_temp)
peak_prob=np.array(peak_prob_temp)
lowpeak_state=np.array(lowpeak_state_temp)
lowpeak_prob=np.array(lowpeak_prob_temp)
# Probability
fig=figure(figsize=(25.0,20.0))
for i,mon in enumerate(yearMonths):
subplot(3,4,mon+1)
idx=np.nonzero(peak_state[:,1]==mon)[0]
plot(peak_state[idx,0],peak_prob[idx],'-^')
idx=np.nonzero(lowpeak_state[:,1]==mon)[0]
plot(lowpeak_state[idx,0],lowpeak_prob[idx],'-v')
plt.ylabel('Likelihood',fontsize='small')
if i>7:
plt.xlabel(cause_label[0]+' Measurements',fontsize='small')
title(monthDict[mon]);plt.ylim([-0.05,1.05])
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='small')
plt.grid()
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
print '----------------------------------------'
print 'Likelihoods '
print '----------------------------------------'
print cause_label+['Low Peak','High Peak']
print '----------------------------------------'
print np.vstack((np.int0(peak_state).T,np.int0(100*lowpeak_prob).T,np.int0(100*peak_prob).T)).T
print '----------------------------------------'
s_val_set=set(peak_state[:,0])
m_val_set=set(peak_state[:,1])
Z_peak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((peak_state[:,0]==s_val)&(peak_state[:,1]==m_val))[0][0]
Z_peak[i,j]=peak_prob[idx]
s_val_set=set(lowpeak_state[:,0])
m_val_set=set(lowpeak_state[:,1])
Z_lowpeak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((lowpeak_state[:,0]==s_val)&(lowpeak_state[:,1]==m_val))[0][0]
Z_lowpeak[i,j]=lowpeak_prob[idx]
Z_lowpeak=lowpeak_prob.reshape((len(s_val_set),len(m_val_set)))
Z_peak=peak_prob.reshape((len(s_val_set),len(m_val_set)))
fig1=figure()
im = plt.imshow(Z_peak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of High-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig2=figure()
im = plt.imshow(Z_lowpeak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of Low-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig2.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
###############################################################
    # 3. Irregular Events for VAK1_CG_SYSTEM_REACTIVE_POWER_M
###############################################################
bldg_tag='VAK1_'
sig_tag='diff'
p_name='VAK1.CG_SYSTEM_REACTIVE_POWER_M'
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name)+'.Sensor.optprob_set'
exec(cmd_str_)
cmd_str_='optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name)+'.Sensor.optstate_set'
exec(cmd_str_)
    cmd_str_='s_names='+bldg_tag+'.'+sig_tag+'sensor_names'
exec(cmd_str_)
optprob_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Sensor.optprob_set
optstate_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Sensor.optstate_set
s_names=VAK1_.diffsensor_names
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
sort_label= list(np.array(s_names)[sort_idx[:num_picks]])
# BN Network Learning
import lib_bnlearn as rbn
num_picks=30
p_idx=VAK1_.diffsensor_names.index(p_name)
idx_select=[p_idx]+ list(sort_idx[:num_picks])
bndata_mat=VAK1_.diffdata_state_mat[:,idx_select]
# File name format - allowing dot
cols_fnames=[VAK1_.diffsensor_names[k] for k in idx_select]
# Variable name format - replacing dot with underscore
cols=[remove_dot(VAK1_.diffsensor_names[k]) for k in idx_select]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
plt.savefig(fig_dir+p_name+'_'+sig_tag+'bn_sensors.png', bbox_inches='tight')
#fit = rbn.py_bn_fit(hc_b,data_frame)
#index_temp=2
#prob_dimnames,prob_factors,prob_mat = rbn.py_get_node_cond_mat(fit,index_temp)
data_state_mat=VAK1_.diffdata_state_mat
cause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[VAK1_.diffsensor_names.index(label_) for label_ in cause_label]
effect_idx=VAK1_.diffsensor_names.index(p_name)
effect_label=[p_name]
obs_state=PEAK
peak_state_13, peak_prob_13=compute_cause_likelihood(data_state_mat,[cause_idx[1],cause_idx[3]],[[effect_idx]],[[obs_state]])
print_cond_table(peak_state_13, peak_prob_13,[cause_label[1],cause_label[3]])
obs_state=LOW_PEAK
lowpeak_state_13, lowpeak_prob_13=compute_cause_likelihood(data_state_mat,[cause_idx[1],cause_idx[3]],[[effect_idx]],[[obs_state]])
print_cond_table(lowpeak_state_13, lowpeak_prob_13,[cause_label[1],cause_label[3]])
plt.plot(range(len(peak_state_13)), peak_prob_13,'-^')
plt.plot(range(len(lowpeak_state_13)), lowpeak_prob_13,'-v')
plt.title(cause_label[1]+cause_label[3],fontsize='large')
plt.xlabel('State',fontsize='large')
plt.ylabel('Probability of State of Reactive Power Variation',fontsize='large')
plt.xticks(fontsize='large')
plt.yticks(fontsize='large')
plt.grid()
plt.legend(('High Variation', 'No Variation'),prop={'size':18})
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
start_t=datetime.datetime(2014, 2, 20, 15, 44, 52)
end_t=datetime.datetime(2014, 2, 24, 16, 5, 12)
data_x=get_data_set([cause_label[1]]+[cause_label[3]]+effect_label,start_t,end_t)
png_namex=plot_data_x(data_x,stype='raw',smark='-^')
png_namex=plot_data_x(data_x,stype='diff',smark='-^')
############################################################################
############################################################################
#<--------------------------------------------------------------------
#<--------------------------------------------------------------------
#<--------------------------------------------------------------------
###############################################################
# 3. Time and Weather Dependency Analysis
# Weather data dependency
# BN Network Learning
###############################################################
bldg_tag='VAK1_'
sig_tag='diff'
p_name='VAK1.CG_SYSTEM_REACTIVE_POWER_M'
optprob_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Sensor.optprob_set
optstate_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Sensor.optstate_set
s_names=VAK1_.diffsensor_names
# Likelihood of weather factors
optprob_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Weather.optprob_set
optstate_set=VAK1_.analysis.diff.VAK1_CG_SYSTEM_REACTIVE_POWER_M.Weather.optstate_set
w_names=VAK1_.diffweather_names
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx].T
sort_state=optstate_set[sort_idx].T
figw=figure(figsize=(15.0,10.0))
#figw=figure()
plt.subplot(2,1,1)
plt.plot(sort_lh,'-s')
x_label= list(np.array(w_names)[sort_idx])
x_ticks=range(len(x_label))
#plt.xticks(x_ticks,x_label, fontsize="small")
plt.xticks(x_ticks,x_label,rotation=30, fontsize=12)
plt.tick_params(labelsize='large')
plt.ylabel('Likelihood (From 0 to 1)',fontsize=18)
plt.title('Likelihood of peak differential measurement of '+p_name+' given weather factors')
#plt.get_current_fig_manager().window.showMaximized()
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
figw.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# regular event
import lib_bnlearn as rbn
p_idx=VAK1_.diffsensor_names.index(p_name)
bndata_mat=np.vstack((VAK1_.diffdata_state_mat[:,p_idx].T,VAK1_.diffdata_weather_mat_.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in VAK1_.diffweather_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in VAK1_.diffweather_names]
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
wcause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in wcause_label]
#fig=figure(figsize=(10,10))
fig=figure()
for k in range(len(cause_idx)):
effect_idx=cols_fnames.index(p_name)
peak_state_0, peak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[PEAK]])
lowpeak_state_0, lowpeak_prob_0=compute_cause_likelihood(bndata_mat,[cause_idx[k]],[[effect_idx]],[[LOW_PEAK]])
sort_idx1=argsort(peak_state_0)
sort_idx2=argsort(lowpeak_state_0)
subplot(1,len(cause_idx),k+1)
plot(sort(peak_state_0), np.array(peak_prob_0)[sort_idx1],'-^')
plot(sort(lowpeak_state_0), np.array(lowpeak_prob_0)[sort_idx2],'-v')
plt.legend(('measurements','classified states'))
if k==0:
plt.ylabel('Probability of Peak Reactive Power Variation')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.xlabel(wcause_label[k])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Time data dependency - Likelihood of time factors
# BN Network Learning
# Regular event
t_name_set=VAK1_.difftime_names
# ['MTH', 'WD', 'HR']
p_idx=VAK1_.diffsensor_names.index(p_name)
bndata_mat=np.vstack((VAK1_.diffdata_state_mat[:,p_idx].T,VAK1_.diffdata_time_mat.T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[p_name]+[w_name for w_name in VAK1_.difftime_names]
cols=[remove_dot(p_name)]+[remove_dot(w_name) for w_name in VAK1_.difftime_names]
effect_idx=cols_fnames.index(p_name)
time_high_peak_liklihood_set=[]
time_low_peak_liklihood_set=[]
for t_name in t_name_set:
idx_t=cols.index(t_name)
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[PEAK]])
time_high_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
peak_state, peak_prob=compute_cause_likelihood(bndata_mat,[idx_t],[[effect_idx]],[[LOW_PEAK]])
time_low_peak_liklihood_set.append(np.array([peak_state,peak_prob]))
fig=figure()
subplot(3,1,1)
plot(time_high_peak_liklihood_set[0][0],time_high_peak_liklihood_set[0][1],'-^')
plot(time_low_peak_liklihood_set[0][0],time_low_peak_liklihood_set[0][1],'-v')
plt.xticks(monthDict.keys(),monthDict.values())
plt.xlabel('Months of a year',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,2)
plot(time_high_peak_liklihood_set[1][0],time_high_peak_liklihood_set[1][1],'-^')
plot(time_low_peak_liklihood_set[1][0],time_low_peak_liklihood_set[1][1],'-v')
plt.xticks(weekDict.keys(),weekDict.values())
plt.xlabel('Days of a Week',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
subplot(3,1,3)
plot(time_high_peak_liklihood_set[2][0],time_high_peak_liklihood_set[2][1],'-^')
plot(time_low_peak_liklihood_set[2][0],time_low_peak_liklihood_set[2][1],'-v')
plt.xticks(hourDict.keys(),hourDict.values())
plt.xlabel('Hours of a day',fontsize='large')
plt.ylabel('Likelihood',fontsize='large')
plt.grid()
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='large')
plt.tick_params(labelsize='large')
plt.ylim([-0.05,1.05])
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
###############################################################
# 4. Sensor, Weather Time Dependency Analysis
# BN Network Learning
###############################################################
# For regular event.
t_name_set=VAK1_.difftime_names
# ['MTH', 'WD', 'HR']
sig_tag='diff'
p_name=['VAK1.CG_SYSTEM_REACTIVE_POWER_M']
sensor_cause_label=['VAK1.GEO_LM5_TE1_FM','VAK1.AK_TE50_4_M']
weather_cause_label=['Dew PointC','Humidity']
time_cause_label=['MTH', 'HR']
p_idx=[VAK1_.diffsensor_names.index(temp) for temp in p_name]
s_idx=[VAK1_.diffsensor_names.index(temp) for temp in sensor_cause_label]
w_idx=[VAK1_.diffweather_names.index(temp) for temp in weather_cause_label]
t_idx=[VAK1_.difftime_names.index(temp) for temp in time_cause_label]
bndata_mat=np.vstack((VAK1_.diffdata_state_mat[:,p_idx].T,\
VAK1_.diffdata_state_mat[:,s_idx].T, \
VAK1_.diffdata_weather_mat_[:,w_idx].T, \
VAK1_.diffdata_time_mat[:,t_idx].T)).T
#bndata_mat=np.vstack((GW2_.avgdata_state_mat[:,p_idx].T,GW2_.avgdata_weather_mat.T)).T
cols_fnames=[name_ for name_ in p_name+sensor_cause_label+weather_cause_label+time_cause_label]
cols=[remove_dot(name_) for name_ in p_name+sensor_cause_label+weather_cause_label+time_cause_label]
# BN Network representation
b_arc_list = pair_in_idx([cols[0]],cols[1:])+pair_in_idx([cols[1]],cols[2:])+pair_in_idx([cols[2]],cols[3:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
#hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
fig=rbn.nx_plot(hc_b,cols)
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
plt.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
cause_label=list(np.array(cols_fnames)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols_fnames.index(label_) for label_ in cause_label]
effect_idx=[cols_fnames.index(label_) for label_ in p_name]
effect_label=p_name
obs_state=PEAK
peak_state_temp, peak_prob_temp=compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[obs_state]])
obs_state=LOW_PEAK
lowpeak_state_temp, lowpeak_prob_temp=compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[obs_state]])
peak_state=np.array(peak_state_temp)
peak_prob=np.array(peak_prob_temp)
lowpeak_state=np.array(lowpeak_state_temp)
lowpeak_prob=np.array(lowpeak_prob_temp)
# Probability
fig=figure(figsize=(30.0,25.0))
for i,mon in enumerate(yearMonths):
subplot(3,4,mon+1)
idx=np.nonzero(peak_state[:,2]==mon)[0]
x_set=peak_state[idx,0:2]
plot(range(len(x_set)),peak_prob[idx],'-^')
idx=np.nonzero(lowpeak_state[:,2]==mon)[0]
plot(range(len(x_set)),lowpeak_prob[idx],'-v')
x_label=[(stateDict[peak_tpl[0]],stateDict[peak_tpl[1]]) for peak_tpl in x_set]
x_ticks=range(len(x_set))
plt.ylabel('Likelihood',fontsize='small')
if i>7:
#plt.xlabel(cause_label[0]+' Measurements',fontsize='small')
plt.xticks(x_ticks,x_label,rotation=270, fontsize=10)
plt.tick_params(labelsize='small')
title(monthDict[mon]);plt.ylim([-0.05,1.05])
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='small')
plt.grid()
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
print '----------------------------------------'
print 'Likelihoods '
print '----------------------------------------'
print cause_label+['Low Peak','High Peak']
print '----------------------------------------'
print np.vstack((np.int0(peak_state).T,np.int0(100*lowpeak_prob).T,np.int0(100*peak_prob).T)).T
print '----------------------------------------'
#<----------------------------------------------------------------------
#import pdb;pdb.set_trace()
DO_BN_LEARN=0
# This is BN Learn example
if DO_BN_LEARN==1:
import lib_bnlearn as rbn
irr_state_mat,irr_state_prob,skewness_metric_sort,skewness_metric_sort_idx=irr_state_mapping(diffdata_state_mat,weight_coeff=10)
bndata_dict = mt.loadObjectBinary('diffdata_dict.bin')
bn_col=bndata_dict['diffdata_names']
bn_sn=bndata_dict['sensor_names']
bn_wn=bndata_dict['weather_names']
bn_tn=bndata_dict['time_names']
bndata_mat=bndata_dict['diffdata_mat']
# If the variable is discrete, we should convert the data into R's factor data type
#cols = X_Sensor_NAMES+X_Time_NAMES
for k,name_temp in enumerate(bn_wn):
try:
blank_idx=name_temp.index(' ')
#print blank_idx,X_Weather_NAMES[k][blank_idx]
bn_wn[k]=bn_wn[k].replace(' ','_')
except:
pass
for k,name_temp in enumerate(bn_col):
try:
blank_idx=name_temp.index(' ')
#print blank_idx,X_Weather_NAMES[k][blank_idx]
bn_col[k]=bn_col[k].replace(' ','_')
except:
pass
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat[:,:len(bn_sn)])
#cols = X_Sensor_NAMES+X_Weather_NAMES+X_Time_NAMES
cols =bn_col[:len(bn_sn)]
# Construct data frame, given data matrix (np.array) and column names
# if column names are not given, we use column index [0,1,..] as the column names
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
#arc_list = pair_in_idx(X_Sensor_NAMES,X_Time_NAMES)
# Black list
b_arc_list = pair_in_idx(bn_sn,bn_tn)\
+pair_in_idx(bn_sn,bn_wn)\
+pair_in_idx(bn_wn,bn_tn)\
+pair_in_idx(bn_wn,bn_wn)\
+pair_in_idx(bn_tn,bn_tn)
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
# White list
w_arc_list = pair_in_idx(bn_tn,bn_sn)\
+pair_in_idx(bn_tn,bn_wn)
white_arc_frame = rbn.construct_arcs_frame(w_arc_list)
"""
Step2: Using bnlearn to learn graph structure from data frame
"""
# Use hill-climbing learning algorithm
# With blacklisting arcs
hc = rbn.bnlearn.hc(data_frame,score='bic')
hc_score=rbn.bnlearn.score(hc,data_frame,type="bic")
hc_bw = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,whitelist=white_arc_frame,score='bic')
hc_bw_score=rbn.bnlearn.score(hc_bw,data_frame,type="bic")
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
hc_b_score=rbn.bnlearn.score(hc_b,data_frame,type="bic")
print 'hc_score: ',hc_score,'hc_b_score: ',hc_b_score,'hc_bw_score: ',hc_bw_score
# Print some output from the learning process
#print str(hc_b)
# Get the adjacent matrix from the graph structure
# the return is numpy array
amat = rbn.py_get_amat(hc_b)
"""
There are other learning algorithms available too
E.g.:
gs = rbn.bnlearn.gs(data_frame)
"""
"""
Step 3: Plotting the graph, given the graph structure
and the names of nodes
"""
#hc = rbn.bnlearn.hc(data_frame,score='k2')
figure(2)
rbn.nx_plot(hc_b,cols)
rbn.nx_plot(hc,cols)
#rbn.nx_plot(hc,rbn.bnlearn.nodes(hc))
"""
Step4: Fitting the data into graph structure
to estimate the conditional probability
NOTE: in order for fitting to happen, the graph must be completely directed
"""
fit = rbn.py_bn_fit(hc_b,data_frame)
#print str(fit)
#index_temp=cols.index('GW1.HA1_SM_K')
index_temp=1
prob_dimnames,prob_factors,prob_mat = rbn.py_get_node_cond_mat(fit,index_temp)
#rbn.write_to_file('fit.dat',str(fit))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
###############################################################################
DATA_EMBEDDING_ANALYSIS=0
if DATA_EMBEDDING_ANALYSIS==1:
# Covariance Estimation
edge_model = covariance.GraphLassoCV()
edge_model.fit(X_INPUT)
cov_mat=edge_model.covariance_
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=X_INPUT.shape[1]-1)
embedding = node_position_model.fit_transform(X_INPUT.T).T
plt.figure('Data structure map', facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.01)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100*d**2,c=labels, cmap=pl.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=pl.cm.hot_r,
norm=pl.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(zip(input_names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=12,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
DATA_NAME_ANALYSIS=0
if DATA_NAME_ANALYSIS==1:
#################################################################################
# Graph structure analysis of sensor naming
#################################################################################
print '--------------------------------------------------'
print 'Graph structure analysis of sensor naming'
print '--------------------------------------------------'
print 'get similarity matrix of sensor naming'
#sim_mat, uuid_list, phrases, key_description, phrase_count = get_sim_mat()
sim_mat = mt.loadObjectBinary('../data_year/sim_mat.bin')
uuid_list = mt.loadObjectBinary('../data_year/uuid_list.bin')
phrases = mt.loadObjectBinary('../data_year/phrases.bin')
key_description = mt.loadObjectBinary('../data_year/key_description.bin')
phrase_count = mt.loadObjectBinary('../data_year/phrase_count.bin')
print 'build tree.....'
for sensor_name in uuid_list:
print len(sensor_name)
print '**************************** End of Program ****************************'
"""
# Obsolete Lines
###########################################################################
# Float Type Measurement Clustering
###########################################################################
DIST_MAT_sf=find_norm_dist_matrix(X_Feature[:,sf_idx])
# Find representative set of sensor measurements
min_dist_=np.sqrt(2*(1-(0.9)))
max_dist_=np.sqrt(2*(1-(0.1)))
distmat_input=DIST_MAT_sf
DO_CLUSTERING_TEST=0
if DO_CLUSTERING_TEST==1:
CLUSTERING_TEST(distmat_input,min_corr=0.1,max_corr=0.9)
pack_exemplars_float,pack_labels_float=max_pack_cluster(distmat_input,min_dist=min_dist_,max_dist=max_dist_)
pack_num_clusters_float=int(pack_labels_float.max()+1)
print '-------------------------------------------------------------------------'
print pack_num_clusters_float, 'clusters out of ', len(pack_labels_float), ' float type measurements'
print '-------------------------------------------------------------------------'
validity,intra_dist,inter_dist=compute_cluster_err(distmat_input,pack_labels_float)
print 'validity:',round(validity,2),', intra_dist: ',np.round(intra_dist,2),', inter_dist: ',np.round(inter_dist,2)
print '-------------------------------------------------------------------------'
sf_exemplars_dict={}
sfe_name=list(np.array(sf_name)[pack_exemplars_float])
sfe_idx=np.array(sf_idx)[pack_exemplars_float]
for label_id,(m_idx,exemplar_label) in enumerate(zip(pack_exemplars_float,sfe_name)):
print exemplar_label
children_set=list(set(np.nonzero(pack_labels_float==label_id)[0])-set([m_idx]))
print 'Label ', label_id, ': ',m_idx,'<--', children_set
sf_exemplars_dict.update({exemplar_label:list(np.array(sf_name)[children_set])})
# exemplar index
###########################################################################
# InT Type Measurement Clustering
###########################################################################
DIST_MAT_si=find_norm_dist_matrix(X_Feature[:,si_idx])
# Find representative set of sensor measurements
min_dist_=np.sqrt(2*(1-(0.9)))
max_dist_=np.sqrt(2*(1-(0.1)))
distmat_input=DIST_MAT_si
DO_CLUSTERING_TEST=0
if DO_CLUSTERING_TEST==1:
CLUSTERING_TEST(distmat_input,min_corr=0.1,max_corr=0.9)
pack_exemplars_int,pack_labels_int=max_pack_cluster(distmat_input,min_dist=min_dist_,max_dist=max_dist_)
pack_num_clusters_int=int(pack_labels_int.max()+1)
print '-------------------------------------------------------------------------'
print pack_num_clusters_int, 'clusters out of ', len(pack_labels_int), ' int type measurements'
print '-------------------------------------------------------------------------'
validity,intra_dist,inter_dist=compute_cluster_err(distmat_input,pack_labels_int)
print 'validity:',round(validity,2),', intra_dist: ',np.round(intra_dist,2),', inter_dist: ',np.round(inter_dist,2)
print '-------------------------------------------------------------------------'
si_exemplars_dict={}
sie_name=list(np.array(si_name)[pack_exemplars_int])
sie_idx=np.array(si_idx)[pack_exemplars_int]
for label_id,(m_idx,exemplar_label_int) in enumerate(zip(pack_exemplars_int,sie_name)):
print exemplar_label_int
children_set=list(set(np.nonzero(pack_labels_int==label_id)[0])-set([m_idx]))
print 'Label ', label_id, ': ',m_idx,'<--', children_set
si_exemplars_dict.update({exemplar_label_int:list(np.array(si_name)[children_set])})
# If no data is available, then impute the data by a weighted mean
print 'Before imputation'
for i,key in enumerate(data_used):
plt.figure(1)
print key
print [k for k in np.nonzero(X[:,i]==np.infty)[0]]
plt.subplot(len(data_used),1,i+1)
plt.plot(time_slots,X[:,i],'.')
plt.title(key,fontsize=6)
plt.xticks(fontsize=6);plt.yticks(fontsize=6)
# If no data is available, then impute the data by a weighted mean
print 'Impute missing data'
for i,key in enumerate(data_used):
for inf_idx in np.nonzero(X[:,i]==np.infty)[0]:
whgt_bottom_sum=0;whgt_top_sum=0
for h_idx in np.nonzero(hr_set==hr_set[inf_idx])[0]:
#import pdb; pdb.set_trace()
sample_temp=X[h_idx,i]
if (sample_temp<np.infty and h_idx!=inf_idx):
wght=1/np.abs(daycount_set[h_idx]-daycount_set[inf_idx])
whgt_bottom_sum=whgt_bottom_sum+wght
whgt_top_sum=whgt_top_sum+wght*sample_temp
new_sample=whgt_top_sum/whgt_bottom_sum
X[inf_idx,i]=new_sample
# If no data is available, then impute the data by a weighted mean
print 'After imputation'
for i,key in enumerate(data_used):
plt.figure(1)
print key
print [k for k in np.nonzero(X[:,i]==np.infty)[0]]
plt.subplot(len(data_used),1,i+1)
plt.plot(time_slots,X[:,i])
plt.title(key,fontsize=6)
plt.xticks(fontsize=6);plt.yticks(fontsize=6)
gmm_labels=gmm.predict(obs)
labels=gmm_labels
#kmean=KMeans(n_clusters=2).fit(obs[:,newaxis])
#labels=kmean.labels_
subplot(3,1,1)
for i in range(num_cluster):
plot(t_new[labels==i]-t_new[0],val_new[labels==i],'s')
title(input_names[k])
subplot(3,1,2)
plot(t_new[1:]-t_new[0],abs(diff(val_new))/max(abs(diff(val_new))))
subplot(3,1,3)
a=diff(val_new)
plot(t_new[1:]-t_new[0],a/max(abs(a)))
#labels=kmean.labels_
subplot(2,1,1)
for i in range(opt_num_cluster):
plot(t_new[label==i]-t_new[0],val_new[label==i],'*')
title(input_names[k])
subplot(2,1,2)
plot(t_new[1:]-t_new[0],abs(diff(val_new))/max(abs(diff(val_new))))
plot(t_new[0:50],label[0:50],'s')
#plt.ioff()
# Only do state classification when the number of samples is greater than the minimum required for analysis
k=0
dt=intpl_intv[k]
# Reference time unit is 5 min, 15 min, 30 min and 1 hour
num_samples_set=np.round(np.array([60*5,60*15,60*30, 60*60 ])*(1/dt))
min_num_samples_for_analysis=2**5
for i,nfft_temp in enumerate(num_samples_set):
if nfft_temp>min_num_samples_for_analysis:
NFFT=int(2**ceil(log2(nfft_temp)));break;
window_duration=NFFT*dt
Fs = (1.0/dt) # the sampling frequency
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
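# A minimal sketch of the spectrogram call these comments describe, assuming
# matplotlib's plt.specgram and the interpolated signal val_new from the
# preceding (obsolete) lines; both names are assumptions, not shown here:
# Pxx, freqs, bins, im = plt.specgram(val_new, NFFT=NFFT, Fs=Fs, noverlap=NFFT/2)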
"""
| gpl-2.0 |
muxiaobai/CourseExercises | python/kaggle/competition/Digit-Recognizer/use-sklearn_knn_svm_NB.py | 1 | 4137 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 16 21:59:00 2014
@author: wepon
@blog:http://blog.csdn.net/u012162613
"""
from numpy import *
import csv
def toInt(array):
array=mat(array)
m,n=shape(array)
newArray=zeros((m,n))
for i in xrange(m):
for j in xrange(n):
newArray[i,j]=int(array[i,j])
return newArray
def nomalizing(array):
m,n=shape(array)
for i in xrange(m):
for j in xrange(n):
if array[i,j]!=0:
array[i,j]=1
return array
def loadTrainData():
l=[]
with open('train.csv') as file:
lines=csv.reader(file)
for line in lines:
l.append(line) #42001*785
l.remove(l[0])
l=array(l)
label=l[:,0]
data=l[:,1:]
return nomalizing(toInt(data)),toInt(label) #label 1*42000 data 42000*784
#return trainData,trainLabel
def loadTestData():
l=[]
with open('test.csv') as file:
lines=csv.reader(file)
for line in lines:
l.append(line)#28001*784
l.remove(l[0])
data=array(l)
return nomalizing(toInt(data)) # data 28000*784
#return testData
def loadTestResult():
l=[]
with open('knn_benchmark.csv') as file:
lines=csv.reader(file)
for line in lines:
l.append(line)#28001*2
l.remove(l[0])
label=array(l)
return toInt(label[:,1]) # label 28000*1
#result is the result list (predicted labels)
#csvName is the name of the csv file used to store the results
def saveResult(result,csvName):
with open(csvName,'wb') as myFile:
myWriter=csv.writer(myFile)
for i in result:
tmp=[]
tmp.append(i)
myWriter.writerow(tmp)
# Use scikit-learn's kNN algorithm package
from sklearn.neighbors import KNeighborsClassifier
def knnClassify(trainData,trainLabel,testData):
knnClf=KNeighborsClassifier()#default:k = 5,defined by yourself:KNeighborsClassifier(n_neighbors=10)
knnClf.fit(trainData,ravel(trainLabel))
testLabel=knnClf.predict(testData)
saveResult(testLabel,'sklearn_knn_Result.csv')
return testLabel
# Use scikit-learn's SVM algorithm package
from sklearn import svm
def svcClassify(trainData,trainLabel,testData):
svcClf=svm.SVC(C=5.0) #default:C=1.0,kernel = 'rbf'. you can try kernel:‘linear’, ‘poly’, ‘rbf’, ‘sigmoid’, ‘precomputed’
svcClf.fit(trainData,ravel(trainLabel))
testLabel=svcClf.predict(testData)
saveResult(testLabel,'sklearn_SVC_C=5.0_Result.csv')
return testLabel
# Use scikit-learn's naive Bayes packages: GaussianNB and MultinomialNB
from sklearn.naive_bayes import GaussianNB # NB for Gaussian-distributed data
def GaussianNBClassify(trainData,trainLabel,testData):
nbClf=GaussianNB()
nbClf.fit(trainData,ravel(trainLabel))
testLabel=nbClf.predict(testData)
saveResult(testLabel,'sklearn_GaussianNB_Result.csv')
return testLabel
from sklearn.naive_bayes import MultinomialNB # NB for multinomially distributed data
def MultinomialNBClassify(trainData,trainLabel,testData):
nbClf=MultinomialNB(alpha=0.1) #default alpha=1.0,Setting alpha = 1 is called Laplace smoothing, while alpha < 1 is called Lidstone smoothing.
nbClf.fit(trainData,ravel(trainLabel))
testLabel=nbClf.predict(testData)
saveResult(testLabel,'sklearn_MultinomialNB_alpha=0.1_Result.csv')
return testLabel
def digitRecognition():
trainData,trainLabel=loadTrainData()
testData=loadTestData()
# Use the different algorithms
result1=knnClassify(trainData,trainLabel,testData)
result2=svcClassify(trainData,trainLabel,testData)
result3=GaussianNBClassify(trainData,trainLabel,testData)
result4=MultinomialNBClassify(trainData,trainLabel,testData)
# Compare the results with the given knn_benchmark, taking result1 as an example
resultGiven=loadTestResult()
m,n=shape(testData)
different=0 # number of labels in result1 that differ from the benchmark, initialized to 0
for i in xrange(m):
if result1[i]!=resultGiven[0,i]:
different+=1
print different
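# A minimal entry point for running the full comparison end-to-end (a sketch;
# it assumes train.csv, test.csv and knn_benchmark.csv are in the working
# directory, as the loaders above expect):
if __name__=="__main__":
    digitRecognition()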
| gpl-2.0 |
henrykironde/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters, and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0, when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub-clusters into one big
cluster. However, when ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence of similar sizes, as can also be
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
mtb-za/fatiando | fatiando/gravmag/__init__.py | 1 | 2110 | """
Gravity and magnetics forward modeling, inversion, transformations and
utilities.
Forward modeling
----------------
The forward modeling modules provide ways to calculate the gravitational and
magnetic field of various types of geometric objects:
* :mod:`~fatiando.gravmag.prism`: 3D right rectangular prisms
* :mod:`~fatiando.gravmag.polyprism`: 3D prisms with polygonal horizontal
cross-sections
* :mod:`~fatiando.gravmag.sphere`: Spheres in Cartesian coordinates
* :mod:`~fatiando.gravmag.tesseroid`: Tesseroids (spherical prisms) for
modeling in spherical coordinates
* :mod:`~fatiando.gravmag.talwani`: 2D bodies with polygonal vertical
cross-sections
Inversion
---------
The inversion modules use the forward modeling models and the
:mod:`fatiando.inversion` package to solve potential field inverse problems:
* :mod:`~fatiando.gravmag.basin2d`: 2D inversion of the shape of sedimentary
basins and other outcropping bodies
* :mod:`~fatiando.gravmag.harvester`: 3D inversion of compact bodies by
planting anomalous densities
* :mod:`~fatiando.gravmag.euler`: 3D Euler deconvolution methods to estimate
source location
* :mod:`~fatiando.gravmag.magdir`: Inversion methods to estimate the total
magnetization vector of multiple sources.
Processing
----------
The processing modules offer tools to prepare potential field data before or
after modeling.
* :mod:`~fatiando.gravmag.normal_gravity`: Compute normal gravity and
reductions.
* :mod:`~fatiando.gravmag.eqlayer`: Equivalent layer processing
* :mod:`~fatiando.gravmag.transform`: Potential field transformations,
like upward continuation, derivatives, etc
* :mod:`~fatiando.gravmag.imaging`: Imaging methods for potential fields for
estimating physical property distributions
* :mod:`~fatiando.gravmag.tensor`: Utilities for operating on the gradient
tensor
Interactivity
-------------
Module :mod:`~fatiando.gravmag.interactive` implements matplotlib GUIs and
IPython HTML widgets for interacting with the modeling and processing
functions.
----
"""
from .euler import EulerDeconv, EulerDeconvMW, EulerDeconvEW
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | dynamic_test/Newmark/gamma0.5/post.py | 9 | 2369 | #!/usr/bin/python
#Standard python libs
import sys
import os
# import datetime
import numpy as np
import h5py
import matplotlib.pyplot as plt
from math import *
#Libs related to scipy and matplotlib
from scipy import *
from scipy.fftpack import fft
from scipy.fftpack.helper import fftfreq
sys.path.append("./" )
# time_integrator_analysis was created by Jose.
# Jose's function was used directly here.
from time_integrator_analysis import findpeaks, measure_damping, hht_damping_and_shift
h5in_file_name=sys.argv[1]
gamma=sys.argv[2]
gamma=float(gamma)
h5file_in=h5py.File(h5in_file_name,"r")
# h5file_in=h5py.File("veri_newmark_dynamic.h5.feioutput","r")
disp=h5file_in['/Model/Nodes/Generalized_Displacements'][()]
time=h5file_in['/time'][()]
# The required displacement.
node_displ=disp[6][:]
last=len(node_displ)-1
len_time=len(time)
node_displ=np.delete(node_displ,[last],None)
dt=time[1]-time[0]
peak_indices=findpeaks(node_displ)
measured_period=time[peak_indices[2]]-time[peak_indices[1]]
alpha=0.0
N = node_displ.shape[0]
D = fft(node_displ[:])
f = fftfreq(N, dt)
xi, fs, Ys = measure_damping(f[0:N/2], abs(D[0:N/2]))
T_system=1.0
w = 2*pi/T_system
beta = 0.25*(0.5 + gamma)**2
wbar, xibar = hht_damping_and_shift(beta, gamma, alpha, w, dt)
T_theory = 2*pi/wbar
T_shift = (T_theory - T_system)/T_system*100
print "gamma=", gamma
print "xi=", xibar
print "T_shift=",T_shift
print "\n"
# My own method to calculate the theoretical wbar for Newmark method:
# I got the same result with the pre-existing one.
# dtw=dt*w
# numerator=dtw*sqrt(1+dtw**2*(beta-0.25*(gamma+0.5)**2))
# denominator=1+dtw**2*(beta-0.5*(gamma+0.5))w
# Phi=arctan(numerator/denominator)
# wbarmy=Phi/dt
# wbarmy
# # time=np.transpose(time)
# print ("%16.8f \n" %len(node_displ))
# print ("%16.8f \n" %len_time)
# print ("%16.8f \n" %node_displ[0])
# print ("%16.8f \n" %node_displ[1])
# print ("%16.8f \n" %time[0])
# print ("%16.8f \n" %time[1])
# xi=0.1
# u_0=0.1
# w_n=2*pi
# w_D=w_n*np.sqrt(1-xi**2)
# u_exact=np.exp(-xi*w_n*time)*(u_0*np.cos(time*w_D)+(xi*w_n*u_0)/w_D*np.sin(w_D*time))
# # print("time")
# # , (comma) cannot be ignored.
# u_essi,=plt.plot(time, node_displ,'ro-')
# u_disp,=plt.plot(time, u_exact,'b^--')
# plt.xlabel('Time')
# plt.ylabel('Displacement')
# plt.legend([u_essi, u_disp], ["ESSI", "Exact"])
# plt.show()
| cc0-1.0 |
thientu/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 157 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
larsmans/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 26 | 2870 | import numpy as np
from scipy.sparse import csr_matrix
from .... import datasets
from ..unsupervised import silhouette_score
from ... import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
"""Tests the Silhouette Coefficient. """
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
"""Assert Silhouette Coefficient != nan when there is 1 sample in a class.
This tests for the condition that caused issue 960.
"""
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
""" Assert 2 <= n_labels <= nsample -1 """
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
"Number of labels is %d "
"but should be more than 2"
"and less than n_samples - 1" % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
"Number of labels is %d "
"but should be more than 2"
"and less than n_samples - 1" % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
rgommers/statsmodels | statsmodels/regression/mixed_linear_model.py | 1 | 63016 | """
Linear mixed effects models for Statsmodels
The data are partitioned into disjoint groups. The probability model
for group i is:
Y = X*beta + Z*gamma + epsilon
where
* n_i is the number of observations in group i
* Y is a n_i dimensional response vector
* X is a n_i x k_fe dimensional matrix of fixed effects
coefficients
* beta is a k_fe-dimensional vector of fixed effects slopes
* Z is a n_i x k_re dimensional matrix of random effects
coefficients
* gamma is a k_re-dimensional random vector with mean 0
and covariance matrix Psi; note that each group
gets its own independent realization of gamma.
* epsilon is a n_i dimensional vector of iid normal
errors with mean 0 and variance sigma^2; the epsilon
values are independent both within and between groups
Y, X and Z must be entirely observed. beta, Psi, and sigma^2 are
estimated using ML or REML estimation, and gamma and epsilon are
random so define the probability model.
The mean structure is E[Y|X,Z] = X*beta. If only the mean structure
is of interest, GEE is a good alternative to mixed models.
The primary reference for the implementation details is:
MJ Lindstrom, DM Bates (1988). "Newton Raphson and EM algorithms for
linear mixed effects models for repeated measures data". Journal of
the American Statistical Association. Volume 83, Issue 404, pages
1014-1022.
See also this more recent document:
http://econ.ucsb.edu/~doug/245a/Papers/Mixed%20Effects%20Implement.pdf
All the likelihood, gradient, and Hessian calculations closely follow
Lindstrom and Bates.
The following two documents are written more from the perspective of
users:
http://lme4.r-forge.r-project.org/lMMwR/lrgprt.pdf
http://lme4.r-forge.r-project.org/slides/2009-07-07-Rennes/3Longitudinal-4.pdf
Notation:
* `cov_re` is the random effects covariance matrix (referred to above
as Psi) and `scale` is the (scalar) error variance. For a single
group, the marginal covariance matrix of endog given exog is scale*I
+ Z * cov_re * Z', where Z is the design matrix for the random
effects in one group.
Notes:
1. Three different parameterizations are used here in different
places. The regression slopes (usually called `fe_params`) are
identical in all three parameterizations, but the variance parameters
differ. The parameterizations are:
* The "natural parameterization" in which cov(endog) = scale*I + Z *
cov_re * Z', as described above. This is the main parameterization
visible to the user.
* The "profile parameterization" in which cov(endog) = I +
Z * cov_re1 * Z'. This is the parameterization of the profile
likelihood that is maximized to produce parameter estimates.
(see Lindstrom and Bates for details). The "natural" cov_re is
equal to the "profile" cov_re1 times scale.
* The "square root parameterization" in which we work with the
Cholesky factor of cov_re1 instead of cov_re1 directly.
All three parameterizations can be "packed" by concatenating fe_params
together with the lower triangle of the dependence structure. Note
that when unpacking, it is important to either square or reflect the
dependence structure depending on which parameterization is being
used.
2. The situation where the random effects covariance matrix is
singular is numerically challenging. Small changes in the covariance
parameters may lead to large changes in the likelihood and
derivatives.
3. The optimization strategy is to optionally perform a few EM steps,
followed by optionally performing a few steepest descent steps,
followed by conjugate gradient descent using one of the scipy gradient
optimizers. The EM and steepest descent steps are used to get
adequate starting values for the conjugate gradient optimization,
which is much faster.
"""
import numpy as np
import statsmodels.base.model as base
from scipy.optimize import fmin_ncg, fmin_cg, fmin_bfgs, fmin
from scipy.stats.distributions import norm
import pandas as pd
import patsy
from statsmodels.compat.collections import OrderedDict
from statsmodels.compat import range
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.base._penalties import Penalty
# This is a global switch to use direct linear algebra calculations
# for solving factor-structured linear systems and calculating
# factor-structured determinants. If False, use the
# Sherman-Morrison-Woodbury update which is more efficient for
# factor-structured matrices. Should be False except when testing.
_no_smw = False
def _smw_solve(s, A, B, BI, rhs):
"""
Solves the system (s*I + A*B*A') * x = rhs for x and returns x.
Parameters:
-----------
s : scalar
See above for usage
A : square symmetric ndarray
See above for usage
B : square symmetric ndarray
See above for usage
BI : square symmetric ndarray
The inverse of `B`. Can be None if B is singular
rhs : ndarray
See above for usage
Returns:
--------
x : ndarray
See above
If the global variable `_no_smw` is True, this routine uses direct
linear algebra calculations. Otherwise it uses the
Sherman-Morrison-Woodbury identity to speed up the calculation.
"""
# Direct calculation
if _no_smw or BI is None:
mat = np.dot(A, np.dot(B, A.T))
mat += s * np.eye(A.shape[0])
return np.linalg.solve(mat, rhs)
# Use SMW identity
qmat = BI + np.dot(A.T, A) / s
u = np.dot(A.T, rhs)
qmat = np.linalg.solve(qmat, u)
qmat = np.dot(A, qmat)
rslt = rhs / s - qmat / s**2
return rslt
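# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): numerically
# checks _smw_solve against a direct dense solve.  The data are arbitrary.
def _smw_solve_check():
    rng = np.random.RandomState(1)
    s = 1.5
    A = rng.randn(6, 2)
    C = rng.randn(2, 2)
    B = np.dot(C, C.T) + np.eye(2)
    BI = np.linalg.inv(B)
    rhs = rng.randn(6)
    x_smw = _smw_solve(s, A, B, BI, rhs)
    x_direct = np.linalg.solve(s * np.eye(6) + np.dot(A, np.dot(B, A.T)), rhs)
    assert np.allclose(x_smw, x_direct)
# ---------------------------------------------------------------------------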
def _smw_logdet(s, A, B, BI, B_logdet):
"""
Use the matrix determinant lemma to accelerate the calculation of
the log determinant of s*I + A*B*A'.
Parameters:
-----------
s : scalar
See above for usage
    A : 2d ndarray
See above for usage
B : square symmetric ndarray
See above for usage
BI : square symmetric ndarray
The inverse of `B`; can be None if B is singular.
B_logdet : real
The log determinant of B
"""
if _no_smw or BI is None:
mat = np.dot(A, np.dot(B, A.T))
mat += s * np.eye(A.shape[0])
_, ld = np.linalg.slogdet(mat)
return ld
p = A.shape[0]
ld = p * np.log(s)
qmat = BI + np.dot(A.T, A) / s
_, ld1 = np.linalg.slogdet(qmat)
return B_logdet + ld + ld1
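# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): numerically
# checks _smw_logdet against numpy's slogdet.  The data are arbitrary.
def _smw_logdet_check():
    rng = np.random.RandomState(2)
    s = 0.5
    A = rng.randn(6, 2)
    C = rng.randn(2, 2)
    B = np.dot(C, C.T) + np.eye(2)
    BI = np.linalg.inv(B)
    _, B_logdet = np.linalg.slogdet(B)
    ld_smw = _smw_logdet(s, A, B, BI, B_logdet)
    _, ld_direct = np.linalg.slogdet(s * np.eye(6) + np.dot(A, np.dot(B, A.T)))
    assert np.allclose(ld_smw, ld_direct)
# ---------------------------------------------------------------------------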
class MixedLM(base.LikelihoodModel):
"""
An object specifying a linear mixed effects model. Use the `fit`
method to fit the model and obtain a results object.
Arguments:
----------
endog : 1d array-like
The dependent variable
exog : 2d array-like
A matrix of covariates used to determine the
mean structure (the "fixed effects" covariates).
groups : 1d array-like
A vector of labels determining the groups -- data from
different groups are independent
exog_re : 2d array-like
A matrix of covariates used to determine the variance and
covariance structure (the "random effects" covariates). If
None, defaults to a random intercept for each of the groups.
May also be set from a formula using a call to `set_random`.
use_sqrt : bool
If True, optimization is carried out using the lower
triangle of the square root of the random effects
covariance matrix, otherwise it is carried out using the
lower triangle of the random effects covariance matrix.
missing : string
The approach to missing data handling
Notes:
------
The covariates in `exog` and `exog_re` may (but need not)
partially or wholly overlap.
`use_sqrt` should almost always be set to True. The main use case
for use_sqrt=False is when complicated patterns of fixed values in
the covariance structure are set (using the `free` argument to
`fit`) that cannot be expressed in terms of the Cholesky factor L.
"""
def __init__(self, endog, exog, groups, exog_re=None,
use_sqrt=True, missing='none'):
self.use_sqrt = use_sqrt
# Some defaults
self.reml = True
self.fe_pen = None
self.re_pen = None
self.score_pat = 1.
# If there is one covariate, it may be passed in as a column
# vector, convert these to 2d arrays.
# TODO: Can this be moved up in the class hierarchy?
if exog is not None and exog.ndim == 1:
exog = exog[:,None]
if exog_re is not None and exog_re.ndim == 1:
exog_re = exog_re[:,None]
# Calling super creates self.endog, etc. as ndarrays and the
# original exog, endog, etc. are self.data.endog, etc.
super(MixedLM, self).__init__(endog, exog, groups=groups,
exog_re=exog_re, missing=missing)
if exog_re is None:
# Default random effects structure (random intercepts).
self.exog_re = np.ones((len(endog), 1), dtype=np.float64)
self.data.exog_re = self.exog_re
else:
# Process exog_re the same way that exog is handled
# upstream
self.data.exog_re = exog_re
self.exog_re = np.asarray(exog_re)
# Model dimensions
self.k_fe = exog.shape[1] # Number of fixed effects parameters
if exog_re is not None:
# Number of random effect covariates
self.k_re = exog_re.shape[1]
# Number of covariance parameters
self.k_re2 = self.k_re * (self.k_re + 1) // 2
else:
self.k_re = 1 # Default (random intercepts model)
self.k_re2 = 1
# Override the default value
self.nparams = self.k_fe + self.k_re2
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
group_labels = list(set(groups))
group_labels.sort()
row_indices = dict((s, []) for s in group_labels)
for i,g in enumerate(groups):
row_indices[g].append(i)
self.row_indices = row_indices
self.group_labels = group_labels
self.n_groups = len(self.group_labels)
# Split the data by groups
self.endog_li = self.group_list(self.endog)
self.exog_li = self.group_list(self.exog)
self.exog_re_li = self.group_list(self.exog_re)
# The total number of observations, summed over all groups
self.n_totobs = sum([len(y) for y in self.endog_li])
# Set the fixed effects parameter names
if self.exog_names is None:
self.exog_names = ["FE%d" % (k + 1) for k in
range(self.exog.shape[1])]
# Set the random effect parameter names
if isinstance(self.exog_re, pd.DataFrame):
self.exog_re_names = list(self.exog_re.columns)
else:
self.exog_re_names = ["Z%d" % (k+1) for k in
range(self.exog_re.shape[1])]
@classmethod
def from_formula(cls, formula, data, re_formula=None, subset=None,
*args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
re_formula : string
A one-sided formula defining the variance structure of the
model. The default gives a random intercept for each
group.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model.
Returns
-------
model : Model instance
Notes
------
        `data` must define __getitem__ with the keys in the formula
        terms. E.g., a numpy structured or rec array, a dictionary, or
        a pandas DataFrame.
        `args` and `kwargs` are passed on to the model instantiation.
If `re_formula` is not provided, the default is a random
intercept for each group.
This method currently does not correctly handle missing
values, so missing values should be explicitly dropped from
the DataFrame before calling this method.
"""
if "groups" not in kwargs.keys():
raise AttributeError("'groups' is a required keyword argument in MixedLM.from_formula")
# If `groups` is a variable name, retrieve the data for the
# groups variable.
if type(kwargs["groups"]) == str:
kwargs["groups"] = np.asarray(data[kwargs["groups"]])
if re_formula is not None:
exog_re = patsy.dmatrix(re_formula, data)
exog_re_names = exog_re.design_info.column_names
exog_re = np.asarray(exog_re)
else:
exog_re = np.ones((data.shape[0], 1),
dtype=np.float64)
exog_re_names = ["Intercept",]
mod = super(MixedLM, cls).from_formula(formula, data,
                                               subset=subset,
exog_re=exog_re,
*args, **kwargs)
return mod
def group_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
grouping structure.
"""
if array.ndim == 1:
return [np.array(array[self.row_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.row_indices[k], :])
for k in self.group_labels]
def fit_regularized(self, start_params=None, method='l1', alpha=0,
ceps=1e-4, ptol=1e-6, maxit=200, **fit_args):
"""
Fit a model in which the fixed effects parameters are
penalized. The dependence parameters are held fixed at their
estimated values in the unpenalized model.
Parameters:
-----------
        method : string or Penalty object
Method for regularization. If a string, must be 'l1'.
alpha : array-like
Scalar or vector of penalty weights. If a scalar, the
same weight is applied to all coefficients; if a vector,
it contains a weight for each coefficient. If method is a
Penalty object, the weights are scaled by alpha. For L1
regularization, the weights are used directly.
ceps : positive real scalar
Fixed effects parameters smaller than this value
            in magnitude are treated as being zero.
ptol : positive real scalar
Convergence occurs when the sup norm difference
between successive values of `fe_params` is less than
`ptol`.
maxit : integer
The maximum number of iterations.
fit_args :
Additional arguments passed to fit.
Returns:
--------
A MixedLMResults instance containing the results.
Notes:
------
The covariance structure is not updated as the fixed effects
parameters are varied.
        The algorithm used here for L1 regularization is a "shooting"
or cyclic coordinate descent algorithm.
If method is 'l1', then `fe_pen` and `cov_pen` are used to
obtain the covariance structure, but are ignored during the
L1-penalized fitting.
References:
-----------
Friedman, J. H., Hastie, T. and Tibshirani, R. Regularized
Paths for Generalized Linear Models via Coordinate
Descent. Journal of Statistical Software, 33(1) (2008)
http://www.jstatsoft.org/v33/i01/paper
http://statweb.stanford.edu/~tibs/stat315a/Supplements/fuse.pdf
"""
if type(method) == str and (method.lower() != 'l1'):
raise ValueError("Invalid regularization method")
# If method is a smooth penalty just optimize directly.
if isinstance(method, Penalty):
fit_args = dict(fit_args)
# Scale the penalty weights by alpha
method.alpha = alpha
fit_args.update({"fe_pen": method})
return self.fit(**fit_args)
if np.isscalar(alpha):
alpha = alpha * np.ones(self.k_fe, dtype=np.float64)
# Fit the unpenalized model to get the dependence structure.
mdf = self.fit(**fit_args)
fe_params = mdf.fe_params
cov_re = mdf.cov_re
scale = mdf.scale
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
for itr in range(maxit):
fe_params_s = fe_params.copy()
for j in range(self.k_fe):
if abs(fe_params[j]) < ceps:
continue
# The residuals
fe_params[j] = 0.
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
# The loss function has the form
# a*x^2 + b*x + pwt*|x|
a, b = 0., 0.
for k, lab in enumerate(self.group_labels):
exog = self.exog_li[k]
ex_r = self.exog_re_li[k]
resid = resid_all[self.row_indices[lab]]
x = exog[:,j]
u = _smw_solve(scale, ex_r, cov_re, cov_re_inv, x)
a += np.dot(u, x)
b -= 2 * np.dot(u, resid)
pwt1 = alpha[j]
if b > pwt1:
fe_params[j] = -(b - pwt1) / (2 * a)
elif b < -pwt1:
fe_params[j] = -(b + pwt1) / (2 * a)
if np.abs(fe_params_s - fe_params).max() < ptol:
break
# Replace the fixed effects estimates with their penalized
# values, leave the dependence parameters in their unpenalized
# state.
params_prof = mdf.params.copy()
params_prof[0:self.k_fe] = fe_params
scale = self.get_scale(fe_params, mdf.cov_re_unscaled)
# Get the Hessian including only the nonzero fixed effects,
# then blow back up to the full size after inverting.
hess = self.hessian(params_prof)
pcov = np.nan * np.ones_like(hess)
ii = np.abs(params_prof) > ceps
ii[self.k_fe:] = True
ii = np.flatnonzero(ii)
hess1 = hess[ii, :][:, ii]
pcov[np.ix_(ii,ii)] = np.linalg.inv(-hess1)
results = MixedLMResults(self, params_prof, pcov / scale)
results.fe_params = fe_params
results.cov_re = cov_re
results.scale = scale
results.cov_re_unscaled = mdf.cov_re_unscaled
results.method = mdf.method
results.converged = True
results.cov_pen = self.cov_pen
results.likeval = self.loglike(params_prof)
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
return results
def hessian(self, params):
"""
Hessian of log-likelihood evaluated at `params`, calculated
numerically.
Parameters
----------
params : array-like
The model parameters, packed into a 1d array.
Returns
-------
The Hessian matrix of the log likelihood function, evaluated
at params.
Notes
-----
The Hessian is calculated with respect to either the lower
triangle of `cov_re` or its lower triangular square root (L)
depending on the value of `use_sqrt`.
`hessian_full` is an analytic Hessian calculation, always
calculated with respect to cov_re
"""
from statsmodels.tools.numdiff import approx_hess_cs
hmat = approx_hess_cs(params, self.loglike)
return hmat.real
def _unpack(self, params, sym=True):
"""
Takes as input the packed parameter vector and returns a
vector containing the regression slopes and a matrix defining
the dependence structure.
Arguments:
----------
params : array-like
The packed parameters
sym : bool
If true, the variance parameters are returned as a symmetric
matrix; if False, the variance parameters are returned as a
lower triangular matrix.
Returns:
--------
params : 1d ndarray
The fixed effects coefficients
cov_re : 2d ndarray
The random effects covariance matrix
"""
fe_params = params[0:self.k_fe]
re_params = params[self.k_fe:]
# Unpack the covariance matrix of the random effects.
# approx_hess_cs uses complex params values, so cov_re needs
# to hold complex values if params is complex
cov_re = np.zeros((self.k_re, self.k_re), dtype=params.dtype)
ix = np.tril_indices(self.k_re)
cov_re[ix] = re_params
if sym:
cov_re = (cov_re + cov_re.T) - np.diag(np.diag(cov_re))
return fe_params, cov_re
def _pack(self, vec, mat):
"""
Packs the model parameters into a single vector.
Arguments
---------
vec : 1d ndarray
A vector
mat : 2d ndarray
An (assumed) symmetric matrix
Returns
-------
params : 1d ndarray
The vector and the lower triangle of the matrix,
concatenated.
"""
ix = np.tril_indices(mat.shape[0])
return np.concatenate((vec, mat[ix]))
def loglike_full(self, params):
"""
Evaluate the (profile) log-likelihood of the linear mixed
effects model, using a parameterization in which the random
effects covariance matrix is represented by its lower
triangle. Note that this is the profile likelihood in which
the scale parameter scale has been profiled out.
Arguments
---------
params : 1d ndarray
The parameter values, packed into a single vector. See
below for details.
Returns
-------
likeval : scalar
The log-likelihood value at `params`.
Notes
-----
The first p elements of the packed vector are the regression
slopes, and the remaining q*(q+1)/2 elements are the lower
triangle of the random effects covariance matrix Psi, packed
row-wise. The matrix Psi is used to form the covariance
matrix V = I + Z * Psi * Z', where Z is the design matrix for
the random effects structure. To convert this to the full
likelihood (not profiled) parameterization, calculate the
error variance scale, and divide Psi by scale.
"""
fe_params, cov_re = self._unpack(params)
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
_, cov_re_logdet = np.linalg.slogdet(cov_re)
# The residuals
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
likeval = 0.
# Handle the covariance penalty
if self.cov_pen is not None:
likeval -= self.cov_pen.func(cov_re, cov_re_inv)
# Handle the fixed effects penalty
if self.fe_pen is not None:
likeval -= self.fe_pen.func(fe_params)
xvx, qf = 0., 0.
for k, lab in enumerate(self.group_labels):
exog = self.exog_li[k]
ex_r = self.exog_re_li[k]
resid = resid_all[self.row_indices[lab]]
# Part 1 of the log likelihood (for both ML and REML)
ld = _smw_logdet(1., ex_r, cov_re, cov_re_inv,
cov_re_logdet)
likeval -= ld / 2.
# Part 2 of the log likelihood (for both ML and REML)
u = _smw_solve(1., ex_r, cov_re, cov_re_inv, resid)
qf += np.dot(resid, u)
# Adjustment for REML
if self.reml:
mat = _smw_solve(1., ex_r, cov_re, cov_re_inv, exog)
xvx += np.dot(exog.T, mat)
if self.reml:
likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.
_,ld = np.linalg.slogdet(xvx)
likeval -= ld / 2.
likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.
likeval += ((self.n_totobs - self.k_fe) *
np.log(self.n_totobs - self.k_fe) / 2.)
likeval -= (self.n_totobs - self.k_fe) / 2.
else:
likeval -= self.n_totobs * np.log(qf) / 2.
likeval -= self.n_totobs * np.log(2 * np.pi) / 2.
likeval += self.n_totobs * np.log(self.n_totobs) / 2.
likeval -= self.n_totobs / 2.
return likeval
def loglike(self, params):
"""
Evaluate the (profile) log-likelihood of the linear mixed
effects model. Note that this is the profile likelihood in
which the scale parameter scale has been profiled out.
Arguments
---------
params : 1d ndarray
The parameter values, packed into a single vector. See
below for details.
Returns
-------
likeval : scalar
The log-likelihood value at `params`.
"""
if self.use_sqrt:
return self.loglike_sqrt(params)
else:
return self.loglike_full(params)
def _gen_dV_dPsi(self, ex_r, max_ix=None):
"""
A generator that yields the derivative of the covariance
matrix V (=I + Z*Psi*Z') with respect to the free elements of
Psi. Each call to the generator yields the index of Psi with
respect to which the derivative is taken, and the derivative
matrix with respect to that element of Psi. Psi is a
symmetric matrix, so the free elements are the lower triangle.
If max_ix is not None, the iterations terminate after max_ix
values are yielded.
"""
jj = 0
for j1 in range(self.k_re):
for j2 in range(j1 + 1):
if max_ix is not None and jj > max_ix:
return
mat = np.outer(ex_r[:,j1], ex_r[:,j2])
if j1 != j2:
mat += mat.T
yield jj,mat
jj += 1
def score_full(self, params):
"""
Calculates the score vector for the mixed effects model, using
a parameterization in which the random effects covariance
matrix is represented by its lower triangle. Note that this
is the score for the profile likelihood in which the scale
parameter scale has been profiled out.
Parameters
----------
params : 1d ndarray
The model parameters in packed form
Returns
-------
scorevec : 1d ndarray
The score vector, calculated at `params`.
"""
fe_params, cov_re = self._unpack(params)
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
score_fe = np.zeros(self.k_fe, dtype=np.float64)
score_re = np.zeros(self.k_re2, dtype=np.float64)
# Handle the covariance penalty.
if self.cov_pen is not None:
score_re -= self.cov_pen.grad(cov_re, cov_re_inv)
# Handle the fixed effects penalty.
if self.fe_pen is not None:
score_fe -= self.fe_pen.grad(fe_params)
# resid' V^{-1} resid, summed over the groups (a scalar)
rvir = 0.
# exog' V^{-1} resid, summed over the groups (a k_fe
# dimensional vector)
xtvir = 0.
# exog' V^{_1} exog, summed over the groups (a k_fe x k_fe
# matrix)
xtvix = 0.
# V^{-1} exog' dV/dQ_jj exog V^{-1}, where Q_jj is the jj^th
# covariance parameter.
xtax = [0.,] * self.k_re2
# Temporary related to the gradient of log |V|
dlv = np.zeros(self.k_re2, dtype=np.float64)
# resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)
rvavr = np.zeros(self.k_re2, dtype=np.float64)
for k in range(self.n_groups):
exog = self.exog_li[k]
ex_r = self.exog_re_li[k]
# The residuals
expval = np.dot(exog, fe_params)
resid = self.endog_li[k] - expval
if self.reml:
viexog = _smw_solve(1., ex_r, cov_re, cov_re_inv, exog)
xtvix += np.dot(exog.T, viexog)
# Contributions to the covariance parameter gradient
jj = 0
vex = _smw_solve(1., ex_r, cov_re, cov_re_inv, ex_r)
vir = _smw_solve(1., ex_r, cov_re, cov_re_inv, resid)
for jj,mat in self._gen_dV_dPsi(ex_r):
dlv[jj] = np.trace(_smw_solve(1., ex_r, cov_re,
cov_re_inv, mat))
rvavr[jj] += np.dot(vir, np.dot(mat, vir))
if self.reml:
xtax[jj] += np.dot(viexog.T, np.dot(mat, viexog))
# Contribution of log|V| to the covariance parameter
# gradient.
score_re -= 0.5 * dlv
            # Needed for the fixed effects params gradient
rvir += np.dot(resid, vir)
xtvir += np.dot(exog.T, vir)
fac = self.n_totobs
if self.reml:
fac -= self.exog.shape[1]
score_fe += fac * xtvir / rvir
score_re += 0.5 * fac * rvavr / rvir
if self.reml:
for j in range(self.k_re2):
score_re[j] += 0.5 * np.trace(np.linalg.solve(
xtvix, xtax[j]))
return np.concatenate((score_fe, score_re))
def score(self, params):
"""
Calculates the score vector for the mixed effects model. Note
that this is the score vector for the profile likelihood in
which the scale parameter scale has been profiled out.
Parameters
----------
params : 1d ndarray
All model parameters in packed form
Returns
-------
scorevec : 1d ndarray
The score vector, calculated at `params`.
"""
if self.use_sqrt:
scr = self.score_pat * self.score_sqrt(params)
else:
scr = self.score_pat * self.score_full(params)
return scr
def loglike_sqrt(self, params):
"""
Returns the log likelihood evaluated at a given point, for the
parameterization in which the random effects covariance matrix
is represented by the lower triangle of its Cholesky factor.
Arguments:
----------
params : array-like
The model parameters (for the profile likelihood) in
packed form. The first p elements are the regression
slopes, and the remaining elements are the lower triangle
of a lower triangular matrix L such that Psi = LL'
Returns:
--------
The value of the log-likelihood or REML criterion.
"""
fe_params, L = self._unpack(params, sym=False)
cov_re = np.dot(L, L.T)
params_r = self._pack(fe_params, cov_re)
likeval = self.loglike_full(params_r)
return likeval
def score_sqrt(self, params):
"""
Returns the score vector evaluated at a given point, using a
parameterization in which the random effects covariance matrix
is represented by the lower triangle of its Cholesky factor.
Arguments:
----------
params : array-like
The model parameters (for the profile likelihood) in
packed form. The first p elements are the regression
slopes, and the remaining elements are the lower triangle
of a lower triangular matrix L such that Psi = LL'
Returns:
--------
The score vector for the log-likelihood or REML criterion.
"""
fe_params, L = self._unpack(params, sym=False)
cov_re = np.dot(L, L.T)
params_f = self._pack(fe_params, cov_re)
svec = self.score_full(params_f)
s_fe, s_re = self._unpack(svec, sym=False)
# Use the chain rule to get d/dL from d/dPsi
s_l = np.zeros(self.k_re2, dtype=np.float64)
jj = 0
for i in range(self.k_re):
for j in range(i+1):
s_l[jj] += np.dot(s_re[:,i], L[:,j])
s_l[jj] += np.dot(s_re[i,:], L[:,j])
jj += 1
gr = np.concatenate((s_fe, s_l))
return gr
def hessian_full(self, params):
"""
Calculates the Hessian matrix for the mixed effects model.
Specifically, this is the Hessian matrix for the profile
likelihood in which the scale parameter scale has been profiled
out. The parameters are passed in packed form, with only the
lower triangle of the covariance passed.
Parameters
----------
params : 1d ndarray
All model parameters in packed form
Returns
-------
hess : 2d ndarray
The Hessian matrix, evaluated at `params`.
"""
fe_params, cov_re = self._unpack(params)
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
# Blocks for the fixed and random effects parameters.
hess_fe = 0.
hess_re = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)
hess_fere = np.zeros((self.k_re2, self.k_fe),
dtype=np.float64)
fac = self.n_totobs
if self.reml:
fac -= self.exog.shape[1]
rvir = 0.
xtvix = 0.
xtax = [0.,] * self.k_re2
B = np.zeros(self.k_re2, dtype=np.float64)
D = np.zeros((self.k_re2, self.k_re2), dtype=np.float64)
F = [[0.,]*self.k_re2 for k in range(self.k_re2)]
for k in range(self.n_groups):
exog = self.exog_li[k]
ex_r = self.exog_re_li[k]
# The residuals
expval = np.dot(exog, fe_params)
resid = self.endog_li[k] - expval
viexog = _smw_solve(1., ex_r, cov_re, cov_re_inv, exog)
xtvix += np.dot(exog.T, viexog)
vir = _smw_solve(1., ex_r, cov_re, cov_re_inv, resid)
rvir += np.dot(resid, vir)
for jj1,mat1 in self._gen_dV_dPsi(ex_r):
hess_fere[jj1,:] += np.dot(viexog.T,
np.dot(mat1, vir))
if self.reml:
xtax[jj1] += np.dot(viexog.T, np.dot(mat1, viexog))
B[jj1] += np.dot(vir, np.dot(mat1, vir))
E = _smw_solve(1., ex_r, cov_re, cov_re_inv, mat1)
for jj2,mat2 in self._gen_dV_dPsi(ex_r, jj1):
Q = np.dot(mat2, E)
Q1 = Q + Q.T
vt = np.dot(vir, np.dot(Q1, vir))
D[jj1, jj2] += vt
if jj1 != jj2:
D[jj2, jj1] += vt
R = _smw_solve(1., ex_r, cov_re, cov_re_inv, Q)
rt = np.trace(R) / 2
hess_re[jj1, jj2] += rt
if jj1 != jj2:
hess_re[jj2, jj1] += rt
if self.reml:
F[jj1][jj2] += np.dot(viexog.T,
np.dot(Q, viexog))
hess_fe -= fac * xtvix / rvir
hess_re -= 0.5 * fac * (D / rvir - np.outer(B, B) / rvir**2)
hess_fere = -fac * hess_fere / rvir
if self.reml:
for j1 in range(self.k_re2):
Q1 = np.linalg.solve(xtvix, xtax[j1])
for j2 in range(j1 + 1):
Q2 = np.linalg.solve(xtvix, xtax[j2])
a = np.trace(np.dot(Q1, Q2))
a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))
a *= 0.5
hess_re[j1, j2] += a
if j1 > j2:
hess_re[j2, j1] += a
# Put the blocks together to get the Hessian.
m = self.k_fe + self.k_re2
hess = np.zeros((m, m), dtype=np.float64)
hess[0:self.k_fe, 0:self.k_fe] = hess_fe
hess[0:self.k_fe, self.k_fe:] = hess_fere.T
hess[self.k_fe:, 0:self.k_fe] = hess_fere
hess[self.k_fe:, self.k_fe:] = hess_re
return hess
def Estep(self, fe_params, cov_re, scale):
"""
The E-step of the EM algorithm. This is for ML (not REML),
but it seems to be good enough to use for REML starting
values.
Parameters
----------
fe_params : 1d ndarray
The current value of the fixed effect coefficients
cov_re : 2d ndarray
The current value of the covariance matrix of random
effects
scale : positive scalar
The current value of the error variance
Returns
-------
m1x : 1d ndarray
sum_groups X'*Z*E[gamma | Y], where X and Z are the fixed
and random effects covariates, gamma is the random
effects, and Y is the observed data
m1y : scalar
            sum_groups Y'*Z*E[gamma | Y]
m2 : 2d ndarray
sum_groups E[gamma * gamma' | Y]
m2xx : 2d ndarray
sum_groups Z'*Z * E[gamma * gamma' | Y]
"""
m1x, m1y, m2, m2xx = 0., 0., 0., 0.
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
for k in range(self.n_groups):
# Get the residuals
expval = np.dot(self.exog_li[k], fe_params)
resid = self.endog_li[k] - expval
            # Construct the marginal covariance matrix for this group
ex_r = self.exog_re_li[k]
vr1 = _smw_solve(scale, ex_r, cov_re, cov_re_inv, resid)
vr1 = np.dot(ex_r.T, vr1)
vr1 = np.dot(cov_re, vr1)
vr2 = _smw_solve(scale, ex_r, cov_re, cov_re_inv,
self.exog_re_li[k])
vr2 = np.dot(vr2, cov_re)
vr2 = np.dot(ex_r.T, vr2)
vr2 = np.dot(cov_re, vr2)
rg = np.dot(ex_r, vr1)
m1x += np.dot(self.exog_li[k].T, rg)
m1y += np.dot(self.endog_li[k].T, rg)
egg = cov_re - vr2 + np.outer(vr1, vr1)
m2 += egg
m2xx += np.dot(np.dot(ex_r.T, ex_r), egg)
return m1x, m1y, m2, m2xx
def EM(self, fe_params, cov_re, scale, niter_em=10,
hist=None):
"""
Run the EM algorithm from a given starting point. This is for
ML (not REML), but it seems to be good enough to use for REML
starting values.
Returns
-------
fe_params : 1d ndarray
The final value of the fixed effects coefficients
cov_re : 2d ndarray
The final value of the random effects covariance
matrix
scale : float
The final value of the error variance
Notes
-----
This uses the parameterization of the likelihood scale*I +
        Z*V*Z', note that this differs from the profile likelihood
used in the gradient calculations.
"""
xxtot = 0.
for x in self.exog_li:
xxtot += np.dot(x.T, x)
xytot = 0.
for x,y in zip(self.exog_li, self.endog_li):
xytot += np.dot(x.T, y)
pp = []
for itr in range(niter_em):
m1x, m1y, m2, m2xx = self.Estep(fe_params, cov_re, scale)
fe_params = np.linalg.solve(xxtot, xytot - m1x)
cov_re = m2 / self.n_groups
scale = 0.
for x,y in zip(self.exog_li, self.endog_li):
scale += np.sum((y - np.dot(x, fe_params))**2)
scale -= 2 * m1y
scale += 2 * np.dot(fe_params, m1x)
scale += np.trace(m2xx)
scale /= self.n_totobs
if hist is not None:
hist.append(["EM", fe_params, cov_re, scale])
return fe_params, cov_re, scale
def get_scale(self, fe_params, cov_re):
"""
Returns the estimated error variance based on given estimates
of the slopes and random effects covariance matrix.
Arguments:
----------
fe_params : array-like
The regression slope estimates
cov_re : 2d array
Estimate of the random effects covariance matrix (Psi).
Returns:
--------
scale : float
The estimated error variance.
"""
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
qf = 0.
for k in range(self.n_groups):
exog = self.exog_li[k]
ex_r = self.exog_re_li[k]
# The residuals
expval = np.dot(exog, fe_params)
resid = self.endog_li[k] - expval
mat = _smw_solve(1., ex_r, cov_re, cov_re_inv, resid)
qf += np.dot(resid, mat)
if self.reml:
qf /= (self.n_totobs - self.k_fe)
else:
qf /= self.n_totobs
return qf
def _steepest_descent(self, func, params, score, gtol=1e-4,
max_iter=50):
"""
Uses the steepest descent algorithm to minimize a function.
Arguments:
----------
func : function
The real-valued function to minimize.
params : array-like
A point in the domain of `func`, used as the starting
point for the iterative minimization.
score : function
A function implementing the score vector (gradient) of
`func`.
gtol : non-negative float
Return if the sup norm of the score vector is less than
this value.
max_iter: non-negative integer
            Return once this number of iterations has occurred.
Returns:
--------
params_out : array-like
The final value of the iterations
success : bool
True if the final score vector has sup-norm no larger
than `gtol`.
"""
fval = func(params)
if max_iter == False:
return params, False
for itr in range(max_iter):
gro = score(params)
gr = gro / np.max(np.abs(gro))
sl = 1.
success = False
while sl > 1e-20:
params1 = params - sl * gr
fval1 = func(params1)
if fval1 < fval:
params = params1
fval = fval1
success = True
break
sl /= 2
if not success:
break
return params, np.max(np.abs(gro)) < gtol
def _starting_values(self, start_params):
if type(start_params) in [np.ndarray, pd.Series]:
return np.asarray(start_params)
ix = np.tril_indices(self.k_re)
if start_params is None:
start_params = {}
if "fe" in start_params:
fe_params = start_params["fe"]
else:
fe_params = np.zeros(self.exog.shape[1], dtype=np.float64)
if "cov_re_sqrt_unscaled" in start_params:
re_params = start_params["cov_re_sqrt_unscaled"]
if not self.use_sqrt:
mat = np.zeros((self.k_re, self.k_re), dtype=np.float64)
mat[ix] = re_params
mat = np.dot(mat, mat.T)
re_params = mat[ix]
elif "cov_re" in start_params:
cov_re_unscaled = start_params["cov_re"] / start_params["scale"]
if self.use_sqrt:
cov_re_sqrt_unscaled = np.linalg.cholesky(cov_re_unscaled)
re_params = cov_re_sqrt_unscaled[ix]
else:
re_params = cov_re_unscaled[ix]
else:
re_params = np.eye(self.k_re)[ix]
return np.concatenate((fe_params, re_params))
def fit(self, start_params=None, reml=True, niter_sd=1,
niter_em=0, do_cg=True, fe_pen=None, cov_pen=None,
free=None, full_output=False, **kwargs):
"""
Fit a linear mixed model to the data.
Parameters
----------
start_params: array-like or dict
If array-like, it is a 1d vector containing the starting
values in the internal parameterization. If it is a
dictionary, it contains starting values for each component
separately. `start["fe"]` contains starting values for
the fixed effects regression slopes. `start["cov_re"]`
contains the covariance matrix of random effects as found
in the `cov_re` component of MixedLMResults. If
`start["cov_re"]` is provided, then `start["scale"]` must
also be provided (this is the error variance).
Alternatively, the random effects may be specified as
`start["cov_re_sqrt_unscaled"]`, which is the packed lower
            triangle of the Cholesky square root of the covariance
            matrix in the profile parameterization (in this case scale
            is not used).
reml : bool
If true, fit according to the REML likelihood, else
fit the standard likelihood using ML.
niter_sd : integer
The number of steepest descent iterations
niter_em : non-negative integer
The number of EM steps. The EM steps always
            precede steepest descent and conjugate gradient
optimization. The EM algorithm implemented here
is for ML estimation.
do_cg : bool
If True, a conjugate gradient algorithm is
used for optimization (following any steepest
descent or EM steps).
cov_pen : CovariancePenalty object
A penalty for the random effects covariance matrix
fe_pen : Penalty object
A penalty on the fixed effects
free : tuple of ndarrays
If not `None`, this is a tuple of length 2 containing 2
0/1 indicator arrays. The first element of `free`
corresponds to the regression slopes and the second
element of `free` corresponds to the random effects
            covariance matrix (if `use_sqrt` is False) or its square root
(if `use_sqrt` is True). A 1 in either array indicates that
the corresponding parameter is estimated, a 0 indicates
            that it is fixed at its starting value. One use case is
to set free[1] to the identity matrix to estimate a model
with independent random effects.
full_output : bool
If true, attach iteration history to results
Returns
-------
A MixedLMResults instance.
"""
self.reml = reml
self.cov_pen = cov_pen
self.fe_pen = fe_pen
self._set_score_pattern(free)
# Needed for steepest descent
neg_like = lambda x: -self.loglike(x)
neg_score = lambda x: -self.score(x)
if full_output:
hist = []
else:
hist = None
params_prof = self._starting_values(start_params)
success = False
# EM iterations
if niter_em > 0:
if self.use_sqrt:
fe_params, cov_re = self._unpack(params_prof)
cov_re = np.dot(cov_re, cov_re.T)
else:
fe_params, cov_re = self._unpack(params_prof, sym=True)
scale = 1.
fe_params, cov_re, scale = self.EM(fe_params, cov_re, scale,
niter_em, hist)
# Gradient algorithms use a different parameterization
# that profiles out sigma^2.
            if self.use_sqrt:
                cov_re_rt = np.linalg.cholesky(cov_re / scale)
                params_prof = self._pack(fe_params, cov_re_rt)
            else:
                params_prof = self._pack(fe_params, cov_re / scale)
# Try up to 10 times to make the optimization work, using
# additional steepest descent steps to improve the starting
# values. Usually only one cycle is used.
for cycle in range(10):
# Steepest descent iterations
params_prof, success = self._steepest_descent(neg_like,
params_prof, neg_score,
max_iter=niter_sd)
if success:
break
# Gradient iterations
if do_cg:
try:
fit_args = dict(kwargs)
fit_args["retall"] = hist is not None
if "disp" not in fit_args:
fit_args["disp"] = False
# Only bfgs seems to work for some reason.
fit_args["method"] = "bfgs"
rslt = super(MixedLM, self).fit(start_params=params_prof, **fit_args)
except np.linalg.LinAlgError:
continue
# The optimization succeeded
params_prof = rslt.params
success = True
if hist is not None:
hist.append(rslt.allvecs)
break
if not success:
msg = "Gradient optimization failed."
warnings.warn(msg, ConvergenceWarning)
# Convert to the final parameterization (i.e. undo the square
# root transform of the covariance matrix, and the profiling
# over the error variance).
fe_params, cov_re_ltri = self._unpack(params_prof, sym=False)
if self.use_sqrt:
cov_re_unscaled = np.dot(cov_re_ltri, cov_re_ltri.T)
else:
cov_re_unscaled = cov_re_ltri
scale = self.get_scale(fe_params, cov_re_unscaled)
cov_re = scale * cov_re_unscaled
if np.min(np.abs(np.diag(cov_re))) < 0.01:
msg = "The MLE may be on the boundary of the parameter space."
warnings.warn(msg, ConvergenceWarning)
# Compute the Hessian at the MLE. Note that this is the
        # Hessian with respect to the random effects covariance matrix
# (not its square root). It is used for obtaining standard
# errors, not for optimization.
params_hess = self._pack(fe_params, cov_re_unscaled)
hess = self.hessian_full(params_hess)
if free is not None:
ii = np.flatnonzero(self.score_pat)
hess1 = hess[ii,:][:,ii]
pcov = np.zeros_like(hess)
pcov[np.ix_(ii,ii)] = np.linalg.inv(-hess1)
else:
pcov = np.linalg.inv(-hess)
# Prepare a results class instance
results = MixedLMResults(self, params_prof, pcov / scale)
results.fe_params = fe_params
results.cov_re = cov_re
results.scale = scale
results.cov_re_unscaled = cov_re_unscaled
results.method = "REML" if self.reml else "ML"
results.converged = success
results.hist = hist
results.reml = self.reml
results.cov_pen = self.cov_pen
results.likeval = -neg_like(params_prof)
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
return results
def _set_score_pattern(self, free):
# TODO: could the pattern be set by a formula?
if free is not None:
pat_slopes = free[0]
ix = np.tril_indices(self.k_re)
pat_cov_re = free[1][ix]
self.score_pat = np.concatenate((pat_slopes, pat_cov_re))
else:
self.score_pat = np.ones(self.nparams)
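# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): a minimal
# end-to-end use of MixedLM on simulated data with one random intercept per
# group.  The data-generating values below are arbitrary.
def _mixedlm_usage_sketch():
    rng = np.random.RandomState(3)
    n_groups, group_size = 20, 10
    groups = np.repeat(np.arange(n_groups), group_size)
    x = rng.randn(n_groups * group_size)
    exog = np.column_stack((np.ones_like(x), x))
    # One random intercept per group plus iid noise.
    endog = 1. + 2. * x + rng.randn(n_groups)[groups] + rng.randn(len(x))
    model = MixedLM(endog, exog, groups)  # exog_re=None -> random intercepts
    result = model.fit(reml=True)
    return result.fe_params, result.cov_re, result.scale
# ---------------------------------------------------------------------------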
class MixedLMResults(base.LikelihoodModelResults):
'''
Class to contain results of fitting a linear mixed effects model.
MixedLMResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
See statsmodels.LikelihoodModelResults
Returns
-------
**Attributes**
model : class instance
        Pointer to the MixedLM model instance that called fit.
normalized_cov_params : array
The sampling covariance matrix of the estimates
fe_params : array
The fitted fixed-effects coefficients
re_params : array
The fitted random-effects covariance matrix
bse_fe : array
The standard errors of the fitted fixed effects coefficients
bse_re : array
The standard errors of the fitted random effects covariance
matrix
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, cov_params):
super(MixedLMResults, self).__init__(model, params,
normalized_cov_params=cov_params)
def bse_fe(self):
"""
Returns the standard errors of the fixed effect regression
coefficients.
"""
p = self.model.exog.shape[1]
return np.sqrt(np.diag(self.cov_params())[0:p])
def bse_re(self):
"""
Returns the standard errors of the variance parameters. Note
that the sampling distribution of variance parameters is
strongly skewed unless the sample size is large, so these
standard errors may not give meaningful confidence intervals
        or p-values if used in the usual way.
"""
p = self.model.exog.shape[1]
return np.sqrt(self.scale * np.diag(self.cov_params())[p:])
def ranef(self):
"""
Returns posterior means of all random effects.
Returns:
--------
ranef_dict : dict
A dictionary mapping the distinct values of the `group`
variable to the conditional means of the random effects
given the data.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
ranef_dict = {}
for k in range(self.model.n_groups):
endog = self.model.endog_li[k]
exog = self.model.exog_li[k]
ex_r = self.model.exog_re_li[k]
label = self.model.group_labels[k]
# Get the residuals
expval = np.dot(exog, self.fe_params)
resid = endog - expval
vresid = _smw_solve(self.scale, ex_r, self.cov_re,
cov_re_inv, resid)
ranef_dict[label] = np.dot(self.cov_re,
np.dot(ex_r.T, vresid))
return ranef_dict
def ranef_cov(self):
"""
Returns the conditional covariance matrix of the random
effects for each group.
Returns:
--------
ranef_dict : dict
A dictionary mapping the distinct values of the `group`
variable to the conditional covariance matrix of the
random effects given the data.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
ranef_dict = {}
for k in range(self.model.n_groups):
endog = self.model.endog_li[k]
exog = self.model.exog_li[k]
ex_r = self.model.exog_re_li[k]
label = self.model.group_labels[k]
mat1 = np.dot(ex_r, self.cov_re)
mat2 = _smw_solve(self.scale, ex_r, self.cov_re, cov_re_inv,
mat1)
mat2 = np.dot(mat1.T, mat2)
ranef_dict[label] = self.cov_re - mat2
return ranef_dict
def summary(self, yname=None, xname_fe=None, xname_re=None,
title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
-----------
yname : string, optional
Default is `y`
xname_fe : list of strings, optional
Fixed effects covariate names
xname_re : list of strings, optional
Random effects covariate names
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
info = OrderedDict()
info["Model:"] = "MixedLM"
if yname is None:
yname = self.model.endog_names
info["No. Observations:"] = str(self.model.n_totobs)
info["No. Groups:"] = str(self.model.n_groups)
gs = np.array([len(x) for x in self.model.endog_li])
info["Min. group size:"] = "%.0f" % min(gs)
info["Max. group size:"] = "%.0f" % max(gs)
info["Mean group size:"] = "%.1f" % np.mean(gs)
info["Dependent Variable:"] = yname
info["Method:"] = self.method
info["Scale:"] = self.scale
info["Likelihood:"] = self.likeval
info["Converged:"] = "Yes" if self.converged else "No"
smry.add_dict(info)
smry.add_title("Mixed Linear Model Regression Results")
float_fmt = "%.3f"
names = list(self.model.exog_names)
sdf = np.nan * np.ones((self.k_fe + self.k_re2, 6),
dtype=np.float64)
# Coefficient estimates
sdf[0:self.k_fe, 0] = self.fe_params
# Standard errors
sdf[0:self.k_fe, 1] =\
np.sqrt(np.diag(self.cov_params()[0:self.k_fe]))
# Z-scores
sdf[0:self.k_fe, 2] = sdf[0:self.k_fe, 0] / sdf[0:self.k_fe, 1]
# p-values
sdf[0:self.k_fe, 3] = 2 * norm.cdf(-np.abs(sdf[0:self.k_fe, 2]))
# Confidence intervals
qm = -norm.ppf(alpha / 2)
sdf[0:self.k_fe, 4] = sdf[0:self.k_fe, 0] - qm * sdf[0:self.k_fe, 1]
sdf[0:self.k_fe, 5] = sdf[0:self.k_fe, 0] + qm * sdf[0:self.k_fe, 1]
# Names for all pairs of random effects
jj = self.k_fe
for i in range(self.k_re):
for j in range(i + 1):
if i == j:
names.append(self.model.exog_re_names[i] + " RE")
else:
names.append(self.model.exog_re_names[j] + " x " +
self.model.exog_re_names[i] + " RE")
sdf[jj, 0] = self.cov_re[i, j]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
sdf = pd.DataFrame(index=names, data=sdf)
sdf.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
for col in sdf.columns:
sdf[col] = [float_fmt % x if np.isfinite(x) else ""
for x in sdf[col]]
smry.add_df(sdf, align='r')
return smry
def profile_re(self, re_ix, num_low=5, dist_low=1., num_high=5,
dist_high=1.):
"""
Calculate a series of values along a 1-dimensional profile
likelihood.
Arguments:
----------
re_ix : integer
The index of the variance parameter for which to construct
a profile likelihood.
num_low : integer
The number of points at which to calculate the likelihood
below the MLE of the parameter of interest.
dist_low : float
The distance below the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
num_high : integer
The number of points at which to calculate the likelihood
            above the MLE of the parameter of interest.
dist_high : float
The distance above the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
Result
------
A matrix with two columns. The first column contains the
values to which the parameter of interest is constrained. The
second column contains the corresponding likelihood values.
"""
model = self.model
p = model.exog.shape[1]
pr = model.exog_re.shape[1]
# Need to permute the variables so that the profiled variable
# is first.
exog_re_li_save = [x.copy() for x in model.exog_re_li]
ix = list(range(pr))
ix[0] = re_ix
ix[re_ix] = 0
for k in range(len(model.exog_re_li)):
model.exog_re_li[k] = model.exog_re_li[k][:,ix]
# Permute the covariance structure to match the permuted data.
ru = self.params[p:]
ik = np.tril_indices(pr)
mat = np.zeros((pr ,pr), dtype=np.float64)
mat[ik] = ru
mat = np.dot(mat, mat.T)
mat = mat[ix,:][:,ix]
ix = np.tril_indices(pr)
re_params = np.linalg.cholesky(mat)[ix]
# Define the values to which the parameter of interest will be
# constrained.
ru0 = re_params[0]
left = np.linspace(ru0 - dist_low, ru0, num_low + 1)
right = np.linspace(ru0, ru0 + dist_high, num_high+1)[1:]
rvalues = np.concatenate((left, right))
# Indicators of which parameters are free and fixed.
free_slopes = np.ones(p, dtype=np.float64)
free_cov_re = np.ones((pr, pr), dtype=np.float64)
free_cov_re[0] = 0
start = {"fe": self.fe_params}
likev = []
for x in rvalues:
re_params[0] = x
start["cov_re_sqrt_unscaled"] = re_params
            md1 = model.fit(start_params=start,
free=(free_slopes, free_cov_re),
reml=self.reml, cov_pen=self.cov_pen)
likev.append([md1.cov_re[0,0], md1.likeval])
likev = np.asarray(likev)
        model.exog_re_li = exog_re_li_save
return likev
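# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): how a
# fitted MixedLMResults instance might be inspected.  `result` is assumed to
# come from a call like MixedLM(...).fit().
def _results_usage_sketch(result):
    # Posterior means of the random effects, keyed by group label.
    re_means = result.ranef()
    # Points along the profile likelihood of the first variance parameter.
    profile = result.profile_re(0, num_low=3, num_high=3)
    return re_means, profile
# ---------------------------------------------------------------------------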
| bsd-3-clause |
Moriadry/tensorflow | tensorflow/python/estimator/inputs/inputs.py | 94 | 1290 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods to create simple input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long
_allowed_symbols = [
'numpy_input_fn',
'pandas_input_fn'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
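# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): a minimal
# use of numpy_input_fn as re-exported here.  The feature name "x" and the
# data are made up.
def _numpy_input_fn_sketch():
  import numpy as np
  features = {"x": np.arange(10, dtype=np.float32)}
  labels = np.arange(10, dtype=np.float32)
  return numpy_input_fn(x=features, y=labels, batch_size=4, num_epochs=1,
                        shuffle=False)
# ---------------------------------------------------------------------------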
| apache-2.0 |
RachitKansal/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
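# Editor's illustrative sketch (not part of the original test module): a
# standalone comparison of BallTree queries against the brute-force helper
# above, mirroring what the generator-based tests below do.  The data are
# arbitrary.
def _ball_tree_vs_brute_force_demo():
    rng = np.random.RandomState(42)
    X = rng.random_sample((50, DIMENSION))
    Y = rng.random_sample((5, DIMENSION))
    bt = BallTree(X, leaf_size=2)
    dist_bt, ind_bt = bt.query(Y, k=3)
    dist_bf, ind_bf = brute_force_neighbors(X, Y, k=3, metric='euclidean')
    assert_array_almost_equal(dist_bt, dist_bf)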
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
94KeyboardsSmashed/SLI2017_Hurgus_Madison | Python_Data_Analysis/Accelerometer_Data_Synthesis_Graph.py | 1 | 6559 | # -*- coding: utf-8 -*-
"""
Created on Tue May 9 12:13:27 2017
@author: Hyun-seok
"""
import numpy as np
import math
import matplotlib.pyplot as plt
def mag(x):
#Calculates Magnitude (sqrt(x^2+y^2+z^2))
#Takes list of tuples (x, y, z)
return math.sqrt(sum(float(i)**2 for i in x))
def linesplit(number):
    #Parses log data values into an iterable list
#Takes an integer between 0 and 3. 0 is time, 1 is x, 2 is y, 3 is z.
#Be sure to sanitize the created list to make it readable for the program
return [line.split(",")[number] for line in lines]
def sanitize(lst):
    #Gets rid of unnecessary spaces from the log file.
    #Use after linesplit to make the lists readable by python
return [s.rstrip() for s in lst]
#Have log.txt in the same folder as this code.
#Name of log has to be same as name in the open function
with open('log.txt') as log:
lines = log.readlines()
rawtime = linesplit(0)
rawX = linesplit(1)
rawY = linesplit(2)
rawZ = linesplit(3)
rawMag = linesplit(4)
time = sanitize(rawtime)
x = sanitize(rawX)
y = sanitize(rawY)
z = sanitize(rawZ)
mag_data = sanitize(rawMag) #renamed from 'mag' so it does not shadow the mag() function above
total = zip(x, y, z) #Creates a list of tuples (x,y,z)
total2 = zip(x, y, z) #Ibid. Repeat for anytime you do list comprehensions.
#v is the absolute value of the (magnitude of the data minus 9.81)
v = [abs(mag(val)-9.81) for val in total]
#m(magnitude) is simply the magnitude of the data
m = [mag(val) for val in total2]
#t (transform) is the fast fourier transform of v (google is your friend)
t = np.fft.fft(np.array(v))
#deltatime is the length of the experiment (literally the change in time)
deltatime = float(time[-1]) - float(time[0])
#Data outputs in the shell
print ("Delta Time (s): ")
print (deltatime)
print ("Area Determined by Trapezoid Method (m/s**3):")
print (np.trapz(np.array(v)))
print ("Area Determined by Trapezoid Method/Delta Time (m/s**2): ")
print ((np.trapz(np.array(v)))/deltatime)
print ("Mean Value of Data Set: ")
print (np.mean(np.array(v)))
print ("Standard Deviation of Data Set: ")
print (np.std(np.array(v), dtype=np.float64))
print ("Sum of Graph Range: ")
print (sum(v))
#consult matplotlib libraries, esp pyplot.
plt.figure()
#subplot 1. The fast fourier transforms graphed on a log scale. Green
plt.subplot(3, 1, 1)
plt.semilogy(time, abs(t), 'g')
plt.axhline(y=0, color='b', linestyle='-') #just a line on y=0 for reference
plt.ylabel("Amplitude (normalized log scale)")
plt.xlabel("Frequency (in Hertz)")
#subplot 2. Variable v graphed on a regular graph. Red
plt.subplot(3, 1, 2)
plt.plot(time, v, 'r')
plt.axhline(y=0, color='b', linestyle='-') #just a line on y=0 for reference
plt.ylabel("acceleration m/s**2")
plt.xlabel("time (s)")
#subplot 3. Variable m graphed on a regular graph. Blue
plt.subplot(3, 1, 3)
plt.plot(time, m)
plt.axhline(y=0, color='b', linestyle='-') #just a line on y=0 for reference
plt.ylabel("acceleration m/s**2")
plt.xlabel("time (s)")
plt.show()
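#Editor's illustrative sketch (not part of the original script): the FFT plot
#above uses the time stamps on the x axis; to label the spectrum in physical
#units one would normally build a frequency axis from the sample spacing.
#The assumption of evenly spaced samples is ours.
def fft_frequency_axis():
    n = len(v)
    dt = deltatime / (n - 1) #approximate sample spacing in seconds
    return np.fft.fftfreq(n, d=dt) #frequencies in Hz matching np.fft.fft order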
| mit |
khkaminska/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the points' positions.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. By contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
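# --- Illustrative addition (a minimal sketch, not part of the original example) ---
# One way to quantify how much the connectivity constraint changes the partition is to
# refit the unconstrained model and compare the two label vectors with the adjusted
# Rand index; at this point `label` holds the labels of the structured clustering.
from sklearn.metrics import adjusted_rand_score
label_unstructured = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X).labels_
print("Adjusted Rand index (structured vs. unstructured): %.3f"
      % adjusted_rand_score(label, label_unstructured))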
| bsd-3-clause |
pthaike/SFrame | oss_src/unity/python/sframe/data_structures/sgraph.py | 9 | 58636 | """
.. warning:: This product is currently in a beta release. The API reference is
subject to change.
This package defines the GraphLab Create SGraph, Vertex, and Edge objects. The SGraph
is a directed graph, consisting of a set of Vertex objects and Edges that
connect pairs of Vertices. The methods in this module are available from the top
level import of the graphlab package.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .. import connect as _mt
from ..connect import main as glconnect
from .sframe import SFrame
from .sarray import SArray
from .gframe import GFrame, VERTEX_GFRAME, EDGE_GFRAME
from ..cython.cy_graph import UnityGraphProxy
from ..cython.context import debug_trace as cython_context
from ..util import _make_internal_url
from ..deps import pandas as pd
from ..deps import HAS_PANDAS
import inspect
import copy
## \internal Default column name for vertex id.
_VID_COLUMN = '__id'
## \internal Default column name for source vid.
_SRC_VID_COLUMN = '__src_id'
## \internal Default column name for target vid.
_DST_VID_COLUMN = '__dst_id'
#/**************************************************************************/
#/* */
#/* SGraph Related Classes */
#/* */
#/**************************************************************************/
class Vertex(object):
"""
A vertex object, consisting of a vertex ID and a dictionary of vertex
attributes. The vertex ID can be an integer, string, or float.
Parameters
----------
vid : int or string or float
Vertex ID.
attr : dict, optional
Vertex attributes. A Dictionary of string keys and values with one of
the following types: int, float, string, array of floats.
See Also
--------
Edge, SGraph
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
"""
__slots__ = ['vid', 'attr']
def __init__(self, vid, attr={}, _series=None):
"""__init__(self, vid, attr={})
Construct a new vertex.
"""
if _series is not None:
self.vid = _series[_VID_COLUMN]
self.attr = _series.to_dict()
self.attr.pop(_VID_COLUMN)
else:
self.vid = vid
self.attr = attr
def __repr__(self):
return "V(" + str(self.vid) + ", " + str(self.attr) + ")"
def __str__(self):
return "V(" + str(self.vid) + ", " + str(self.attr) + ")"
class Edge(object):
"""
A directed edge between two Vertex objects. An Edge object consists of a
source vertex ID, a destination vertex ID, and a dictionary of edge
attributes.
Parameters
----------
src_vid : int or string or float
Source vertex ID.
dst_vid : int or string or float
Target vertex ID.
attr : dict
Edge attributes. A Dictionary of string keys and values with one of the
following types: integer, float, string, array of floats.
See Also
--------
Vertex, SGraph
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'vizsla'})]
>>> edges = [Edge(0, 1, attr={'size': 'larger_than'})]
>>> g = SGraph()
>>> g = g.add_vertices(verts).add_edges(edges)
"""
__slots__ = ['src_vid', 'dst_vid', 'attr']
def __init__(self, src_vid, dst_vid, attr={}, _series=None):
"""__init__(self, vid, attr={})
Construct a new edge.
"""
if _series is not None:
self.src_vid = _series[_SRC_VID_COLUMN]
self.dst_vid = _series[_DST_VID_COLUMN]
self.attr = _series.to_dict()
self.attr.pop(_SRC_VID_COLUMN)
self.attr.pop(_DST_VID_COLUMN)
else:
self.src_vid = src_vid
self.dst_vid = dst_vid
self.attr = attr
def __repr__(self):
return ("E(" + str(self.src_vid) + " -> " + str(self.dst_vid) + ", " +
str(self.attr) + ")")
def __str__(self):
return ("E(" + str(self.src_vid) + " -> " + str(self.dst_vid) + ", " +
str(self.attr) + ")")
class SGraph(object):
"""
A scalable graph data structure. The SGraph data structure allows arbitrary
dictionary attributes on vertices and edges, provides flexible vertex and
edge query functions, and seamless transformation to and from
:class:`~graphlab.SFrame`.
There are several ways to create an SGraph. The simplest way is to make an
empty SGraph then add vertices and edges with the :py:func:`add_vertices`
and :py:func:`add_edges` methods. SGraphs can also be created from vertex
and edge lists stored in :class:`~graphlab.SFrames`. Columns of these
SFrames not used as vertex IDs are assumed to be vertex or edge attributes.
Please see the `User Guide
<https://dato.com/learn/userguide/sgraph/sgraph.html>`_
for a more detailed introduction to creating and working with SGraphs.
Parameters
----------
vertices : SFrame, optional
Vertex data. Must include an ID column with the name specified by
`vid_field.` Additional columns are treated as vertex attributes.
edges : SFrame, optional
Edge data. Must include source and destination ID columns as specified
by `src_field` and `dst_field`. Additional columns are treated as edge
attributes.
vid_field : str, optional
The name of vertex ID column in the `vertices` SFrame.
src_field : str, optional
The name of source ID column in the `edges` SFrame.
dst_field : str, optional
The name of destination ID column in the `edges` SFrame.
See Also
--------
SFrame
Notes
-----
- SGraphs are *structurally immutable*. In the example below, the
:func:`~add_vertices` and :func:`~add_edges` commands both return a new
graph; the old graph gets garbage collected.
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
>>> g = g.add_edges(Edge(1, 2))
"""
__slots__ = ['__proxy__', '_vertices', '_edges']
def __init__(self, vertices=None, edges=None, vid_field='__id',
src_field='__src_id', dst_field='__dst_id', _proxy=None):
"""
__init__(vertices=None, edges=None, vid_field='__id', src_field='__src_id', dst_field='__dst_id')
By default, construct an empty graph when vertices and edges are None.
Otherwise construct an SGraph with given vertices and edges.
Parameters
----------
vertices : SFrame, optional
An SFrame containing vertex id columns and optional vertex data
columns.
edges : SFrame, optional
An SFrame containing source and target id columns and optional edge
data columns.
vid_field : str, optional
The name of vertex id column in the `vertices` SFrame.
src_field : str, optional
The name of source id column in the `edges` SFrame.
dst_field : str, optional
The name of target id column in the `edges` SFrame.
"""
if (_proxy is None):
self.__proxy__ = UnityGraphProxy(glconnect.get_client())
if vertices is not None:
self.__proxy__ = self.add_vertices(vertices, vid_field).__proxy__
if edges is not None:
self.__proxy__ = self.add_edges(edges, src_field, dst_field).__proxy__
else:
self.__proxy__ = _proxy
self._vertices = GFrame(self, VERTEX_GFRAME)
self._edges = GFrame(self, EDGE_GFRAME)
def __str__(self):
"""Returns a readable string representation summarizing the graph."""
return "SGraph(%s)" % str(self.summary())
def __repr__(self):
"""Returns a readable string representation summarizing the graph."""
return "SGraph(%s)\nVertex Fields:%s\nEdge Fields:%s" % \
(str(self.summary()), str(self.get_vertex_fields()), str(self.get_edge_fields()))
def __copy__(self):
return SGraph(_proxy=self.__proxy__)
def copy(self):
"""
Returns a shallow copy of the SGraph.
"""
return self.__copy__()
@property
def vertices(self):
"""
Special vertex SFrame of the SGraph. Modifying the contents of this
SFrame changes the vertex data of the SGraph. To preserve the graph
structure, the ``__id`` column of this SFrame is read-only.
See Also
--------
edges
Examples
--------
>>> from graphlab import SGraph, Vertex
>>> g = SGraph().add_vertices([Vertex('cat', {'fluffy': 1}),
Vertex('dog', {'fluffy': 1, 'woof': 1}),
Vertex('hippo', {})])
Copy the 'woof' vertex attribute into a new 'bark' vertex attribute:
>>> g.vertices['bark'] = g.vertices['woof']
Remove the 'woof' attribute:
>>> del g.vertices['woof']
Create a new field 'likes_fish':
>>> g.vertices['likes_fish'] = g.vertices['__id'] == 'cat'
+-------+--------+------+------------+
| __id | fluffy | bark | likes_fish |
+-------+--------+------+------------+
| dog | 1.0 | 1.0 | 0 |
| cat | 1.0 | nan | 1 |
| hippo | nan | nan | 0 |
+-------+--------+------+------------+
Replace missing values with zeros:
>>> for col in g.vertices.column_names():
... if col != '__id':
... g.vertices.fillna(col, 0)
+-------+--------+------+------------+
| __id | fluffy | bark | likes_fish |
+-------+--------+------+------------+
| dog | 1.0 | 1.0 | 0 |
| cat | 1.0 | 0.0 | 1 |
| hippo | 0.0 | 0.0 | 0 |
+-------+--------+------+------------+
"""
_mt._get_metric_tracker().track('sgraph.vertices')
return self._vertices
@property
def edges(self):
"""
Special edge SFrame of the SGraph. Modifying the contents of this SFrame
changes the edge data of the SGraph. To preserve the graph structure,
the ``__src_id``, and ``__dst_id`` columns of this SFrame are read-only.
See Also
--------
vertices
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> g = g.add_vertices([Vertex(x) for x in ['cat', 'dog', 'fossa']])
>>> g = g.add_edges([Edge('cat', 'dog', attr={'relationship': 'dislikes'}),
Edge('dog', 'cat', attr={'relationship': 'likes'}),
Edge('dog', 'fossa', attr={'relationship': 'likes'})])
>>> g.edges['size'] = ['smaller than', 'larger than', 'equal to']
+----------+----------+--------------+--------------+
| __src_id | __dst_id | relationship | size |
+----------+----------+--------------+--------------+
| cat | dog | dislikes | smaller than |
| dog | cat | likes | larger than |
| dog | fossa | likes | equal to |
+----------+----------+--------------+--------------+
"""
_mt._get_metric_tracker().track('sgraph.edges')
return self._edges
def summary(self):
"""
Return the number of vertices and edges as a dictionary.
Returns
-------
out : dict
A dictionary containing the number of vertices and edges.
See Also
--------
show, vertices, edges
Examples
--------
>>> from graphlab import SGraph, Vertex
>>> g = SGraph().add_vertices([Vertex(i) for i in range(10)])
>>> n_vertex = g.summary()['num_vertices']
10
>>> n_edge = g.summary()['num_edges']
0
"""
_mt._get_metric_tracker().track('sgraph.summary')
ret = self.__proxy__.summary()
return dict(ret.items())
def get_vertices(self, ids=[], fields={}, format='sframe'):
"""
get_vertices(self, ids=list(), fields={}, format='sframe')
Return a collection of vertices and their attributes.
Parameters
----------
ids : list [int | float | str] or SArray
List of vertex IDs to retrieve. Only vertices in this list will be
returned. Also accepts a single vertex id.
fields : dict | pandas.DataFrame
Dictionary specifying equality constraint on field values. For
example ``{'gender': 'M'}``, returns only vertices whose 'gender'
field is 'M'. ``None`` can be used to designate a wild card. For
example, {'relationship': None} will find all vertices with the
field 'relationship' regardless of the value.
format : {'sframe', 'list'}
Output format. The SFrame output (default) contains a column
``__id`` with vertex IDs and a column for each vertex attribute.
List output returns a list of Vertex objects.
Returns
-------
out : SFrame or list [Vertex]
An SFrame or list of Vertex objects.
See Also
--------
vertices, get_edges
Examples
--------
Return all vertices in the graph.
>>> from graphlab import SGraph, Vertex
>>> g = SGraph().add_vertices([Vertex(0, attr={'gender': 'M'}),
Vertex(1, attr={'gender': 'F'}),
Vertex(2, attr={'gender': 'F'})])
>>> g.get_vertices()
+------+--------+
| __id | gender |
+------+--------+
| 0 | M |
| 2 | F |
| 1 | F |
+------+--------+
Return vertices 0 and 2.
>>> g.get_vertices(ids=[0, 2])
+------+--------+
| __id | gender |
+------+--------+
| 0 | M |
| 2 | F |
+------+--------+
Return vertices with the vertex attribute "gender" equal to "M".
>>> g.get_vertices(fields={'gender': 'M'})
+------+--------+
| __id | gender |
+------+--------+
| 0 | M |
+------+--------+
"""
_mt._get_metric_tracker().track('sgraph.get_vertices')
if not hasattr(ids, '__iter__'):
ids = [ids]
if type(ids) not in (list, SArray):
raise TypeError('ids must be list or SArray type')
with cython_context():
sf = SFrame(_proxy=self.__proxy__.get_vertices(ids, fields))
if (format == 'sframe'):
return sf
elif (format == 'dataframe'):
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
if sf.num_rows() == 0:
return pd.DataFrame()
else:
df = sf.head(sf.num_rows()).to_dataframe()
return df.set_index('__id')
elif (format == 'list'):
return _dataframe_to_vertex_list(sf.to_dataframe())
else:
raise ValueError("Invalid format specifier")
def get_edges(self, src_ids=[], dst_ids=[], fields={}, format='sframe'):
"""
get_edges(self, src_ids=list(), dst_ids=list(), fields={}, format='sframe')
Return a collection of edges and their attributes. This function is used
to find edges by vertex IDs, filter on edge attributes, or list in-out
neighbors of vertex sets.
Parameters
----------
src_ids, dst_ids : list or SArray, optional
Parallel arrays of vertex IDs, with each pair corresponding to an
edge to fetch. Only edges in this list are returned. ``None`` can be
used to designate a wild card. For instance, ``src_ids=[1, 2,
None]``, ``dst_ids=[3, None, 5]`` will fetch the edge 1->3, all
outgoing edges of 2 and all incoming edges of 5. src_id and dst_id
may be left empty, which implies an array of all wild cards.
fields : dict, optional
Dictionary specifying equality constraints on field values. For
example, ``{'relationship': 'following'}``, returns only edges whose
'relationship' field equals 'following'. ``None`` can be used as a
value to designate a wild card. e.g. ``{'relationship': None}`` will
find all edges with the field 'relationship' regardless of the
value.
format : {'sframe', 'list'}, optional
Output format. The 'sframe' output (default) contains columns
__src_id and __dst_id with edge vertex IDs and a column for each
edge attribute. List output returns a list of Edge objects.
Returns
-------
out : SFrame | list [Edge]
An SFrame or list of edges.
See Also
--------
edges, get_vertices
Examples
--------
Return all edges in the graph.
>>> from graphlab import SGraph, Edge
>>> g = SGraph().add_edges([Edge(0, 1, attr={'rating': 5}),
Edge(0, 2, attr={'rating': 2}),
Edge(1, 2)])
>>> g.get_edges(src_ids=[None], dst_ids=[None])
+----------+----------+--------+
| __src_id | __dst_id | rating |
+----------+----------+--------+
| 0 | 2 | 2 |
| 0 | 1 | 5 |
| 1 | 2 | None |
+----------+----------+--------+
Return edges with the attribute "rating" of 5.
>>> g.get_edges(fields={'rating': 5})
+----------+----------+--------+
| __src_id | __dst_id | rating |
+----------+----------+--------+
| 0 | 1 | 5 |
+----------+----------+--------+
Return edges 0 --> 1 and 1 --> 2 (if present in the graph).
>>> g.get_edges(src_ids=[0, 1], dst_ids=[1, 2])
+----------+----------+--------+
| __src_id | __dst_id | rating |
+----------+----------+--------+
| 0 | 1 | 5 |
| 1 | 2 | None |
+----------+----------+--------+
"""
_mt._get_metric_tracker().track('sgraph.get_edges')
if not hasattr(src_ids, '__iter__'):
src_ids = [src_ids]
if not hasattr(dst_ids, '__iter__'):
dst_ids = [dst_ids]
if type(src_ids) not in (list, SArray):
raise TypeError('src_ids must be list or SArray type')
if type(dst_ids) not in (list, SArray):
raise TypeError('dst_ids must be list or SArray type')
# implicit Nones
if len(src_ids) == 0 and len(dst_ids) > 0:
src_ids = [None] * len(dst_ids)
# implicit Nones
if len(dst_ids) == 0 and len(src_ids) > 0:
dst_ids = [None] * len(src_ids)
with cython_context():
sf = SFrame(_proxy=self.__proxy__.get_edges(src_ids, dst_ids, fields))
if (format == 'sframe'):
return sf
if (format == 'dataframe'):
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
if sf.num_rows() == 0:
return pd.DataFrame()
else:
return sf.head(sf.num_rows()).to_dataframe()
elif (format == 'list'):
return _dataframe_to_edge_list(sf.to_dataframe())
else:
raise ValueError("Invalid format specifier")
def add_vertices(self, vertices, vid_field=None):
"""
Add vertices to the SGraph. Vertices should be input as a list of
:class:`~graphlab.Vertex` objects, an :class:`~graphlab.SFrame`, or a
pandas DataFrame. If vertices are specified by SFrame or DataFrame,
``vid_field`` specifies which column contains the vertex ID. Remaining
columns are assumed to hold additional vertex attributes. If these
attributes are not already present in the graph's vertex data, they are
added, with existing vertices acquiring the value ``None``.
Parameters
----------
vertices : Vertex | list [Vertex] | pandas.DataFrame | SFrame
Vertex data. If the vertices are in an SFrame or DataFrame, then
``vid_field`` specifies the column containing the vertex IDs.
Additional columns are treated as vertex attributes.
vid_field : string, optional
Column in the DataFrame or SFrame to use as vertex ID. Required if
vertices is an SFrame. If ``vertices`` is a DataFrame and
``vid_field`` is not specified, the row index is used as vertex ID.
Returns
-------
out : SGraph
A new SGraph with vertices added.
See Also
--------
vertices, SFrame, add_edges
Notes
-----
- If vertices are added with indices that already exist in the graph,
they are overwritten completely. All attributes for these vertices
will conform to the specification in this method.
Examples
--------
>>> from graphlab import SGraph, Vertex, SFrame
>>> g = SGraph()
Add a single vertex.
>>> g = g.add_vertices(Vertex(0, attr={'breed': 'labrador'}))
Add a list of vertices.
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
Add vertices from an SFrame.
>>> sf_vert = SFrame({'id': [0, 1, 2], 'breed':['lab', 'lab', 'vizsla']})
>>> g = g.add_vertices(sf_vert, vid_field='id')
"""
_mt._get_metric_tracker().track('sgraph.add_vertices')
sf = _vertex_data_to_sframe(vertices, vid_field)
with cython_context():
proxy = self.__proxy__.add_vertices(sf.__proxy__, _VID_COLUMN)
return SGraph(_proxy=proxy)
def add_edges(self, edges, src_field=None, dst_field=None):
"""
Add edges to the SGraph. Edges should be input as a list of
:class:`~graphlab.Edge` objects, an :class:`~graphlab.SFrame`, or a
Pandas DataFrame. If the new edges are in an SFrame or DataFrame, then
``src_field`` and ``dst_field`` are required to specify the columns that
contain the source and destination vertex IDs; additional columns are
treated as edge attributes. If these attributes are not already present
in the graph's edge data, they are added, with existing edges acquiring
the value ``None``.
Parameters
----------
edges : Edge | list [Edge] | pandas.DataFrame | SFrame
Edge data. If the edges are in an SFrame or DataFrame, then
``src_field`` and ``dst_field`` are required to specify the columns
that contain the source and destination vertex IDs. Additional
columns are treated as edge attributes.
src_field : string, optional
Column in the SFrame or DataFrame to use as source vertex IDs. Not
required if ``edges`` is a list.
dst_field : string, optional
Column in the SFrame or Pandas DataFrame to use as destination
vertex IDs. Not required if ``edges`` is a list.
Returns
-------
out : SGraph
A new SGraph with `edges` added.
See Also
--------
edges, SFrame, add_vertices
Notes
-----
- If an edge is added whose source and destination IDs match edges that
already exist in the graph, a new edge is added to the graph. This
contrasts with :py:func:`add_vertices`, which overwrites existing
vertices.
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge, SFrame
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'breed': 'labrador'}),
Vertex(1, attr={'breed': 'labrador'}),
Vertex(2, attr={'breed': 'vizsla'})]
>>> g = g.add_vertices(verts)
Add a single edge.
>>> g = g.add_edges(Edge(1, 2))
Add a list of edges.
>>> g = g.add_edges([Edge(0, 2), Edge(1, 2)])
Add edges from an SFrame.
>>> sf_edge = SFrame({'source': [0, 1], 'dest': [2, 2]})
>>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest')
"""
_mt._get_metric_tracker().track('sgraph.add_edges')
sf = _edge_data_to_sframe(edges, src_field, dst_field)
with cython_context():
proxy = self.__proxy__.add_edges(sf.__proxy__, _SRC_VID_COLUMN, _DST_VID_COLUMN)
return SGraph(_proxy=proxy)
def get_fields(self):
"""
Return a list of vertex and edge attribute fields in the SGraph. If a
field is common to both vertex and edge attributes, it will show up
twice in the returned list.
Returns
-------
out : list
Names of fields contained in the vertex or edge data.
See Also
--------
get_vertex_fields, get_edge_fields
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'name': 'alex'}),
Vertex(1, attr={'name': 'barbara'})]
>>> g = g.add_vertices(verts)
>>> g = g.add_edges(Edge(0, 1, attr={'frequency': 6}))
>>> fields = g.get_fields()
['__id', 'name', '__src_id', '__dst_id', 'frequency']
"""
_mt._get_metric_tracker().track('sgraph.get_fields')
return self.get_vertex_fields() + self.get_edge_fields()
def get_vertex_fields(self):
"""
Return a list of vertex attribute fields in the SGraph.
Returns
-------
out : list
Names of fields contained in the vertex data.
See Also
--------
get_fields, get_edge_fields
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'name': 'alex'}),
Vertex(1, attr={'name': 'barbara'})]
>>> g = g.add_vertices(verts)
>>> g = g.add_edges(Edge(0, 1, attr={'frequency': 6}))
>>> fields = g.get_vertex_fields()
['__id', 'name']
"""
_mt._get_metric_tracker().track('sgraph.get_vertex_fields')
with cython_context():
return self.__proxy__.get_vertex_fields()
def get_edge_fields(self):
"""
Return a list of edge attribute fields in the graph.
Returns
-------
out : list
Names of fields contained in the vertex data.
See Also
--------
get_fields, get_vertex_fields
Examples
--------
>>> from graphlab import SGraph, Vertex, Edge
>>> g = SGraph()
>>> verts = [Vertex(0, attr={'name': 'alex'}),
Vertex(1, attr={'name': 'barbara'})]
>>> g = g.add_vertices(verts)
>>> g = g.add_edges(Edge(0, 1, attr={'frequency': 6}))
>>> fields = g.get_edge_fields()
['__src_id', '__dst_id', 'frequency']
"""
_mt._get_metric_tracker().track('sgraph.get_edge_fields')
with cython_context():
return self.__proxy__.get_edge_fields()
def select_fields(self, fields):
"""
Return a new SGraph with only the selected fields. Other fields are
discarded, while fields that do not exist in the SGraph are ignored.
Parameters
----------
fields : string | list [string]
A single field name or a list of field names to select.
Returns
-------
out : SGraph
A new graph whose vertex and edge data are projected to the selected
fields.
See Also
--------
get_fields, get_vertex_fields, get_edge_fields
Examples
--------
>>> from graphlab import SGraph, Vertex
>>> verts = [Vertex(0, attr={'breed': 'labrador', 'age': 5}),
Vertex(1, attr={'breed': 'labrador', 'age': 3}),
Vertex(2, attr={'breed': 'vizsla', 'age': 8})]
>>> g = SGraph()
>>> g = g.add_vertices(verts)
>>> g2 = g.select_fields(fields=['breed'])
"""
_mt._get_metric_tracker().track('sgraph.select_fields')
if (type(fields) is str):
fields = [fields]
if not isinstance(fields, list) or not all(type(x) is str for x in fields):
raise TypeError('\"fields\" must be a str or list[str]')
vfields = self.__proxy__.get_vertex_fields()
efields = self.__proxy__.get_edge_fields()
selected_vfields = []
selected_efields = []
for f in fields:
found = False
if f in vfields:
selected_vfields.append(f)
found = True
if f in efields:
selected_efields.append(f)
found = True
if not found:
raise ValueError('Field %s not in graph' % f)
with cython_context():
proxy = self.__proxy__
proxy = proxy.select_vertex_fields(selected_vfields)
proxy = proxy.select_edge_fields(selected_efields)
return SGraph(_proxy=proxy)
def triple_apply(self, triple_apply_fn, mutated_fields, input_fields=None):
'''
Apply a transform function to each edge and its associated source and
target vertices in parallel. Each edge is visited once and in parallel.
Modification to vertex data is protected by lock. The effect on the
returned SGraph is equivalent to the following pseudocode:
>>> PARALLEL FOR (source, edge, target) AS triple in G:
... LOCK (triple.source, triple.target)
... (source, edge, target) = triple_apply_fn(triple)
... UNLOCK (triple.source, triple.target)
... END PARALLEL FOR
Parameters
----------
triple_apply_fn : function : (dict, dict, dict) -> (dict, dict, dict)
The function to apply to each triple of (source_vertex, edge,
target_vertex). This function must take as input a tuple of
(source_data, edge_data, target_data) and return a tuple of
(new_source_data, new_edge_data, new_target_data). All variables in
both tuples must be of dict type.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
mutated_fields : list[str] | str
Fields that ``triple_apply_fn`` will mutate. Note: columns that are
actually mutated by the triple apply function but not specified in
``mutated_fields`` will have undetermined effects.
input_fields : list[str] | str, optional
Fields that ``triple_apply_fn`` will have access to.
The default is ``None``, which grants access to all fields.
``mutated_fields`` will always be included in ``input_fields``.
Returns
-------
out : SGraph
A new SGraph with updated vertex and edge data. Only fields
specified in the ``mutated_fields`` parameter are updated.
Notes
-----
- ``triple_apply`` does not currently support creating new fields in the
lambda function.
Examples
--------
Import graphlab-create and set up the graph.
>>> edges = graphlab.SFrame({'source': range(9), 'dest': range(1, 10)})
>>> g = graphlab.SGraph()
>>> g = g.add_edges(edges, src_field='source', dst_field='dest')
>>> g.vertices['degree'] = 0
Define the function to apply to each (source_node, edge, target_node)
triple.
>>> def degree_count_fn (src, edge, dst):
src['degree'] += 1
dst['degree'] += 1
return (src, edge, dst)
Apply the function to the SGraph.
>>> g = g.triple_apply(degree_count_fn, mutated_fields=['degree'])
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
#include <vector>
using namespace graphlab;
std::vector<variant_type> connected_components_parameterized(
std::map<std::string, flexible_type>& src,
std::map<std::string, flexible_type>& edge,
std::map<std::string, flexible_type>& dst,
std::string column) {
if (src[column] < dst[column]) dst[column] = src[column];
else src[column] = dst[column];
return {to_variant(src), to_variant(edge), to_variant(dst)};
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(connected_components_parameterized, "src", "edge", "dst", "column");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> from example import connected_components_parameterized as cc
>>> e = gl.SFrame({'__src_id':[1,2,3,4,5], '__dst_id':[3,1,2,5,4]})
>>> g = gl.SGraph().add_edges(e)
>>> g.vertices['cid'] = g.vertices['__id']
>>> for i in range(2):
... g = g.triple_apply(lambda src, edge, dst: cc(src, edge, dst, 'cid'), ['cid'], ['cid'])
>>> g.vertices['cid']
dtype: int
Rows: 5
[4, 1, 1, 1, 4]
'''
_mt._get_metric_tracker().track('sgraph.triple_apply')
assert inspect.isfunction(triple_apply_fn), "Input must be a function"
if not (type(mutated_fields) is list or type(mutated_fields) is str):
raise TypeError('mutated_fields must be str or list of str')
if not (input_fields is None or type(input_fields) is list or type(input_fields) is str):
raise TypeError('input_fields must be str or list of str')
if type(mutated_fields) == str:
mutated_fields = [mutated_fields]
if len(mutated_fields) == 0:
raise ValueError('mutated_fields cannot be empty')
for f in ['__id', '__src_id', '__dst_id']:
if f in mutated_fields:
raise ValueError('mutated_fields cannot contain %s' % f)
all_fields = self.get_fields()
if not set(mutated_fields).issubset(set(all_fields)):
extra_fields = list(set(mutated_fields).difference(set(all_fields)))
raise ValueError('graph does not contain fields: %s' % str(extra_fields))
# select input fields
if input_fields is None:
input_fields = self.get_fields()
elif type(input_fields) is str:
input_fields = [input_fields]
# make input fields a superset of mutated_fields
input_fields_set = set(input_fields + mutated_fields)
input_fields = [x for x in self.get_fields() if x in input_fields_set]
g = self.select_fields(input_fields)
nativefn = None
try:
from .. import extensions
nativefn = extensions._build_native_function_call(triple_apply_fn)
except:
# failure are fine. we just fall out into the next few phases
pass
if nativefn is not None:
with cython_context():
return SGraph(_proxy=g.__proxy__.lambda_triple_apply_native(nativefn, mutated_fields))
else:
with cython_context():
return SGraph(_proxy=g.__proxy__.lambda_triple_apply(triple_apply_fn, mutated_fields))
def save(self, filename, format='auto'):
"""
Save the SGraph to disk. If the graph is saved in binary format, the
graph can be re-loaded using the :py:func:`load_sgraph` method.
Alternatively, the SGraph can be saved in JSON format for a
human-readable and portable representation.
Parameters
----------
filename : string
Filename to use when saving the file. It can be either a local or
remote url.
format : {'auto', 'binary', 'json'}, optional
File format. If not specified, the format is detected automatically
based on the filename. Note that JSON format graphs cannot be
re-loaded with :py:func:`load_sgraph`.
See Also
--------
load_sgraph
Examples
--------
>>> g = graphlab.SGraph()
>>> g = g.add_vertices([graphlab.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = graphlab.load_graph('mygraph')
Save in JSON format.
>>> g.save('mygraph.json', format='json')
"""
_mt._get_metric_tracker().track('sgraph.save')
if format == 'auto':
if filename.endswith(('.json', '.json.gz')):
format = 'json'
else:
format = 'binary'
if format not in ['binary', 'json', 'csv']:
raise ValueError('Invalid format: %s. Supported formats are: %s'
% (format, ['binary', 'json', 'csv']))
with cython_context():
self.__proxy__.save_graph(_make_internal_url(filename), format)
def show(self, vlabel=None, vlabel_hover=False, vcolor=[0.522, 0.741, 0.],
highlight={}, highlight_color=[0.69, 0., 0.498], node_size=300,
elabel=None, elabel_hover=False, ecolor=[0.37, 0.33, 0.33],
ewidth=1, v_offset=0.03, h_offset=0., arrows=False,
vertex_positions=None):
"""
show(vlabel=None, vlabel_hover=False, vcolor=[0.522, 0.741, 0.], highlight={}, highlight_color=[0.69, 0., 0.498], node_size=300, elabel=None, elabel_hover=False, ecolor=[0.37, 0.33, 0.33], ewidth=1, v_offset=0.03, h_offset=0., arrows=False, vertex_positions=None)
Visualize the SGraph with GraphLab Create :mod:`~graphlab.canvas`. This
function starts Canvas if it is not already running. If the graph has
already been plotted, this function will update the plot. SGraphs must
have fewer than 1,000 edges and 1,000 vertices to be visualized in
Canvas.
Parameters
----------
vlabel : string, optional
Field name for the label on each vertex. The default is None,
which omits vertex labels. Set to 'id' to use the vertex ID as the
label.
vlabel_hover : bool, optional
If True, vertex labels, if specified, appear only on mouse hover.
Otherwise (the default), vertex labels, if specified are always
visible.
vcolor : list[float], optional
RGB triple for the primary vertex color. Default is green
([0.522, 0.741, 0.]).
highlight : dict or list or SArray, optional
As a dict, mapping of Vertex ID to RGB color triple (list of float,
as in vcolor).
As a list or SArray (DEPRECATED): Vertex IDs to highlight in
a different color.
highlight_color : list[float], optional
RGB triple for the color of highlighted vertices, when the
highlighted parameter is a list or SArray. Default is fuchsia
([0.69, 0.,0.498]). For fine-grained control over vertex coloring,
use the `highlight` parameter with a dictionary of Vertex IDs and
color values.
node_size : int, optional
Size of plotted vertices.
elabel : string, optional
Field name for the label on each edge.
elabel_hover : bool, optional
If True, edge labels, if specified, appear only on mouse hover.
Otherwise (the default), specified edge labels are always visible.
ecolor : string, optional
RGB triple for edge color. Default is gray ([0.37, 0.33, 0.33]).
ewidth : int, optional
Edge width.
v_offset : float, optional
Vertical offset of vertex labels, as a fraction of total plot
height. For example, the default of 0.03 moves the label 3% of the
plot height higher in the canvas.
h_offset : float, optional
Horizontal offset of vertex labels, as a fraction of total plot
width. For example, an offset of 0.03 moves the label 3% of the plot
width to the right. Default is 0.0.
arrows : bool, optional
If True, draw arrows indicating edge direction.
vertex_positions : tuple, optional
If a 2-element tuple of column names in self.vertices is specified,
those two columns will be used as the X and Y coordinates of
vertices in the graph layout. The 2d space represented by the X and
Y coordinates will be translated to a square display coordinate
space, preserving aspect ratio. If you want to fill both dimensions
entirely, normalize the positions to represent a square 2d space.
If vertex_positions is not specified, vertices will be arranged
according to a standard graph layout algorithm without regard to
vertex or edge attributes.
See Also
--------
canvas
Notes
-----
- Graphs with more than 1,000 vertices or 1,000 edges cannot be
displayed as-is. For such graphs, construct a subgraph by selecting
some vertices and edges, then call this method on the result.
- See the `user guide
<https://dato.com/learn/userguide/sframe/visualization.html>`_ for more details and extended examples.
Examples
--------
>>> g = graphlab.SGraph()
>>> g = sg.add_edges([graphlab.Edge(i, i+1) for i in range(5)])
>>> g.show(highlight=[2, 3], vlabel='id', arrows=True)
"""
from ..visualization.show import show
show(self,
vlabel=vlabel,
vlabel_hover=vlabel_hover,
vcolor=vcolor,
highlight=highlight,
highlight_color=highlight_color,
node_size=node_size,
elabel=elabel,
elabel_hover=elabel_hover,
ecolor=ecolor,
ewidth=ewidth,
v_offset=v_offset,
h_offset=h_offset,
arrows=arrows,
vertex_positions=vertex_positions)
def get_neighborhood(self, ids, radius=1, full_subgraph=True):
"""
Retrieve the graph neighborhood around a set of vertices, ignoring edge
directions. Note that setting radius greater than two often results in a
time-consuming query for a very large subgraph.
Parameters
----------
ids : list [int | float | str]
List of target vertex IDs.
radius : int, optional
Radius of the neighborhood. Every vertex in the returned subgraph is
reachable from at least one of the target vertices on a path of
length no longer than ``radius``. Setting radius larger than 2 may
result in a very large subgraph.
full_subgraph : bool, optional
If True, return all edges between vertices in the returned
neighborhood. The result is also known as the subgraph induced by
the target nodes' neighbors, or the egocentric network for the
target nodes. If False, return only edges on paths of length <=
``radius`` from the target node, also known as the reachability
graph.
Returns
-------
out : Graph
The subgraph with the neighborhoods around the target vertices.
See Also
--------
get_edges, get_vertices
References
----------
- Marsden, P. (2002) `Egocentric and sociocentric measures of network
centrality <http://www.sciencedirect.com/science/article/pii/S03788733
02000163>`_.
- `Wikipedia - Reachability <http://en.wikipedia.org/wiki/Reachability>`_
Examples
--------
>>> sf_edge = graphlab.SFrame({'source': range(9), 'dest': range(1, 10)})
>>> g = graphlab.SGraph()
>>> g = g.add_edges(sf_edge, src_field='source', dst_field='dest')
>>> subgraph = g.get_neighborhood(ids=[1, 7], radius=2,
full_subgraph=True)
"""
_mt._get_metric_tracker().track('sgraph.get_neighborhood')
verts = ids
## find the vertices within radius (and the path edges)
for i in range(radius):
edges_out = self.get_edges(src_ids=verts)
edges_in = self.get_edges(dst_ids=verts)
verts = list(edges_in['__src_id']) + list(edges_in['__dst_id']) + \
list(edges_out['__src_id']) + list(edges_out['__dst_id'])
verts = list(set(verts))
## make a new graph to return and add the vertices
g = SGraph()
g = g.add_vertices(self.get_vertices(verts), vid_field='__id')
## add the requested edge set
if full_subgraph is True:
induced_edge_out = self.get_edges(src_ids=verts)
induced_edge_in = self.get_edges(dst_ids=verts)
df_induced = induced_edge_out.append(induced_edge_in)
df_induced = df_induced.groupby(df_induced.column_names(), {})
verts_sa = SArray(list(verts))
edges = df_induced.filter_by(verts_sa, "__src_id")
edges = edges.filter_by(verts_sa, "__dst_id")
else:
path_edges = edges_out.append(edges_in)
edges = path_edges.groupby(path_edges.column_names(), {})
g = g.add_edges(edges, src_field='__src_id', dst_field='__dst_id')
return g
#/**************************************************************************/
#/* */
#/* Module Function */
#/* */
#/**************************************************************************/
def load_graph(filename, format='binary', delimiter='auto'):
import warnings
warnings.warn("load_graph has been renamed to load_sgraph. This function will be removed in the next release.", PendingDeprecationWarning)
return load_sgraph(filename, format=format)
def load_sgraph(filename, format='binary', delimiter='auto'):
"""
Load SGraph from text file or previously saved SGraph binary.
Parameters
----------
filename : string
Location of the file. Can be a local path or a remote URL.
format : {'binary', 'snap', 'csv', 'tsv'}, optional
Format of the file to load.
- 'binary': native graph format obtained from `SGraph.save`.
- 'snap': tab or space separated edge list format with comments, used in
the `Stanford Network Analysis Platform <http://snap.stanford.edu/snap/>`_.
- 'csv': comma-separated edge list without header or comments.
- 'tsv': tab-separated edge list without header or comments.
delimiter : str, optional
Specifying the Delimiter used in 'snap', 'csv' or 'tsv' format. Those
format has default delimiter, but sometimes it is useful to
overwrite the default delimiter.
Returns
-------
out : SGraph
Loaded SGraph.
See Also
--------
SGraph, SGraph.save
Examples
--------
>>> g = graphlab.SGraph().add_vertices([graphlab.Vertex(i) for i in range(5)])
Save and load in binary format.
>>> g.save('mygraph')
>>> g2 = graphlab.load_graph('mygraph')
"""
_mt._get_metric_tracker().track('sgraph.load_sgraph')
if format not in ['binary', 'snap', 'csv', 'tsv']:
raise ValueError('Invalid format: %s' % format)
with cython_context():
g = None
if format == 'binary':
proxy = glconnect.get_unity().load_graph(_make_internal_url(filename))
g = SGraph(_proxy=proxy)
elif format == 'snap':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, comment_char='#', delimiter=delimiter,
header=False, column_type_hints=int)
g = SGraph().add_edges(sf, 'X1', 'X2')
elif format == 'csv':
if delimiter == 'auto':
delimiter = ','
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
elif format == 'tsv':
if delimiter == 'auto':
delimiter = '\t'
sf = SFrame.read_csv(filename, header=False, delimiter=delimiter)
g = SGraph().add_edges(sf, 'X1', 'X2')
g.summary() # materialize
return g
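# --- Illustrative sketch (assumed file names, not part of the library) ---
# Besides the binary format shown in the docstring, text edge lists can be loaded as:
# g_csv = load_sgraph('edges.csv', format='csv')         # comma-separated "src,dst" lines
# g_tsv = load_sgraph('edges.tsv', format='tsv')         # tab-separated edge list
# g_snap = load_sgraph('snap_graph.txt', format='snap')  # SNAP edge list; '#' comments allowed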
#/**************************************************************************/
#/* */
#/* Helper Function */
#/* */
#/**************************************************************************/
def _vertex_list_to_dataframe(ls, id_column_name):
"""
Convert a list of vertices into dataframe.
"""
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
cols = reduce(set.union, (set(v.attr.keys()) for v in ls))
df = pd.DataFrame({id_column_name: [v.vid for v in ls]})
for c in cols:
df[c] = [v.attr.get(c) for v in ls]
return df
def _vertex_list_to_sframe(ls, id_column_name):
"""
Convert a list of vertices into an SFrame.
"""
sf = SFrame()
if type(ls) == list:
cols = reduce(set.union, (set(v.attr.keys()) for v in ls))
sf[id_column_name] = [v.vid for v in ls]
for c in cols:
sf[c] = [v.attr.get(c) for v in ls]
elif type(ls) == Vertex:
sf[id_column_name] = [ls.vid]
for col, val in ls.attr.iteritems():
sf[col] = [val]
else:
raise TypeError('Vertices type {} is Not supported.'.format(type(ls)))
return sf
def _edge_list_to_dataframe(ls, src_column_name, dst_column_name):
"""
Convert a list of edges into dataframe.
"""
assert HAS_PANDAS, 'Cannot use dataframe because Pandas is not available or version is too low.'
cols = reduce(set.union, (set(e.attr.keys()) for e in ls))
df = pd.DataFrame({
src_column_name: [e.src_vid for e in ls],
dst_column_name: [e.dst_vid for e in ls]})
for c in cols:
df[c] = [e.attr.get(c) for e in ls]
return df
def _edge_list_to_sframe(ls, src_column_name, dst_column_name):
"""
Convert a list of edges into an SFrame.
"""
sf = SFrame()
if type(ls) == list:
cols = reduce(set.union, (set(v.attr.keys()) for v in ls))
sf[src_column_name] = [e.src_vid for e in ls]
sf[dst_column_name] = [e.dst_vid for e in ls]
for c in cols:
sf[c] = [e.attr.get(c) for e in ls]
elif type(ls) == Edge:
sf[src_column_name] = [ls.src_vid]
sf[dst_column_name] = [ls.dst_vid]
else:
raise TypeError('Edges type {} is Not supported.'.format(type(ls)))
return sf
def _dataframe_to_vertex_list(df):
"""
Convert dataframe into list of vertices, assuming that vertex ids are stored in _VID_COLUMN.
"""
cols = df.columns
if len(cols):
assert _VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _VID_COLUMN
df = df[cols].T
ret = [Vertex(None, _series=df[col]) for col in df]
return ret
else:
return []
def _dataframe_to_edge_list(df):
"""
Convert dataframe into list of edges, assuming that source and target ids are stored in _SRC_VID_COLUMN, and _DST_VID_COLUMN respectively.
"""
cols = df.columns
if len(cols):
assert _SRC_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _SRC_VID_COLUMN
assert _DST_VID_COLUMN in cols, "Vertex DataFrame must contain column %s" % _DST_VID_COLUMN
df = df[cols].T
ret = [Edge(None, None, _series=df[col]) for col in df]
return ret
else:
return []
def _vertex_data_to_sframe(data, vid_field):
"""
Convert data into a vertex data sframe. Using vid_field to identify the id
column. The returned sframe will have id column name '__id'.
"""
if isinstance(data, SFrame):
# '__id' already in the sframe, and it is ok to not specify vid_field
if vid_field is None and _VID_COLUMN in data.column_names():
return data
if vid_field is None:
raise ValueError("vid_field must be specified for SFrame input")
data_copy = copy.copy(data)
data_copy.rename({vid_field: _VID_COLUMN})
return data_copy
if type(data) == Vertex or type(data) == list:
return _vertex_list_to_sframe(data, '__id')
elif HAS_PANDAS and type(data) == pd.DataFrame:
if vid_field is None:
# using the dataframe index as vertex id
if data.index.is_unique:
if not ("index" in data.columns):
# pandas reset_index() will insert a new column of name "index".
sf = SFrame(data.reset_index()) # "index"
sf.rename({'index': _VID_COLUMN})
return sf
else:
# pandas reset_index() will insert a new column of name "level_0" if there exists a column named "index".
sf = SFrame(data.reset_index()) # "level_0"
sf.rename({'level_0': _VID_COLUMN})
return sf
else:
raise ValueError("Index of the vertices dataframe is not unique, \
try specifying vid_field name to use a column for vertex ids.")
else:
sf = SFrame(data)
if _VID_COLUMN in sf.column_names():
raise ValueError('%s reserved vid column name already exists in the SFrame' % _VID_COLUMN)
sf.rename({vid_field: _VID_COLUMN})
return sf
else:
raise TypeError('Vertices type %s is Not supported.' % str(type(data)))
def _edge_data_to_sframe(data, src_field, dst_field):
"""
Convert data into an edge data sframe. Using src_field and dst_field to
identify the source and target id column. The returned sframe will have id
column name '__src_id', '__dst_id'
"""
if isinstance(data, SFrame):
# '__src_vid' and '__dst_vid' already in the sframe, and
# it is ok to not specify src_field and dst_field
if src_field is None and dst_field is None and \
_SRC_VID_COLUMN in data.column_names() and \
_DST_VID_COLUMN in data.column_names():
return data
if src_field is None:
raise ValueError("src_field must be specified for SFrame input")
if dst_field is None:
raise ValueError("dst_field must be specified for SFrame input")
data_copy = copy.copy(data)
if src_field == _DST_VID_COLUMN and dst_field == _SRC_VID_COLUMN:
# special case when src_field = "__dst_id" and dst_field = "__src_id"
# directly renaming will cause name collision
dst_id_column = data_copy[_DST_VID_COLUMN]
del data_copy[_DST_VID_COLUMN]
data_copy.rename({_SRC_VID_COLUMN: _DST_VID_COLUMN})
data_copy[_SRC_VID_COLUMN] = dst_id_column
else:
data_copy.rename({src_field: _SRC_VID_COLUMN, dst_field: _DST_VID_COLUMN})
return data_copy
elif HAS_PANDAS and type(data) == pd.DataFrame:
if src_field is None:
raise ValueError("src_field must be specified for Pandas input")
if dst_field is None:
raise ValueError("dst_field must be specified for Pandas input")
sf = SFrame(data)
if src_field == _DST_VID_COLUMN and dst_field == _SRC_VID_COLUMN:
# special case when src_field = "__dst_id" and dst_field = "__src_id"
# directly renaming will cause name collision
dst_id_column = sf[_DST_VID_COLUMN]
del sf[_DST_VID_COLUMN]
sf.rename({_SRC_VID_COLUMN: _DST_VID_COLUMN})
sf[_SRC_VID_COLUMN] = dst_id_column
else:
sf.rename({src_field: _SRC_VID_COLUMN, dst_field: _DST_VID_COLUMN})
return sf
elif type(data) == Edge:
return _edge_list_to_sframe([data], _SRC_VID_COLUMN, _DST_VID_COLUMN)
elif type(data) == list:
return _edge_list_to_sframe(data, _SRC_VID_COLUMN, _DST_VID_COLUMN)
else:
raise TypeError('Edges type %s is Not supported.' % str(type(data)))
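# --- Illustrative sketch (assumed values, not part of the library) ---
# The special case above covers input whose columns are already named '__src_id' and
# '__dst_id' but with the roles reversed; the two id columns are swapped rather than
# renamed in place, e.g.:
# sf = SFrame({'__src_id': [1, 2], '__dst_id': [3, 4]})
# swapped = _edge_data_to_sframe(sf, src_field='__dst_id', dst_field='__src_id')
# # swapped['__src_id'] -> [3, 4]; swapped['__dst_id'] -> [1, 2]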
## Hack: overriding GFrame class name to make it appears as SFrame##
GFrame.__name__ = SFrame.__name__
GFrame.__module__ = SFrame.__module__
| bsd-3-clause |
kpespinosa/BuildingMachineLearningSystemsWithPython | ch06/04_sent.py | 22 | 10125 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to tweak hyperparameters to improve P/R AUC
#
import time
start_time = time.time()
import re
import nltk
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from utils import log_false_positives
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.base import BaseEstimator
from sklearn.naive_bayes import MultinomialNB
from utils import load_sent_word_net
sent_word_net = load_sent_word_net()
phase = "04"
import json
poscache_filename = "poscache.json"
try:
poscache = json.load(open(poscache_filename, "r"))
except IOError:
poscache = {}
class LinguisticVectorizer(BaseEstimator):
def get_feature_names(self):
return np.array(['sent_neut', 'sent_pos', 'sent_neg',
'nouns', 'adjectives', 'verbs', 'adverbs',
'allcaps', 'exclamation', 'question'])
def fit(self, documents, y=None):
return self
def _get_sentiments(self, d):
# http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
sent = tuple(nltk.word_tokenize(d))
if poscache is not None:
if d in poscache:
tagged = poscache[d]
else:
poscache[d] = tagged = nltk.pos_tag(sent)
else:
tagged = nltk.pos_tag(sent)
pos_vals = []
neg_vals = []
nouns = 0.
adjectives = 0.
verbs = 0.
adverbs = 0.
for w, t in tagged:
p, n = 0, 0
sent_pos_type = None
if t.startswith("NN"):
sent_pos_type = "n"
nouns += 1
elif t.startswith("JJ"):
sent_pos_type = "a"
adjectives += 1
elif t.startswith("VB"):
sent_pos_type = "v"
verbs += 1
elif t.startswith("RB"):
sent_pos_type = "r"
adverbs += 1
if sent_pos_type is not None:
sent_word = "%s/%s" % (sent_pos_type, w)
if sent_word in sent_word_net:
p, n = sent_word_net[sent_word]
pos_vals.append(p)
neg_vals.append(n)
l = len(sent)
avg_pos_val = np.mean(pos_vals)
avg_neg_val = np.mean(neg_vals)
return [1 - avg_pos_val - avg_neg_val, avg_pos_val, avg_neg_val,
nouns / l, adjectives / l, verbs / l, adverbs / l]
def transform(self, documents):
obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs = np.array(
[self._get_sentiments(d) for d in documents]).T
allcaps = []
exclamation = []
question = []
for d in documents:
allcaps.append(
np.sum([t.isupper() for t in d.split() if len(t) > 2]))
exclamation.append(d.count("!"))
question.append(d.count("?"))
result = np.array(
[obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs, allcaps,
exclamation, question]).T
return result
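# --- Illustrative sketch (made-up tweets, not part of the original script) ---
# The vectorizer above maps each document to the 10 features listed in
# get_feature_names(); a minimal usage example (requires the NLTK POS tagger data):
# ling = LinguisticVectorizer()
# feats = ling.fit(None).transform(["GREAT movie!!!", "this was sad"])
# print(feats.shape)  # -> (2, 10)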
emo_repl = {
# positive emoticons
"<3": " good ",
":d": " good ", # :D in lower case
":dd": " good ", # :DD in lower case
"8)": " good ",
":-)": " good ",
":)": " good ",
";)": " good ",
"(-:": " good ",
"(:": " good ",
# negative emoticons:
":/": " bad ",
":>": " sad ",
":')": " sad ",
":-(": " bad ",
":(": " bad ",
":S": " bad ",
":-S": " bad ",
}
emo_repl_order = [k for (k_len, k) in reversed(
sorted([(len(k), k) for k in list(emo_repl.keys())]))]
re_repl = {
r"\br\b": "are",
r"\bu\b": "you",
r"\bhaha\b": "ha",
r"\bhahaha\b": "ha",
r"\bdon't\b": "do not",
r"\bdoesn't\b": "does not",
r"\bdidn't\b": "did not",
r"\bhasn't\b": "has not",
r"\bhaven't\b": "have not",
r"\bhadn't\b": "had not",
r"\bwon't\b": "will not",
r"\bwouldn't\b": "would not",
r"\bcan't\b": "can not",
r"\bcannot\b": "can not",
}
def create_union_model(params=None):
def preprocessor(tweet):
tweet = tweet.lower()
for k in emo_repl_order:
tweet = tweet.replace(k, emo_repl[k])
for r, repl in re_repl.items():
tweet = re.sub(r, repl, tweet)
return tweet.replace("-", " ").replace("_", " ")
tfidf_ngrams = TfidfVectorizer(preprocessor=preprocessor,
analyzer="word")
ling_stats = LinguisticVectorizer()
all_features = FeatureUnion(
[('ling', ling_stats), ('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('ling', ling_stats)])
clf = MultinomialNB()
pipeline = Pipeline([('all', all_features), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
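# Hedged usage sketch for create_union_model (not part of the original flow;
# X and Y stand for the raw tweet texts and 0/1 labels that __main__ builds
# further below):
#
#     clf = create_union_model(dict(all__tfidf__ngram_range=(1, 2)))
#     clf.fit(X, Y)
#     clf.predict(["Loving the new phone :)"])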
def __grid_search_model(clf_factory, X, Y):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
param_grid = dict(vect__ngram_range=[(1, 1), (1, 2), (1, 3)],
vect__min_df=[1, 2],
vect__smooth_idf=[False, True],
vect__use_idf=[False, True],
vect__sublinear_tf=[False, True],
vect__binary=[False, True],
clf__alpha=[0, 0.01, 0.05, 0.1, 0.5, 1],
)
grid_search = GridSearchCV(clf_factory(),
param_grid=param_grid,
cv=cv,
score_func=f1_score,
verbose=10)
grid_search.fit(X, Y)
clf = grid_search.best_estimator_
print(clf)
return clf
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
clfs = [] # just to later get the median
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
        median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
log_false_positives(clfs[median], X_test, y_test, name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
def get_best_model():
best_params = dict(all__tfidf__ngram_range=(1, 2),
all__tfidf__min_df=1,
all__tfidf__stop_words=None,
all__tfidf__smooth_idf=False,
all__tfidf__use_idf=False,
all__tfidf__sublinear_tf=True,
all__tfidf__binary=False,
clf__alpha=0.01,
)
best_clf = create_union_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
#from sklearn.utils import shuffle
# print "shuffle, sample"
#X_orig, Y_orig = shuffle(X_orig, Y_orig)
#X_orig = X_orig[:100,]
#Y_orig = Y_orig[:100,]
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_union_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos+neg vs rest", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print("time spent:", time.time() - start_time)
json.dump(poscache, open(poscache_filename, "w"))
| mit |
initNirvana/Easyphotos | env/lib/python3.4/site-packages/IPython/parallel/tests/test_view.py | 5 | 28733 | # -*- coding: utf-8 -*-
"""test View objects"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import base64
import sys
import platform
import time
from collections import namedtuple
from tempfile import NamedTemporaryFile
import zmq
from nose.plugins.attrib import attr
from IPython.testing import decorators as dec
from IPython.utils.io import capture_output
from IPython.utils.py3compat import unicode_type
from IPython import parallel as pmod
from IPython.parallel import error
from IPython.parallel import AsyncResult, AsyncHubResult, AsyncMapResult
from IPython.parallel.util import interactive
from IPython.parallel.tests import add_engines
from .clienttest import ClusterTestCase, crash, wait, skip_without
def setup():
add_engines(3, total=True)
point = namedtuple("point", "x y")
class TestView(ClusterTestCase):
def setUp(self):
# On Win XP, wait for resource cleanup, else parallel test group fails
if platform.system() == "Windows" and platform.win32_ver()[0] == "XP":
# 1 sec fails. 1.5 sec seems ok. Using 2 sec for margin of safety
time.sleep(2)
super(TestView, self).setUp()
@attr('crash')
def test_z_crash_mux(self):
"""test graceful handling of engine death (direct)"""
# self.add_engines(1)
eid = self.client.ids[-1]
ar = self.client[eid].apply_async(crash)
self.assertRaisesRemote(error.EngineError, ar.get, 10)
eid = ar.engine_id
tic = time.time()
while eid in self.client.ids and time.time()-tic < 5:
time.sleep(.01)
self.client.spin()
self.assertFalse(eid in self.client.ids, "Engine should have died")
def test_push_pull(self):
"""test pushing and pulling"""
data = dict(a=10, b=1.05, c=list(range(10)), d={'e':(1,2),'f':'hi'})
t = self.client.ids[-1]
v = self.client[t]
push = v.push
pull = v.pull
v.block=True
nengines = len(self.client)
push({'data':data})
d = pull('data')
self.assertEqual(d, data)
self.client[:].push({'data':data})
d = self.client[:].pull('data', block=True)
self.assertEqual(d, nengines*[data])
ar = push({'data':data}, block=False)
self.assertTrue(isinstance(ar, AsyncResult))
r = ar.get()
ar = self.client[:].pull('data', block=False)
self.assertTrue(isinstance(ar, AsyncResult))
r = ar.get()
self.assertEqual(r, nengines*[data])
self.client[:].push(dict(a=10,b=20))
r = self.client[:].pull(('a','b'), block=True)
self.assertEqual(r, nengines*[[10,20]])
def test_push_pull_function(self):
"test pushing and pulling functions"
def testf(x):
return 2.0*x
t = self.client.ids[-1]
v = self.client[t]
v.block=True
push = v.push
pull = v.pull
execute = v.execute
push({'testf':testf})
r = pull('testf')
self.assertEqual(r(1.0), testf(1.0))
execute('r = testf(10)')
r = pull('r')
self.assertEqual(r, testf(10))
ar = self.client[:].push({'testf':testf}, block=False)
ar.get()
ar = self.client[:].pull('testf', block=False)
rlist = ar.get()
for r in rlist:
self.assertEqual(r(1.0), testf(1.0))
execute("def g(x): return x*x")
r = pull(('testf','g'))
self.assertEqual((r[0](10),r[1](10)), (testf(10), 100))
def test_push_function_globals(self):
"""test that pushed functions have access to globals"""
@interactive
def geta():
return a
# self.add_engines(1)
v = self.client[-1]
v.block=True
v['f'] = geta
self.assertRaisesRemote(NameError, v.execute, 'b=f()')
v.execute('a=5')
v.execute('b=f()')
self.assertEqual(v['b'], 5)
def test_push_function_defaults(self):
"""test that pushed functions preserve default args"""
def echo(a=10):
return a
v = self.client[-1]
v.block=True
v['f'] = echo
v.execute('b=f()')
self.assertEqual(v['b'], 10)
def test_get_result(self):
"""test getting results from the Hub."""
c = pmod.Client(profile='iptest')
# self.add_engines(1)
t = c.ids[-1]
v = c[t]
v2 = self.client[t]
ar = v.apply_async(wait, 1)
# give the monitor time to notice the message
time.sleep(.25)
ahr = v2.get_result(ar.msg_ids[0], owner=False)
self.assertIsInstance(ahr, AsyncHubResult)
self.assertEqual(ahr.get(), ar.get())
ar2 = v2.get_result(ar.msg_ids[0])
self.assertNotIsInstance(ar2, AsyncHubResult)
self.assertEqual(ahr.get(), ar2.get())
c.spin()
c.close()
def test_run_newline(self):
"""test that run appends newline to files"""
with NamedTemporaryFile('w', delete=False) as f:
f.write("""def g():
return 5
""")
v = self.client[-1]
v.run(f.name, block=True)
self.assertEqual(v.apply_sync(lambda f: f(), pmod.Reference('g')), 5)
def test_apply_tracked(self):
"""test tracking for apply"""
# self.add_engines(1)
t = self.client.ids[-1]
v = self.client[t]
v.block=False
def echo(n=1024*1024, **kwargs):
with v.temp_flags(**kwargs):
return v.apply(lambda x: x, 'x'*n)
ar = echo(1, track=False)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertTrue(ar.sent)
ar = echo(track=True)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertEqual(ar.sent, ar._tracker.done)
ar._tracker.wait()
self.assertTrue(ar.sent)
def test_push_tracked(self):
t = self.client.ids[-1]
ns = dict(x='x'*1024*1024)
v = self.client[t]
ar = v.push(ns, block=False, track=False)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertTrue(ar.sent)
ar = v.push(ns, block=False, track=True)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
ar._tracker.wait()
self.assertEqual(ar.sent, ar._tracker.done)
self.assertTrue(ar.sent)
ar.get()
def test_scatter_tracked(self):
t = self.client.ids
x='x'*1024*1024
ar = self.client[t].scatter('x', x, block=False, track=False)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertTrue(ar.sent)
ar = self.client[t].scatter('x', x, block=False, track=True)
self.assertTrue(isinstance(ar._tracker, zmq.MessageTracker))
self.assertEqual(ar.sent, ar._tracker.done)
ar._tracker.wait()
self.assertTrue(ar.sent)
ar.get()
def test_remote_reference(self):
v = self.client[-1]
v['a'] = 123
ra = pmod.Reference('a')
b = v.apply_sync(lambda x: x, ra)
self.assertEqual(b, 123)
def test_scatter_gather(self):
view = self.client[:]
seq1 = list(range(16))
view.scatter('a', seq1)
seq2 = view.gather('a', block=True)
self.assertEqual(seq2, seq1)
self.assertRaisesRemote(NameError, view.gather, 'asdf', block=True)
@skip_without('numpy')
def test_scatter_gather_numpy(self):
import numpy
from numpy.testing.utils import assert_array_equal
view = self.client[:]
a = numpy.arange(64)
view.scatter('a', a, block=True)
b = view.gather('a', block=True)
assert_array_equal(b, a)
def test_scatter_gather_lazy(self):
"""scatter/gather with targets='all'"""
view = self.client.direct_view(targets='all')
x = list(range(64))
view.scatter('x', x)
gathered = view.gather('x', block=True)
self.assertEqual(gathered, x)
@dec.known_failure_py3
@skip_without('numpy')
def test_push_numpy_nocopy(self):
import numpy
view = self.client[:]
a = numpy.arange(64)
view['A'] = a
@interactive
def check_writeable(x):
return x.flags.writeable
for flag in view.apply_sync(check_writeable, pmod.Reference('A')):
self.assertFalse(flag, "array is writeable, push shouldn't have pickled it")
view.push(dict(B=a))
for flag in view.apply_sync(check_writeable, pmod.Reference('B')):
self.assertFalse(flag, "array is writeable, push shouldn't have pickled it")
@skip_without('numpy')
def test_apply_numpy(self):
"""view.apply(f, ndarray)"""
import numpy
from numpy.testing.utils import assert_array_equal
A = numpy.random.random((100,100))
view = self.client[-1]
for dt in [ 'int32', 'uint8', 'float32', 'float64' ]:
B = A.astype(dt)
C = view.apply_sync(lambda x:x, B)
assert_array_equal(B,C)
@skip_without('numpy')
def test_apply_numpy_object_dtype(self):
"""view.apply(f, ndarray) with dtype=object"""
import numpy
from numpy.testing.utils import assert_array_equal
view = self.client[-1]
A = numpy.array([dict(a=5)])
B = view.apply_sync(lambda x:x, A)
assert_array_equal(A,B)
A = numpy.array([(0, dict(b=10))], dtype=[('i', int), ('o', object)])
B = view.apply_sync(lambda x:x, A)
assert_array_equal(A,B)
@skip_without('numpy')
def test_push_pull_recarray(self):
"""push/pull recarrays"""
import numpy
from numpy.testing.utils import assert_array_equal
view = self.client[-1]
R = numpy.array([
(1, 'hi', 0.),
(2**30, 'there', 2.5),
(-99999, 'world', -12345.6789),
], [('n', int), ('s', '|S10'), ('f', float)])
view['RR'] = R
R2 = view['RR']
r_dtype, r_shape = view.apply_sync(interactive(lambda : (RR.dtype, RR.shape)))
self.assertEqual(r_dtype, R.dtype)
self.assertEqual(r_shape, R.shape)
self.assertEqual(R2.dtype, R.dtype)
self.assertEqual(R2.shape, R.shape)
assert_array_equal(R2, R)
@skip_without('pandas')
def test_push_pull_timeseries(self):
"""push/pull pandas.TimeSeries"""
import pandas
ts = pandas.TimeSeries(list(range(10)))
view = self.client[-1]
view.push(dict(ts=ts), block=True)
rts = view['ts']
self.assertEqual(type(rts), type(ts))
self.assertTrue((ts == rts).all())
def test_map(self):
view = self.client[:]
def f(x):
return x**2
data = list(range(16))
r = view.map_sync(f, data)
self.assertEqual(r, list(map(f, data)))
def test_map_empty_sequence(self):
view = self.client[:]
r = view.map_sync(lambda x: x, [])
self.assertEqual(r, [])
def test_map_iterable(self):
"""test map on iterables (direct)"""
view = self.client[:]
# 101 is prime, so it won't be evenly distributed
arr = range(101)
# ensure it will be an iterator, even in Python 3
it = iter(arr)
r = view.map_sync(lambda x: x, it)
self.assertEqual(r, list(arr))
@skip_without('numpy')
def test_map_numpy(self):
"""test map on numpy arrays (direct)"""
import numpy
from numpy.testing.utils import assert_array_equal
view = self.client[:]
# 101 is prime, so it won't be evenly distributed
arr = numpy.arange(101)
r = view.map_sync(lambda x: x, arr)
assert_array_equal(r, arr)
def test_scatter_gather_nonblocking(self):
data = list(range(16))
view = self.client[:]
view.scatter('a', data, block=False)
ar = view.gather('a', block=False)
self.assertEqual(ar.get(), data)
@skip_without('numpy')
def test_scatter_gather_numpy_nonblocking(self):
import numpy
from numpy.testing.utils import assert_array_equal
a = numpy.arange(64)
view = self.client[:]
ar = view.scatter('a', a, block=False)
self.assertTrue(isinstance(ar, AsyncResult))
amr = view.gather('a', block=False)
self.assertTrue(isinstance(amr, AsyncMapResult))
assert_array_equal(amr.get(), a)
def test_execute(self):
view = self.client[:]
# self.client.debug=True
execute = view.execute
ar = execute('c=30', block=False)
self.assertTrue(isinstance(ar, AsyncResult))
ar = execute('d=[0,1,2]', block=False)
self.client.wait(ar, 1)
self.assertEqual(len(ar.get()), len(self.client))
for c in view['c']:
self.assertEqual(c, 30)
def test_abort(self):
view = self.client[-1]
ar = view.execute('import time; time.sleep(1)', block=False)
ar2 = view.apply_async(lambda : 2)
ar3 = view.apply_async(lambda : 3)
view.abort(ar2)
view.abort(ar3.msg_ids)
self.assertRaises(error.TaskAborted, ar2.get)
self.assertRaises(error.TaskAborted, ar3.get)
def test_abort_all(self):
"""view.abort() aborts all outstanding tasks"""
view = self.client[-1]
ars = [ view.apply_async(time.sleep, 0.25) for i in range(10) ]
view.abort()
view.wait(timeout=5)
for ar in ars[5:]:
self.assertRaises(error.TaskAborted, ar.get)
def test_temp_flags(self):
view = self.client[-1]
view.block=True
with view.temp_flags(block=False):
self.assertFalse(view.block)
self.assertTrue(view.block)
@dec.known_failure_py3
def test_importer(self):
view = self.client[-1]
view.clear(block=True)
with view.importer:
import re
@interactive
def findall(pat, s):
            # this globals() step isn't necessary in real code; it is only
            # here to keep `re` out of the function's closure in this test
re = globals()['re']
return re.findall(pat, s)
self.assertEqual(view.apply_sync(findall, '\w+', 'hello world'), 'hello world'.split())
def test_unicode_execute(self):
"""test executing unicode strings"""
v = self.client[-1]
v.block=True
if sys.version_info[0] >= 3:
code="a='é'"
else:
code=u"a=u'é'"
v.execute(code)
self.assertEqual(v['a'], u'é')
def test_unicode_apply_result(self):
"""test unicode apply results"""
v = self.client[-1]
r = v.apply_sync(lambda : u'é')
self.assertEqual(r, u'é')
def test_unicode_apply_arg(self):
"""test passing unicode arguments to apply"""
v = self.client[-1]
@interactive
def check_unicode(a, check):
assert not isinstance(a, bytes), "%r is bytes, not unicode"%a
assert isinstance(check, bytes), "%r is not bytes"%check
assert a.encode('utf8') == check, "%s != %s"%(a,check)
for s in [ u'é', u'ßø®∫',u'asdf' ]:
try:
v.apply_sync(check_unicode, s, s.encode('utf8'))
except error.RemoteError as e:
if e.ename == 'AssertionError':
self.fail(e.evalue)
else:
raise e
def test_map_reference(self):
"""view.map(<Reference>, *seqs) should work"""
v = self.client[:]
v.scatter('n', self.client.ids, flatten=True)
v.execute("f = lambda x,y: x*y")
rf = pmod.Reference('f')
nlist = list(range(10))
mlist = nlist[::-1]
expected = [ m*n for m,n in zip(mlist, nlist) ]
result = v.map_sync(rf, mlist, nlist)
self.assertEqual(result, expected)
def test_apply_reference(self):
"""view.apply(<Reference>, *args) should work"""
v = self.client[:]
v.scatter('n', self.client.ids, flatten=True)
v.execute("f = lambda x: n*x")
rf = pmod.Reference('f')
result = v.apply_sync(rf, 5)
expected = [ 5*id for id in self.client.ids ]
self.assertEqual(result, expected)
def test_eval_reference(self):
v = self.client[self.client.ids[0]]
v['g'] = list(range(5))
rg = pmod.Reference('g[0]')
echo = lambda x:x
self.assertEqual(v.apply_sync(echo, rg), 0)
def test_reference_nameerror(self):
v = self.client[self.client.ids[0]]
r = pmod.Reference('elvis_has_left')
echo = lambda x:x
self.assertRaisesRemote(NameError, v.apply_sync, echo, r)
def test_single_engine_map(self):
e0 = self.client[self.client.ids[0]]
r = list(range(5))
check = [ -1*i for i in r ]
result = e0.map_sync(lambda x: -1*x, r)
self.assertEqual(result, check)
def test_len(self):
"""len(view) makes sense"""
e0 = self.client[self.client.ids[0]]
self.assertEqual(len(e0), 1)
v = self.client[:]
self.assertEqual(len(v), len(self.client.ids))
v = self.client.direct_view('all')
self.assertEqual(len(v), len(self.client.ids))
v = self.client[:2]
self.assertEqual(len(v), 2)
v = self.client[:1]
self.assertEqual(len(v), 1)
v = self.client.load_balanced_view()
self.assertEqual(len(v), len(self.client.ids))
# begin execute tests
def test_execute_reply(self):
e0 = self.client[self.client.ids[0]]
e0.block = True
ar = e0.execute("5", silent=False)
er = ar.get()
self.assertEqual(str(er), "<ExecuteReply[%i]: 5>" % er.execution_count)
self.assertEqual(er.execute_result['data']['text/plain'], '5')
def test_execute_reply_rich(self):
e0 = self.client[self.client.ids[0]]
e0.block = True
e0.execute("from IPython.display import Image, HTML")
ar = e0.execute("Image(data=b'garbage', format='png', width=10)", silent=False)
er = ar.get()
b64data = base64.encodestring(b'garbage').decode('ascii')
self.assertEqual(er._repr_png_(), (b64data, dict(width=10)))
ar = e0.execute("HTML('<b>bold</b>')", silent=False)
er = ar.get()
self.assertEqual(er._repr_html_(), "<b>bold</b>")
def test_execute_reply_stdout(self):
e0 = self.client[self.client.ids[0]]
e0.block = True
ar = e0.execute("print (5)", silent=False)
er = ar.get()
self.assertEqual(er.stdout.strip(), '5')
def test_execute_result(self):
"""execute triggers execute_result with silent=False"""
view = self.client[:]
ar = view.execute("5", silent=False, block=True)
expected = [{'text/plain' : '5'}] * len(view)
mimes = [ out['data'] for out in ar.execute_result ]
self.assertEqual(mimes, expected)
def test_execute_silent(self):
"""execute does not trigger execute_result with silent=True"""
view = self.client[:]
ar = view.execute("5", block=True)
expected = [None] * len(view)
self.assertEqual(ar.execute_result, expected)
def test_execute_magic(self):
"""execute accepts IPython commands"""
view = self.client[:]
view.execute("a = 5")
ar = view.execute("%whos", block=True)
# this will raise, if that failed
ar.get(5)
for stdout in ar.stdout:
lines = stdout.splitlines()
self.assertEqual(lines[0].split(), ['Variable', 'Type', 'Data/Info'])
found = False
for line in lines[2:]:
split = line.split()
if split == ['a', 'int', '5']:
found = True
break
self.assertTrue(found, "whos output wrong: %s" % stdout)
def test_execute_displaypub(self):
"""execute tracks display_pub output"""
view = self.client[:]
view.execute("from IPython.core.display import *")
ar = view.execute("[ display(i) for i in range(5) ]", block=True)
expected = [ {u'text/plain' : unicode_type(j)} for j in range(5) ]
for outputs in ar.outputs:
mimes = [ out['data'] for out in outputs ]
self.assertEqual(mimes, expected)
def test_apply_displaypub(self):
"""apply tracks display_pub output"""
view = self.client[:]
view.execute("from IPython.core.display import *")
@interactive
def publish():
[ display(i) for i in range(5) ]
ar = view.apply_async(publish)
ar.get(5)
expected = [ {u'text/plain' : unicode_type(j)} for j in range(5) ]
for outputs in ar.outputs:
mimes = [ out['data'] for out in outputs ]
self.assertEqual(mimes, expected)
def test_execute_raises(self):
"""exceptions in execute requests raise appropriately"""
view = self.client[-1]
ar = view.execute("1/0")
self.assertRaisesRemote(ZeroDivisionError, ar.get, 2)
def test_remoteerror_render_exception(self):
"""RemoteErrors get nice tracebacks"""
view = self.client[-1]
ar = view.execute("1/0")
ip = get_ipython()
ip.user_ns['ar'] = ar
with capture_output() as io:
ip.run_cell("ar.get(2)")
self.assertTrue('ZeroDivisionError' in io.stdout, io.stdout)
def test_compositeerror_render_exception(self):
"""CompositeErrors get nice tracebacks"""
view = self.client[:]
ar = view.execute("1/0")
ip = get_ipython()
ip.user_ns['ar'] = ar
with capture_output() as io:
ip.run_cell("ar.get(2)")
count = min(error.CompositeError.tb_limit, len(view))
self.assertEqual(io.stdout.count('ZeroDivisionError'), count * 2, io.stdout)
self.assertEqual(io.stdout.count('by zero'), count, io.stdout)
self.assertEqual(io.stdout.count(':execute'), count, io.stdout)
def test_compositeerror_truncate(self):
"""Truncate CompositeErrors with many exceptions"""
view = self.client[:]
msg_ids = []
for i in range(10):
ar = view.execute("1/0")
msg_ids.extend(ar.msg_ids)
ar = self.client.get_result(msg_ids)
try:
ar.get()
except error.CompositeError as _e:
e = _e
else:
self.fail("Should have raised CompositeError")
lines = e.render_traceback()
with capture_output() as io:
e.print_traceback()
self.assertTrue("more exceptions" in lines[-1])
count = e.tb_limit
self.assertEqual(io.stdout.count('ZeroDivisionError'), 2 * count, io.stdout)
self.assertEqual(io.stdout.count('by zero'), count, io.stdout)
self.assertEqual(io.stdout.count(':execute'), count, io.stdout)
@dec.skipif_not_matplotlib
def test_magic_pylab(self):
"""%pylab works on engines"""
view = self.client[-1]
ar = view.execute("%pylab inline")
# at least check if this raised:
reply = ar.get(5)
# include imports, in case user config
ar = view.execute("plot(rand(100))", silent=False)
reply = ar.get(5)
self.assertEqual(len(reply.outputs), 1)
output = reply.outputs[0]
self.assertTrue("data" in output)
data = output['data']
self.assertTrue("image/png" in data)
def test_func_default_func(self):
"""interactively defined function as apply func default"""
def foo():
return 'foo'
def bar(f=foo):
return f()
view = self.client[-1]
ar = view.apply_async(bar)
r = ar.get(10)
self.assertEqual(r, 'foo')
def test_data_pub_single(self):
view = self.client[-1]
ar = view.execute('\n'.join([
'from IPython.kernel.zmq.datapub import publish_data',
'for i in range(5):',
' publish_data(dict(i=i))'
]), block=False)
self.assertTrue(isinstance(ar.data, dict))
ar.get(5)
self.assertEqual(ar.data, dict(i=4))
def test_data_pub(self):
view = self.client[:]
ar = view.execute('\n'.join([
'from IPython.kernel.zmq.datapub import publish_data',
'for i in range(5):',
' publish_data(dict(i=i))'
]), block=False)
self.assertTrue(all(isinstance(d, dict) for d in ar.data))
ar.get(5)
self.assertEqual(ar.data, [dict(i=4)] * len(ar))
def test_can_list_arg(self):
"""args in lists are canned"""
view = self.client[-1]
view['a'] = 128
rA = pmod.Reference('a')
ar = view.apply_async(lambda x: x, [rA])
r = ar.get(5)
self.assertEqual(r, [128])
def test_can_dict_arg(self):
"""args in dicts are canned"""
view = self.client[-1]
view['a'] = 128
rA = pmod.Reference('a')
ar = view.apply_async(lambda x: x, dict(foo=rA))
r = ar.get(5)
self.assertEqual(r, dict(foo=128))
def test_can_list_kwarg(self):
"""kwargs in lists are canned"""
view = self.client[-1]
view['a'] = 128
rA = pmod.Reference('a')
ar = view.apply_async(lambda x=5: x, x=[rA])
r = ar.get(5)
self.assertEqual(r, [128])
def test_can_dict_kwarg(self):
"""kwargs in dicts are canned"""
view = self.client[-1]
view['a'] = 128
rA = pmod.Reference('a')
ar = view.apply_async(lambda x=5: x, dict(foo=rA))
r = ar.get(5)
self.assertEqual(r, dict(foo=128))
def test_map_ref(self):
"""view.map works with references"""
view = self.client[:]
ranks = sorted(self.client.ids)
view.scatter('rank', ranks, flatten=True)
rrank = pmod.Reference('rank')
amr = view.map_async(lambda x: x*2, [rrank] * len(view))
drank = amr.get(5)
self.assertEqual(drank, [ r*2 for r in ranks ])
def test_nested_getitem_setitem(self):
"""get and set with view['a.b']"""
view = self.client[-1]
view.execute('\n'.join([
'class A(object): pass',
'a = A()',
'a.b = 128',
]), block=True)
ra = pmod.Reference('a')
r = view.apply_sync(lambda x: x.b, ra)
self.assertEqual(r, 128)
self.assertEqual(view['a.b'], 128)
view['a.b'] = 0
r = view.apply_sync(lambda x: x.b, ra)
self.assertEqual(r, 0)
self.assertEqual(view['a.b'], 0)
def test_return_namedtuple(self):
def namedtuplify(x, y):
from IPython.parallel.tests.test_view import point
return point(x, y)
view = self.client[-1]
p = view.apply_sync(namedtuplify, 1, 2)
self.assertEqual(p.x, 1)
self.assertEqual(p.y, 2)
def test_apply_namedtuple(self):
def echoxy(p):
return p.y, p.x
view = self.client[-1]
tup = view.apply_sync(echoxy, point(1, 2))
self.assertEqual(tup, (2,1))
def test_sync_imports(self):
view = self.client[-1]
with capture_output() as io:
with view.sync_imports():
import IPython
self.assertIn("IPython", io.stdout)
@interactive
def find_ipython():
return 'IPython' in globals()
assert view.apply_sync(find_ipython)
def test_sync_imports_quiet(self):
view = self.client[-1]
with capture_output() as io:
with view.sync_imports(quiet=True):
import IPython
self.assertEqual(io.stdout, '')
@interactive
def find_ipython():
return 'IPython' in globals()
assert view.apply_sync(find_ipython)
| mit |
maciekcc/tensorflow | tensorflow/examples/learn/text_classification.py | 12 | 6651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def estimator_spec_for_softmax_classification(
logits, labels, mode):
"""Returns EstimatorSpec instance for softmax classification."""
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def bag_of_words_model(features, labels, mode):
"""A bag-of-words model. Note it disregards the word order in the text."""
bow_column = tf.feature_column.categorical_column_with_identity(
WORDS_FEATURE, num_buckets=n_words)
bow_embedding_column = tf.feature_column.embedding_column(
bow_column, dimension=EMBEDDING_SIZE)
bow = tf.feature_column.input_layer(
features,
feature_columns=[bow_embedding_column])
logits = tf.layers.dense(bow, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
def rnn_model(features, labels, mode):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
  # Split into a list of per-word embeddings, removing the document-length dim.
  # word_list is a list of MAX_DOCUMENT_LENGTH tensors of shape
  # [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
  # Given the encoding of the RNN, take the encoding of the last step (i.e. the
  # final hidden state of the network) and pass it as features to a softmax
  # classification over the output classes.
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
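# Shape walkthrough for rnn_model (illustrative only, assuming a batch of 32
# documents and the constants defined above): features[WORDS_FEATURE] is
# [32, MAX_DOCUMENT_LENGTH] = [32, 10]; word_vectors is [32, 10, 50];
# word_list is a list of 10 tensors of shape [32, 50]; encoding is [32, 50];
# logits is [32, MAX_LABEL] = [32, 15].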
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
# Subtract 1 because VocabularyProcessor outputs a word-id matrix where word
# ids start from 1 and 0 means 'no word'. But
# categorical_column_with_identity assumes 0-based count and uses -1 for
# missing word.
x_train -= 1
x_test -= 1
model_fn = bag_of_words_model
classifier = tf.estimator.Estimator(model_fn=model_fn)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ishanic/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes (digits 0 to 9) from their raw images. In contrast to the
covertype dataset, the feature space is homogeneous.
Example of output:
[..]
Classification performance:
===========================
Classifier               train-time   test-time   error-rate
------------------------------------------------------------
Nystroem-SVM                105.07s       0.91s       0.0227
ExtraTrees                   48.20s       1.22s       0.0288
RandomForest                 47.17s       1.21s       0.0304
SampledRBF-SVM              140.45s       0.84s       0.0486
CART                         22.84s       0.16s       0.1214
dummy                         0.01s       0.02s       0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
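# An additional model can be benchmarked by extending the mapping above before
# the arguments are parsed; the new key then becomes a valid --classifiers
# choice. (Hedged example; LogisticRegression is not part of this benchmark.)
#
#     from sklearn.linear_model import LogisticRegression
#     ESTIMATORS['LogReg'] = LogisticRegression(C=1e3)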
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
                        help="Allow one to choose between Fortran- and "
                             "C-ordered data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
jlegendary/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
SlipknotTN/udacity-deeplearning-nanodegree | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
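# Hedged usage sketch (the pickle file name is one produced by
# preprocess_and_save_data above; the loop body is up to the caller):
#
#     features, labels = pickle.load(open('preprocess_validation.p', mode='rb'))
#     for batch_features, batch_labels in batch_features_labels(features, labels, 64):
#         ...  # feed one mini-batch of at most 64 samples to the network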
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
chrisburr/scikit-learn | sklearn/base.py | 22 | 18131 | """Base classes for all estimators."""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import copy
import warnings
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from .utils.deprecation import deprecated
from .exceptions import ChangedBehaviorWarning as ChangedBehaviorWarning_
class ChangedBehaviorWarning(ChangedBehaviorWarning_):
pass
ChangedBehaviorWarning = deprecated("ChangedBehaviorWarning has been moved "
"into the sklearn.exceptions module. "
"It will not be available here from "
"version 0.19")(ChangedBehaviorWarning)
##############################################################################
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator: estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe: boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator "
                            "as it does not implement a 'get_params' method."
                            % (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
# We have to use '.flat' for 2D arrays
and param1.flat[0] == param2.flat[0]
and param1.flat[-1] == param2.flat[-1]
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and param1.data[0] == param2.data[0]
and param1.data[-1] == param2.data[-1]
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
new_obj_val = new_object_params[name]
params_set_val = params_set[name]
# The following construct is required to check equality on special
# singletons such as np.nan that are not equal to them-selves:
equality_test = (new_obj_val == params_set_val or
new_obj_val is params_set_val)
if not equality_test:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
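# Illustrative use of clone() (a sketch, not part of the original module): the
# clone keeps the constructor parameters but none of the fitted state.
#
#     from sklearn.linear_model import LogisticRegression
#     est = LogisticRegression(C=10.0)
#     new_est = clone(est)          # new_est.get_params()['C'] == 10.0
#     hasattr(new_est, 'coef_')     # False, even if `est` had been fitted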
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
        The offset in characters to add at the beginning of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
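    # Hedged example of the ``<component>__<parameter>`` convention handled
    # above (the pipeline below is illustrative and not part of this module):
    #
    #     from sklearn.pipeline import Pipeline
    #     from sklearn.feature_extraction.text import TfidfVectorizer
    #     from sklearn.naive_bayes import MultinomialNB
    #     pipe = Pipeline([('vect', TfidfVectorizer()), ('clf', MultinomialNB())])
    #     pipe.set_params(vect__ngram_range=(1, 2), clf__alpha=0.1)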
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the residual
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
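    # Worked sketch of the R^2 definition above (illustrative numbers only):
    # with y_true = [1, 2, 3] and y_pred = [1, 2, 4], u = 0 + 0 + 1 = 1 and
    # v = (1-2)**2 + (2-2)**2 + (3-2)**2 = 2, so R^2 = 1 - 1/2 = 0.5.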
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
        # method is possible for a given estimator
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor."""
return getattr(estimator, "_estimator_type", None) == "regressor"
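# Illustrative sketch, not part of scikit-learn: ``is_classifier`` and
# ``is_regressor`` only inspect the duck-typed ``_estimator_type`` tag that the
# mixins above provide, so any object carrying the tag is recognized.
# ``_TaggedRegressor`` and ``_estimator_type_demo`` are hypothetical names used
# only for this demonstration.
class _TaggedRegressor(object):
    _estimator_type = "regressor"
def _estimator_type_demo():
    est = _TaggedRegressor()
    return is_regressor(est), is_classifier(est)  # -> (True, False)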
| bsd-3-clause |
astocko/statsmodels | statsmodels/tsa/statespace/tests/test_representation.py | 6 | 19651 | """
Tests for representation module
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
from statsmodels.tsa.statespace.representation import Representation
from statsmodels.tsa.statespace.model import Model
from .results import results_kalman_filter
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
class Clark1987(object):
"""
Clark's (1987) univariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, **kwargs):
self.true = results_kalman_filter.uc_uni
self.true_states = pd.DataFrame(self.true['states'])
# GDP, Quarterly, 1947.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
# Construct the statespace representation
k_states = 4
self.model = Model(data['lgdp'], k_states=k_states, **kwargs)
self.model.design[:, :, 0] = [1, 1, 0, 0]
self.model.transition[([0, 0, 1, 1, 2, 3],
[0, 3, 1, 2, 1, 3],
[0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
self.model.selection = np.eye(self.model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
self.true['parameters']
)
self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.model.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, sigma_w**2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states)*100
# Initialization: modification
initial_state_cov = np.dot(
np.dot(self.model.transition[:, :, 0], initial_state_cov),
self.model.transition[:, :, 0].T
)
self.model.initialize_known(initial_state, initial_state_cov)
def run_filter(self):
# Filter the data
self.results = self.model.filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[self.true['start']:].sum(),
self.true['loglike'], 5
)
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[3][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
class TestClark1987Single(Clark1987):
"""
Basic single precision test for the loglikelihood and filtered states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987Single, self).__init__(
dtype=np.float32, conserve_memory=0
)
self.run_filter()
class TestClark1987Double(Clark1987):
"""
Basic double precision test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Double, self).__init__(
dtype=float, conserve_memory=0
)
self.run_filter()
class TestClark1987SingleComplex(Clark1987):
"""
Basic single precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987SingleComplex, self).__init__(
dtype=np.complex64, conserve_memory=0
)
self.run_filter()
class TestClark1987DoubleComplex(Clark1987):
"""
Basic double precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987DoubleComplex, self).__init__(
dtype=complex, conserve_memory=0
)
self.run_filter()
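# Note on the ``conserve_memory`` values used in the classes below: they are
# bit masks for the Kalman filter's memory-conservation options (0x01 and 0x02
# appear to correspond to not storing forecasts and predicted states,
# respectively); see statsmodels.tsa.statespace.kalman_filter for the
# authoritative constants.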
class TestClark1987Conserve(Clark1987):
"""
Memory conservation test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class Clark1987Forecast(Clark1987):
"""
Forecasting test for the loglikelihood and filtered states.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1987Forecast, self).__init__(
dtype=dtype, conserve_memory=conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self.model.endog = np.array(
np.r_[self.model.endog[0, :], [np.nan]*nforecast],
ndmin=2, dtype=dtype, order="F"
)
self.model.nobs = self.model.endog.shape[1]
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[3][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
class TestClark1987ForecastDouble(Clark1987Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987ForecastDouble, self).__init__()
self.run_filter()
class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.run_filter()
class TestClark1987ForecastConserve(Clark1987Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class TestClark1987ConserveAll(Clark1987):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
self.model.loglikelihood_burn = self.true['start']
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[0], self.true['loglike'], 5
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.results.filtered_state[0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][-1],
self.true_states.iloc[end-1, 1], 4
)
class Clark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, **kwargs):
self.true = results_kalman_filter.uc_bi
self.true_states = pd.DataFrame(self.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
k_states = 6
self.model = Model(data, k_states=k_states, **kwargs)
# Statespace representation
self.model.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
self.model.transition[
([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])
] = [1, 1, 0, 0, 1, 1, 1, 1]
self.model.selection = np.eye(self.model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
self.true['parameters'],
)
self.model.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.model.obs_cov[1, 1, 0] = sigma_ec**2
self.model.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states)*100
        # Initialization: modification
initial_state_cov = np.dot(
np.dot(self.model.transition[:, :, 0], initial_state_cov),
self.model.transition[:, :, 0].T
)
self.model.initialize_known(initial_state, initial_state_cov)
def run_filter(self):
# Filter the data
self.results = self.model.filter()
def test_loglike(self):
assert_almost_equal(
# self.results.llf_obs[self.true['start']:].sum(),
self.results.llf_obs[0:].sum(),
self.true['loglike'], 2
)
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][self.true['start']:],
self.true_states.iloc[:, 3], 4
)
class TestClark1989(Clark1989):
"""
Basic double precision test for the loglikelihood and filtered
states with two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989, self).__init__(dtype=float, conserve_memory=0)
self.run_filter()
class TestClark1989Conserve(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class Clark1989Forecast(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1989Forecast, self).__init__(
dtype=dtype, conserve_memory=conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self.model.endog = np.array(
np.c_[
self.model.endog,
np.r_[[np.nan, np.nan]*nforecast].reshape(2, nforecast)
],
ndmin=2, dtype=dtype, order="F"
)
self.model.nobs = self.model.endog.shape[1]
self.run_filter()
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 3], 4
)
class TestClark1989ForecastDouble(Clark1989Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1989ForecastDouble, self).__init__()
self.run_filter()
class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.run_filter()
class TestClark1989ForecastConserve(Clark1989Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class TestClark1989ConserveAll(Clark1989):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
# self.model.loglikelihood_burn = self.true['start']
self.model.loglikelihood_burn = 0
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[0], self.true['loglike'], 2
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.results.filtered_state[0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][-1],
self.true_states.iloc[end-1, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][-1],
self.true_states.iloc[end-1, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][-1],
self.true_states.iloc[end-1, 3], 4
)
# Miscellaneous coverage-related tests
def test_slice_notation():
endog = np.arange(10)*1.0
mod = Model(endog, k_states=2)
# Test invalid __setitem__
def set_designs():
mod['designs'] = 1
def set_designs2():
mod['designs',0,0] = 1
def set_designs3():
mod[0] = 1
assert_raises(IndexError, set_designs)
assert_raises(IndexError, set_designs2)
assert_raises(IndexError, set_designs3)
# Test invalid __getitem__
assert_raises(IndexError, lambda: mod['designs'])
assert_raises(IndexError, lambda: mod['designs',0,0,0])
assert_raises(IndexError, lambda: mod[0])
# Test valid __setitem__, __getitem__
assert_equal(mod.design[0,0,0], 0)
mod['design',0,0,0] = 1
assert_equal(mod['design'].sum(), 1)
assert_equal(mod.design[0,0,0], 1)
assert_equal(mod['design',0,0,0], 1)
# Test valid __setitem__, __getitem__ with unspecified time index
mod['design'] = np.zeros(mod['design'].shape)
assert_equal(mod.design[0,0], 0)
mod['design',0,0] = 1
assert_equal(mod.design[0,0], 1)
assert_equal(mod['design',0,0], 1)
def test_representation():
# Test an invalid number of states
def zero_kstates():
mod = Representation(1, 0)
assert_raises(ValueError, zero_kstates)
# Test an invalid endogenous array
def empty_endog():
endog = np.zeros((0,0))
mod = Representation(endog, k_states=2)
assert_raises(ValueError, empty_endog)
# Test a Fortran-ordered endogenous array (which will be assumed to be in
# wide format: k_endog x nobs)
nobs = 10
k_endog = 2
endog = np.asfortranarray(np.arange(nobs*k_endog).reshape(k_endog,nobs)*1.)
mod = Representation(endog, k_states=2)
assert_equal(mod.nobs, nobs)
assert_equal(mod.k_endog, k_endog)
# Test a C-ordered endogenous array (which will be assumed to be in
# tall format: nobs x k_endog)
nobs = 10
k_endog = 2
endog = np.arange(nobs*k_endog).reshape(nobs,k_endog)*1.
mod = Representation(endog, k_states=2)
assert_equal(mod.nobs, nobs)
assert_equal(mod.k_endog, k_endog)
# Test getting the statespace representation
assert_equal(mod._statespace, None)
mod._initialize_representation()
assert(mod._statespace is not None)
def test_bind():
mod = Representation(1, k_states=2)
# Test invalid endogenous array (it must be ndarray)
assert_raises(ValueError, lambda: mod.bind([1,2,3,4]))
# Test valid (nobs x 1) endogenous array
mod.bind(np.arange(10)*1.)
assert_equal(mod.nobs, 10)
# Test valid (k_endog x 0) endogenous array
mod.bind(np.zeros(0,dtype=np.float64))
# Test invalid (3-dim) endogenous array
assert_raises(ValueError, lambda: mod.bind(np.arange(12).reshape(2,2,3)*1.))
# Test valid F-contiguous
mod.bind(np.asfortranarray(np.arange(10).reshape(1,10)))
assert_equal(mod.nobs, 10)
# Test valid C-contiguous
mod.bind(np.arange(10).reshape(10,1))
assert_equal(mod.nobs, 10)
# Test invalid F-contiguous
assert_raises(ValueError, lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(10,1))))
# Test invalid C-contiguous
assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(1,10)))
def test_initialization():
mod = Representation(1, k_states=2)
# Test invalid state initialization
assert_raises(RuntimeError, lambda: mod._initialize_state())
# Test valid initialization
initial_state = np.zeros(2,) + 1.5
initial_state_cov = np.eye(2) * 3.
mod.initialize_known(initial_state, initial_state_cov)
assert_equal(mod._initial_state.sum(), 3)
assert_equal(mod._initial_state_cov.diagonal().sum(), 6)
# Test invalid initial_state
initial_state = np.zeros(10,)
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
initial_state = np.zeros((10,10))
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
# Test invalid initial_state_cov
initial_state = np.zeros(2,) + 1.5
initial_state_cov = np.eye(3)
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
| bsd-3-clause |
PrashntS/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
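# Both coefficient matrices have shape (n_tasks, n_features); the spy plots
# below compare their non-zero (selected-feature) patterns.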
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
Haunter17/MIR_SU17 | exp4/exp4_rnn.py | 1 | 6465 | '''
A Recurrent Neural Network (LSTM) implementation example using the TensorFlow library.
Adapted from an example originally written for the MNIST database of handwritten
digits (http://yann.lecun.com/exdb/mnist/); here it is applied to the feature data
loaded below.
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import h5py
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
SMALL_FLAG = 0
print('==> Experiment 4 RNN')
filepath = '/pylon2/ci560sp/haunter/exp3_taylorswift_d15_1s_C1C8.mat'
if SMALL_FLAG:
filepath = '/pylon2/ci560sp/haunter/exp3_small.mat'
print('==> Loading data from {}...'.format(filepath))
# benchmark
t_start = time.time()
# ==============================================
# reading data
# ==============================================
f = h5py.File(filepath)
X_train = np.array(f.get('trainingFeatures'))
y_train = np.array(f.get('trainingLabels'))
X_val = np.array(f.get('validationFeatures'))
y_val = np.array(f.get('validationLabels'))
t_end = time.time()
print('--Time elapsed for loading data: {t:.2f} \
seconds'.format(t = t_end - t_start))
del f
print('-- Number of training samples: {}'.format(X_train.shape[0]))
print('-- Number of validation samples: {}'.format(X_val.shape[0]))
# ==============================================
# RNN configs
# ==============================================
# Network Parameters
num_training_vec, total_features = X_train.shape
num_freq = 169
num_frames = int(total_features / num_freq)
max_iter = 300
print_freq = 10
if SMALL_FLAG:
max_iter = 10
print_freq = 1
batch_size = 1000
learning_rate = 0.001
n_input = num_freq # input size per timestep (number of frequency bins, i.e. rows)
n_steps = num_frames # number of timesteps per example (number of frames, i.e. columns)
n_hidden = 512 # hidden layer num of features
n_classes = int(max(y_train.max(), y_val.max()) + 1)
# ==============================================
# RNN architecture
# ==============================================
# Transform labels into one-hot encoded form
y_train_OHEnc = tf.one_hot(y_train.copy(), n_classes)
y_val_OHEnc = tf.one_hot(y_val.copy(), n_classes)
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([n_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, n_steps, n_input)
# Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.unstack(x, n_steps, 1)
# Define a lstm cell with tensorflow
lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
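# Shape sketch (illustrative): each batch enters RNN() as
# (batch_size, n_steps, n_input); tf.unstack along axis 1 turns it into a list
# of n_steps tensors of shape (batch_size, n_input), which is the form
# rnn.static_rnn consumes, and only the last output feeds the linear layer.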
pred = RNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# evaluation metrics
train_acc_list = []
val_acc_list = []
train_err_list = []
val_err_list = []
# ==============================================
# RNN training
# ==============================================
# Launch the graph
with tf.Session() as sess:
sess.run(init)
y_train = sess.run(y_train_OHEnc)[:, 0, :]
y_val = sess.run(y_val_OHEnc)[:, 0, :]
print('==> Training the full network...')
t_start = time.time()
# Keep training until reach max iterations
for epoch in range(max_iter):
for i in range(0, num_training_vec, batch_size):
end_ind = min(i + batch_size, num_training_vec)
batch_x = X_train[i : end_ind]
batch_y = y_train[i : end_ind]
            # Reshape data to get n_steps sequences of n_input elements each
batch_x = batch_x.reshape((-1, n_steps, n_input))
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
if (epoch + 1) % print_freq == 0:
train_acc = accuracy.eval(feed_dict={x: X_train.reshape((-1, n_steps, n_input)),\
y: y_train})
train_acc_list.append(train_acc)
val_acc = accuracy.eval(feed_dict={x: X_val.reshape((-1, n_steps, n_input)),\
y: y_val})
val_acc_list.append(val_acc)
train_err = cost.eval(feed_dict={x: X_train.reshape((-1, n_steps, n_input)),\
y: y_train})
train_err_list.append(train_err)
val_err = cost.eval(feed_dict={x: X_val.reshape((-1, n_steps, n_input)),\
y: y_val})
val_err_list.append(val_err)
print("-- epoch: %d, training error %g"%(epoch + 1, train_err))
t_end = time.time()
print('--Time elapsed for training: {t:.2f} \
seconds'.format(t = t_end - t_start))
# ==============================================
# RNN Evaluation
# ==============================================
# Reports
print('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))
print('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))
print('-- Training error: {:.4E}'.format(train_err_list[-1]))
print('-- Validation error: {:.4E}'.format(val_err_list[-1]))
print('==> Generating error plot...')
x_list = range(0, print_freq * len(train_acc_list), print_freq)
train_err_plot = plt.plot(x_list, train_err_list, 'b-', label='training')
val_err_plot = plt.plot(x_list, val_err_list, '-', color='orange', label='validation')
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Error vs Number of Epochs with {} Hidden Units'.format(n_hidden))
plt.legend(loc='best')
plt.savefig('rnn_{}.png'.format(n_hidden), format='png')
plt.close()
print('==> Finished!')
| mit |
google/makani | analysis/force_balance_loop/power_calcs/kite_loop.py | 1 | 70138 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.optimize import minimize_scalar
from scipy.interpolate import interp1d
import math
import numbers
import json
import time
import pprint
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import cm
from cycler import cycler
from lib import utils
from power_calcs import kite_path
from power_calcs import kite_pose
from tools import rotor_model_util
from lib import fun
from six.moves import range
from six.moves import zip
i = 0
j = 1
k = 2
class KiteLoop(object):
"""
Contains all the poses for a loop. This allows you to evaluate an entire loop.
Has lots of helper functions and plotting outputs for use with Plotly
javascript libraries.
The path and poses can be explictly specified, or left to the
optimizer.
Explicitly defining poses is done through either the pose_states or
pose_states_param kwarg, path shape though path_shape_params, and path
location through path_location_params. Any necessary information not
specified will automatically seed and be determined by the optimizer,
using default settings.
Alternatively, you can explicitly specify what to optimize over and the
settings for each variable by the vars_to_opt kwarg.
Args:
resource: a dictionary that specifies a resource
See KitePowerCurve docstring for more details
config: a dictionary that specifies a kite configuration
See KitePowerCurve docstring for more details
Kwargs:
v_w_at_h_ref:
optional wind speed, in m/s, at resource reference height
if not provided, it attempts to use resource['v_w_avg_h_ref']
v_w_hat:
optional wind direction, in array like vector format
if not provided, nominal direction is [1, 0, 0]
grav_mult:
multiplier to apply to gravity - nominal is 1.
verbose:
Boolean - adds additional print statements about calculation.
pose_states:
A dictionary of lists that specifies a variable and values for all
poses in the loop. The lists must be of length of the number of poses,
as specified in the path_shape_params or default settings.
See KitePose for keys needed to solve a pose.
pose_states_param:
A dictionary of dictionaries, where each dictionary specifies a
parameterization type
path_shape_params:
a dictionary of values needed to specify the kite path.
See KitePath for acceptable inputs
path_location_params:
a dictionary of values needed to specify the path location.
See KitePath for acceptable inputs
vars_to_opt:
Dictionary of variables to optimize over.
Variables need to be a variable type for pose_states, path_shape_params,
or path_location_params. Format is:
{<variable_name>: {'type': <parameterization_type>,
'values': <initial_value>,
'max_step': <maximum_step_size>}}
available parameterization types are:
spline:
A spline fit with evenly space control points about the path.
Seed must be array-like with number of elements equal to
number of desired control points.
linear_interp:
Linear interpolation with evenly spaced control points about the
path. Seed must be array-like with number of elements equal to
number of desired transition points.
constant:
All values around the loop are the same.
value:
Only a single value for the entire loop. No parameterization.
This is to be used for things like path_shape_params and
path_location_params, where generally a single value is used.
each:
Every pose is individually optimized.
opt_params:
optional dictionary of optimization params that are passed to
self.optimize and KitePose objects.
Optional items:
maxiter:
maximum iterations for v_k optimizer
tol:
tolerance for power in optimizer.
higher values will stop the optimization sooner.
see scipy.optimize for further details
catol:
tolerance for constraints violations in v_k optimizer
see scipy.optimize for further details
store_converge_data:
Boolean that determines if convergence data during optimization is
stored or not. This can be memory intensive for long optimizations.
constraint_stiffness:
all constraints are normalized, and then multiplied by this
parameter. high values cause the optimizer to quickly flee solutions
that violate constraints, while low values let the optimizer
find the best power, then work on meeting constraints.
typically you want low values of approx. 0.0001
constraint_penalty_norm:
normalized multiplier on power on normalized constraint values in
optimizer. applies penalty to violated constraints as a way
to further penalize failing to meet constraints.
not typically needed.
tension_penalty_cut_in:
the portion of positions that must be at rated power to
start ramping in a tension penalty for v_k optimizer.
used to steer rated power positions towards min tension solutions
ramps penalty linearly from zero at cut in to full at rated.
tension_penalty_norm:
maximum tension penalty to apply for v_k optimizer.
penalty is this * rated_power * normalized tension * ramp_in
flap_m_mult:
Array-like penalties to be multiplied by absolute value of current
flap aero coefficients.
pose_solve_options:
dictionary that is picked off and passed to KitePose.
see KitePose for usage
Typical Usage Examples:
Create a KiteLoop with a resource, and kite config.
Use KiteLoop.solve() to fill object with results.
Use plotting tools to inspect results, or directly pull desired data out.
Summary loop data is contained in:
self.data_loop
Pose data is contained in a pandas Dataframe object in:
self.data_poses
"""
def __init__(self, resource, config,
v_w_at_h_ref=None, v_w_hat=None,
grav_mult=1.0, verbose=False,
opt=False,
**kwargs):
self.resource = resource
self.config = config
self.grav_mult = grav_mult
self.verbose = verbose
self.opt = opt
if v_w_hat is not None:
self.v_w_hat = np.asarray(v_w_hat) / utils.vec_length(v_w_hat)
else:
self.v_w_hat = np.array([1.,0.,0.])
if v_w_at_h_ref is not None:
self.v_w_at_h_ref = v_w_at_h_ref
else:
self.v_w_at_h_ref = self.resource['v_w_avg_h_ref']
self._parse_kwargs(kwargs)
def __getitem__(self, key):
return getattr(self, key)
def _parse_kwargs(self, kwargs):
# Setup default optimization params.
opt_params = {
'maxiter': 2000,
'tol': 0.01,
'catol': 0.01,
'constraint_penalty_norm': 0.,
'store_converge_data': True,
'constraint_stiffness': 0.001,
'tension_penalty_cut_in': 0.95,
'tension_penalty_norm': 0.0005,
'flap_penalty_factor': 0.,
'flap_m_mult': [0., 0., 0.]}
opt_params.update(kwargs.get('opt_params', {}))
self.opt_params = opt_params
self.pose_solve_options = kwargs.get('pose_solve_options', {})
self.solver = kwargs.get('solver', 'FBL')
# Pose states and pose_states_param may get changed during solve, so we make
# a copy.
self.pose_states = utils.deepcopy_lite(kwargs.get('pose_states', {}))
self.pose_states_param = utils.deepcopy_lite(
kwargs.get('pose_states_param', {}))
# Pose_states_param are converted into pose_states, which means specifying
# the same variable in both is overspecifying, and pose_states_param steps
# on pose_states.
if self.verbose:
over_specified = (
[k for k in self.pose_states.keys() if k in
list(self.pose_states_param.keys())])
if over_specified:
        print(str(over_specified) + ' is/are specified in both pose_states and '
+ 'pose_states_param.\nPose_states_param is overriding.')
# Path shape and location params may get changed during solve, so we make a
# copy.
self.path_shape_params = utils.deepcopy_lite(
kwargs.get('path_shape_params', {}))
self.path_location_params = utils.deepcopy_lite(
kwargs.get('path_location_params', {}))
self.given_path = kwargs.get('given_path', None)
# Vars to opt will be changed during solve, so we make a copy.
self.vars_to_opt = utils.deepcopy_lite(kwargs.get('vars_to_opt', {}))
if self.given_path is None:
kite_path.KitePath.setup_optimization(
self.path_shape_params, self.path_location_params, self.vars_to_opt)
# Make path here, as it's needed to make good seed guesses and fill in
# number of poses for parameterized loop args.
# TODO: Separate making seeds from parsing kwargs.
self._make_path(self.path_shape_params,
self.path_location_params, self.given_path)
# Turn parameterized args into states for each pose.
loop_param_args = self._setup_param_args(self.pose_states_param)
self._update_loop_inputs(loop_param_args)
if all(k not in self.pose_states for k in ('v_a', 'v_k')):
if all(k not in self.vars_to_opt for k in ('v_a', 'v_k')):
self.vars_to_opt['v_a'] = (
{'param_type': 'spline',
'values': [self.config['v_a_min'] + 5.] * 6,
'max_step': 5.})
if ('alpha' not in self.pose_states) and ('alpha' not in self.vars_to_opt):
self.vars_to_opt['alpha'] = (
{'param_type': 'spline',
'values': [3.] * 6,
'max_step': 2.})
if ('beta' not in self.pose_states) and ('beta' not in self.vars_to_opt):
self.vars_to_opt['beta'] = (
{'param_type': 'spline',
'values': [3.] * 6,
'max_step': 2.})
if self.config.get('aero_device', None):
if 'aero_device_scale' not in self.pose_states:
if 'aero_device_scale' not in self.vars_to_opt:
self.vars_to_opt['aero_device_scale'] = (
{'param_type': 'spline',
'values': [0.0] * 6,
'max_step': 0.2})
def _initialize_poses(self):
self.poses = []
self._make_path(
self.path_shape_params, self.path_location_params, self.given_path)
for k, states in self.pose_states.items():
assert len(states) == self.num_poses, (
'Number of states must be same as number of poses.\n'
'Number of poses is: ' + str(self.num_poses) + '\n' +
'Number of states is: ' + str(len(states)) + '\n' +
'States are: ' + k + ': ' + str(states))
poses_state = utils.dl_to_ld(self.pose_states)
# If optimizing (non-blank vars_to_opt), then make sure the optimizer
# constraint tolerance is tighter than pose constraint tolerance. Without
# this check, the optimizer may provide constraint violations that result
# in invalid poses.
if (('catol' in self.opt_params) and ('vars_to_opt' != {})):
if 'contol_norm' not in self.pose_solve_options:
self.pose_solve_options['contol_norm'] = self.opt_params['catol']
if self.opt_params['catol'] > self.pose_solve_options['contol_norm']:
print(
'\nPose constraint tolerance must be greater than or equal to '
+ 'optimization constraint tolerance. Overriding pose constraint '
+ 'tolerance with opt_params catol.')
self.pose_solve_options['contol_norm'] = self.opt_params['catol']
# Setup poses.
for position, pose_params in zip(self.path.positions, poses_state):
pose = kite_pose.KitePose(
position, self.resource, self.config,
self.v_w_at_h_ref, self.v_w_hat, grav_mult=self.grav_mult,
opt=self.opt, solve_options=self.pose_solve_options, **pose_params)
self.poses.append(pose)
self._calc_knowns()
def _enum_neighboring_indices(self):
num_poses = len(self.poses)
assert num_poses == self.num_poses
for index in range(num_poses):
next_index = 0 if index == (num_poses - 1) else index + 1
prev_index = index - 1 if index > 0 else num_poses - 1
yield prev_index, index, next_index
def _enum_neighboring_poses(self):
for p, i, n in self._enum_neighboring_indices():
yield self.poses[p], self.poses[i], self.poses[n]
def _calc_segment_time(self):
# Solve for time.
for prev_pose, curr_pose, next_pose in self._enum_neighboring_poses():
curr_pose.state['segment_time'] = (
curr_pose.state['segment_length'] /
((next_pose.state['v_k'] + curr_pose.state['v_k']) / 2.))
def _calc_knowns(self):
# Calc state derivatives to close loop with strategies given.
# Solve for time.
self._calc_segment_time()
total_time = 0.
total_dist = 0.
for pose in self.poses:
pose.state['time'] = total_time
pose.state['dist'] = total_dist
total_time += pose.state['segment_time']
total_dist += pose.position['segment_length']
self.data_loop = {'total_time': total_time,
'total_dist': total_dist}
# Solve for acceleration along path.
for prev_pose, pose, next_pose in self._enum_neighboring_poses():
# Segment times are from current to next pose.
prev_accel = (
(pose.state['v_k'] - prev_pose.state['v_k'])
/ prev_pose.state['segment_time'])
next_accel = (
(next_pose.state['v_k'] - pose.state['v_k'])
/ pose.state['segment_time'])
# Weight accel along path by how close in time it is between poses.
pose.state['accel_along_path'] = utils.slope_linear(
prev_accel, next_accel,
pose.state['segment_time'],
prev_pose.state['segment_time'])
# If kite orientation is known, calculate pqr rates and rotational accel
# moments.
if 'kite_axis' in self.poses[0].state:
self._calc_body_rates()
self._calc_body_rates_dot()
def _calc_body_rates(self):
"""Calculates body rates and stores them in pose states."""
def angular_diff(initial_pose, end_pose):
d_roll = utils.angle_signed(
initial_pose.state['kite_axis']['y'],
end_pose.state['kite_axis']['y'],
initial_pose.state['kite_axis']['x'])
d_pitch = utils.angle_signed(
initial_pose.state['kite_axis']['x'],
end_pose.state['kite_axis']['x'],
initial_pose.state['kite_axis']['y'])
d_yaw = utils.angle_signed(
initial_pose.state['kite_axis']['y'],
end_pose.state['kite_axis']['y'],
initial_pose.state['kite_axis']['z'])
return np.array([d_roll, d_pitch, d_yaw])
for prev_pose, pose, next_pose in self._enum_neighboring_poses():
prev_pqr = angular_diff(prev_pose, pose) / prev_pose.state['segment_time']
next_pqr = angular_diff(pose, next_pose) / pose.state['segment_time']
# Weight pqr by how close in time it is between poses.
pose.state['pqr'] = utils.slope_linear(
prev_pqr, next_pqr,
pose.state['segment_time'],
prev_pose.state['segment_time'])
pose._calc_omega_hat()
def _calc_body_rates_dot(self):
"""Calculates body rate accelerations and stores them in pose states."""
for prev_pose, pose, next_pose in self._enum_neighboring_poses():
# Segment times are from current to next pose.
prev_d_pqr = pose.state['pqr'] - prev_pose.state['pqr']
next_d_pqr = next_pose.state['pqr'] - pose.state['pqr']
# Segment time is from active pose to next pose, so d_time is the segment
# time for the first pose in the comparison.
prev_pqr_dot = prev_d_pqr / prev_pose.state['segment_time']
next_pqr_dot = next_d_pqr / pose.state['segment_time']
# Weight pqr_dot by how close in time it is between poses.
pose.state['pqr_dot'] = utils.slope_linear(
prev_pqr_dot, next_pqr_dot,
pose.state['segment_time'],
prev_pose.state['segment_time'])
def solve(self):
if self.vars_to_opt:
self.optimize(self.vars_to_opt, **self.opt_params)
else:
self._initialize_poses()
self._solve_poses()
self._calc_loop_data()
if not self.opt:
self._extract_pose_data()
def _solve_poses(self):
if self.verbose:
print('Solving poses in loop...', end='')
start_t = time.time()
for pose in self.poses:
pose.solve()
if self.verbose:
end_t = time.time()
print('solved. Time is %0.4fs' % (end_t - start_t))
def _make_path(
self, path_shape_params, path_location_params, given_path=None):
self.path = kite_path.KitePath(
path_shape_params, path_location_params, self.config, given_path)
self.v_w_at_h_hub = self.resource['v_w_at_height'](
self.path.h_hub, self.v_w_at_h_ref)
self.num_poses = self.path.num_pos
self.poses = []
def _extract_pose_data(self):
if self.verbose:
print('Extracting pose data into DataFrame.')
data = {}
keys = []
# All pose states should contain the same keys, but we grab all unique keys
# accross all poses to be sure.
for pose in self.poses:
keys.extend([key for key in pose.state.keys() if key not in keys])
for key in keys:
data[key] = []
data['position_index'] = []
for ii, pose in enumerate(self.poses):
data['position_index'].append(ii)
for key in keys:
try:
data[key].append(pose.state[key])
except KeyError:
data[key].append(np.NaN)
data['dist_norm'] = (
(np.array(data['dist']) / self.data_loop['total_dist']).tolist())
self.data_poses = pd.DataFrame(data, index=data['position_index'])
def _calc_loop_data(self):
if self.verbose:
print('Loop solved, now calculating loop data.')
temp_p = 0.
temp_vals = {'dist': {},
'time': {}}
count_pose_at_rated = 0
self.valids = []
self.valid = True
self.constraints = []
self.constraints_violated = []
for ii, pose in enumerate(self.poses):
self.valids.append(pose.valid)
if not pose.valid:
self.valid = False
for constraint in pose.state['constraints']:
constraint['position'] = ii
self.constraints.append(constraint)
for constraint_v in pose.state['constraints_violated']:
constraint_v['position'] = ii
self.constraints_violated.append(constraint_v)
next_index = (ii + 1) % self.num_poses
# Variables are weighted by time spent at each position.
# Power must be calculated during optimization runs, so it's pulled out
# separately.
temp_p += (
(pose.state['power'] + self.poses[next_index].state['power']) / 2.
* pose.state['segment_time'])
# If not an optimization run, store values for all number-like state
# variables weighted by both time and distance. These are used in the stat
# calculations below.
if not self.opt:
# TODO: Redo section for readability and conciseness.
for key in pose.state.keys():
if isinstance(pose.state[key], numbers.Number):
# Temp_vals is weighted sum of particular variable, weighted by
# by either time or distance. It is later divided by total
# respective weight to get a weighted average.
if key not in temp_vals['dist']:
temp_vals['dist'][key] = 0.
temp_vals['time'][key] = 0.
temp_vals['time'][key] += (
(pose.state[key] + self.poses[next_index].state[key]) / 2.
* pose.state['segment_time'])
temp_vals['dist'][key] += (
(pose.state[key] + self.poses[next_index].state[key]) / 2.
* pose.state['segment_length'])
elif isinstance(pose.state[key], (list, np.ndarray)):
if np.array(pose.state[key]).size==3 and all(
[isinstance(v, numbers.Number) for v in pose.state[key]]):
for idx, suffix in enumerate(['-x', '-y', '-z']):
key_suff = key + suffix
if key_suff not in temp_vals['dist']:
temp_vals['dist'][key_suff] = 0.
temp_vals['time'][key_suff] = 0.
temp_vals['time'][key_suff] += (
(pose.state[key][idx]
+ self.poses[next_index].state[key][idx]) / 2.
* pose.state['segment_time'])
temp_vals['dist'][key_suff] += (
(pose.state[key][idx]
+ self.poses[next_index].state[key][idx]) / 2.
* pose.state['segment_length'])
# TODO: Extend to work for forces as well.
elif key in ['moments_b']:
# Moments and forces are nested in a dict, so we pull them out for
# summary stats.
for kk in pose.state[key]['type'].keys():
# Calculate the magnitude.
key_kk = key + '-' + kk + '-mag'
if key_kk not in temp_vals['dist']:
temp_vals['dist'][key_kk] = 0.
temp_vals['time'][key_kk] = 0.
curr_vec_len = utils.vec_length(pose.state[key]['type'][kk])
next_vec_len = utils.vec_length(
self.poses[next_index].state[key]['type'][kk])
temp_vals['time'][key_kk] += (
(curr_vec_len + next_vec_len) / 2.
* pose.state['segment_time'])
temp_vals['dist'][key_kk] += (
(curr_vec_len + next_vec_len) / 2.
* pose.state['segment_length'])
# Calculate the components and add suffix to name.
for idx, suffix in enumerate(['-x', '-y', '-z']):
key_kk_suff = key + '-' + kk + suffix
if key_kk_suff not in temp_vals['dist']:
temp_vals['dist'][key_kk_suff] = 0.
temp_vals['time'][key_kk_suff] = 0.
temp_vals['time'][key_kk_suff] += (
(pose.state[key]['type'][kk][idx]
+ self.poses[next_index].state[key]['type'][kk][idx]) / 2.
* pose.state['segment_time'])
temp_vals['dist'][key_kk_suff] += (
(pose.state[key]['type'][kk][idx]
+ self.poses[next_index].state[key]['type'][kk][idx]) / 2.
* pose.state['segment_length'])
pose.state['time_norm'] = (
pose.state['time'] / self.data_loop['total_time'])
if pose.state['power_shaft'] >= self.config['power_shaft_max']:
count_pose_at_rated += 1
self.r_at_rated = count_pose_at_rated / float(self.num_poses)
# Calculate stats for all possible values. Omitted on optimization runs
# for computation time.
if not self.opt:
stats = {}
for key, value in temp_vals.items():
# Key is either distance or time.
for k in value.keys():
# Special items denoted with hyphens above - we need to parse the
# string to figure out what to do.
if k.find('-') != -1:
k_split = k.split('-')
# Non-nested vectors have one hyphen, where suffix indicates axis.
if len(k_split) == 2:
# Separate out the suffix so we can lookup in pose.state with kk.
kk, suffix = k_split[0], k_split[1]
idx = {'x': 0, 'y': 1, 'z': 2}[suffix]
stats[k + '_avg_' + key] = (
value[k] / self.data_loop['total_' + key])
stats[k + '_max'] = np.max(
[pose.state[kk][idx] for pose in self.poses])
stats[k + '_min'] = np.min(
[pose.state[kk][idx] for pose in self.poses])
# Force and moment vectors are nested and have an additional lookup
# to do. Assume that's what they are, and are nested under 'type'.
elif len(k_split) == 3:
kk, kkk, suffix = k_split[0], k_split[1], k_split[2]
stats[k + '_avg_' + key] = (
value[k] / self.data_loop['total_' + key])
if suffix == 'mag':
magnitudes = [
utils.vec_length(pose.state[kk]['type'][kkk])
for pose in self.poses]
stats[k + '_max'] = np.max(magnitudes)
stats[k + '_min'] = np.min(magnitudes)
else:
idx = {'x': 0, 'y': 1, 'z': 2}[suffix]
stats[k + '_max'] = np.max(
[pose.state[kk]['type'][kkk][idx] for pose in self.poses])
stats[k + '_min'] = np.min(
[pose.state[kk]['type'][kkk][idx] for pose in self.poses])
else:
stats[k + '_avg_' + key] = value[k] / self.data_loop['total_' + key]
stats[k + '_max'] = np.max(
[pose.state[k] for pose in self.poses
if pose.state[k] != float('inf')
or pose.state[k] != float('-inf')])
stats[k + '_min'] = np.min(
[pose.state[k] for pose in self.poses
if pose.state[k] != float('inf')
or pose.state[k] != float('-inf')])
# Power is required on optimization runs, so it is calculated every run.
self.power = temp_p / self.data_loop['total_time']
if self.verbose:
print(utils.TextFormat.BOLD + 'Loop Valid:', self.valid,
utils.TextFormat.END)
if not self.valid:
print('Type of constraints violated:\n'
+ str(list(set([c['name'] for c in self.constraints_violated]))))
print(utils.TextFormat.BOLD +
'Loop Mean Power is: %0.1f W' % self.power + utils.TextFormat.END)
p_in_wind = (0.5 * self.resource['rho'] * self.v_w_at_h_hub**3)
if p_in_wind != 0.:
self.zeta_padmount = self.power / (p_in_wind * self.config['s'])
else:
self.zeta_padmount = float('-inf')
if not self.opt:
self.data_loop['power'] = self.power
self.data_loop['r_at_rated'] = self.r_at_rated
self.data_loop['valid'] = self.valid
self.data_loop.update(stats)
self.data_loop['v_w_at_h_ref'] = self.v_w_at_h_ref
self.data_loop['v_w_at_h_hub'] = self.v_w_at_h_hub
self.data_loop.update(self.path.__dict__)
def calc_csim_v_a_sch(self):
"""
Calculates a best fit v_a schedule and params in CSim notation.
Results are added to loop object.
"""
v_as = np.array(self.data_poses['v_a'].tolist())
angles = np.array(self.data_poses['loop_angle_csim'].tolist())
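    # Fit the CSim-style schedule v_a(angle) = max(a, b + c * cos(angle - phi))
    # to the solved pose airspeeds; curve_fit returns (a, b, c, phi) in popt.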
def toCSim(angle, a, b, c, phi):
return [max(a,x) for x in b + c * np.cos(angle - phi)]
init_guess = [self.config['v_a_min'], self.config['v_a_min']*1.5,
self.config['v_a_min']*0.5, 1.5]
    popt, pcov = curve_fit(toCSim, angles, v_as, init_guess)
self.v_a_sch_params = {'a': popt[0],
'b': popt[1],
'c': popt[2],
'phi': popt[3]}
self.v_a_sch = toCSim(angles, *popt)
def _get_seeds(self, opt_vars):
    raise NotImplementedError
#TODO: finish section, revise for new parameterization
seeds = {}
    if 'r_loop' in opt_vars or 'incl' in opt_vars:
if 'r_loop' in opt_vars and 'incl' not in opt_vars:
seeds['r_loop'] = self._get_r_loop_analytical()
      if 'r_loop' in opt_vars and 'incl' in opt_vars:
pass
if 'v_ks' in opt_vars:
seeds['v_ks'] = []
v_a_min = self.config['v_a_min']
v_a_tension = math.sqrt((2.*self.config['tension_max'])
/(self.resource['rho'] * self.config['s']
* self.config['cL_oper']))
cD = self.config['cD_from_cL'](self.config['cL_oper'])
loyd_limit = (4./27.) * (self.config['cL_oper']**3 / cD**2)
v_k_ideal = (
(2./3.) * (self.config['cL_oper'] / cD) * self.v_w_at_h_hub
* math.cos(self.path.incl))
# TODO: Potential energy is already calculated in the pose.
# Remove repeated calculations.
m_pot = (self.config['m_kite'] + 0.5 * self.config['m_tether'])
h_delta = (self.path.h_max - self.path.h_min)
pot_e_delta = h_delta * m_pot * utils.Const.G
k_grav = 0.5
v_k_delta = math.sqrt(2. * pot_e_delta / m_pot)
for pose in self.poses:
h_norm = ((pose.state['xyz'][k] - self.path.h_min) / h_delta) - 0.5
seeds['v_ks'].append(
max(v_a_min,
min(v_a_tension, v_k_ideal - h_norm * v_k_delta * k_grav)))
return seeds
def _get_r_loop_analytical(self):
r_loop_max = self.config['l_tether']
# Calculate ideal loop size using analytical model.
# TODO: Document source for this model.
r_loop_ideal = math.sqrt(
(2.*self.config['l_tether'] * self.config['m_eff'])/
(self.resource['rho'] * self.config['cL_oper'] * self.config['s']))
if 'h_min' in self.config:
h = (math.sin(self.path.incl) * self.config['l_tether']
- self.config['h_min'])
r_loop_max = h / math.cos(self.path.incl)
r_loop = min(r_loop_max, r_loop_ideal)
return r_loop
def _get_incl_analytical(self):
incl_min = None
shear = max(0., self.resource['shear'])
incl_ideal = math.atan(math.sqrt(shear))
if 'h_min' in self.config:
incl_min = (math.asin(self.config['h_min'] / self.config['l_tether'])
+ math.asin(self.path.r_loop / self.config['l_tether']))
incl = max(incl_min, incl_ideal)
return incl
def _get_r_loop_incl_analytical(self):
    raise NotImplementedError
def _get_v_k_schedule_analytical(self):
    raise NotImplementedError
def optimize(self, vars_to_opt,
maxiter, tol, catol,
constraint_penalty_norm, constraint_stiffness,
tension_penalty_cut_in, tension_penalty_norm, flap_m_mult,
flap_penalty_factor, store_converge_data):
"""
Optimizes parameters specified in vars_to_opt for best average loop power,
meeting constraints in KitePose.
"""
if self.verbose:
print('Optimizing loop...')
start_t = time.time()
constraint_penalty_mult = (
constraint_penalty_norm * self.config['power_shaft_max'])
class ConstraintsWrapper(object):
"""Defines a wrapper class for constraints.
      The sole reason for the existence of this class is to provide a fixed
function for the optimizer to 'call.' This wrapper lets the call simply
access a variable that we swap out with the result of each run."""
def __init__(self, constraints):
self.constraints = []
for constraint in constraints:
self.constraints.append(ConstraintWrapper(constraint))
def get_list(self):
# Returns a list of constraint dicts, rather than the list of
# ConstraintWrapper objects that is self.constraints.
out = tuple(c.constraint for c in self.constraints)
return out
class ConstraintWrapper(object):
def __init__(self, constraint):
self.constraint = {}
# 'type' and 'fun' required for COBYLA optimizer.
self.constraint['type'] = 'ineq'
# 'fun' is the function call required by COBYLA.
# Note that here we just pin it to a function that gets a variable that
# is repeatedly swapped out. This is because the optimizer calls a
# specific memory address function - the function itself cannot be
# updated.
self.constraint['fun'] = self.get_constraint_val
self.update_constraint(constraint)
def get_constraint_val(self, x):
return self.constraint['opt']
def update_constraint(self, constraint):
self.constraint.update(constraint)
self.constraint['opt'] = (
self.constraint['margin_norm'] * constraint_stiffness)
if store_converge_data:
self.convergence_data = []
self._vars_to_opt_param_dict = (
self._setup_param_args(vars_to_opt, norm=True))
self._update_loop_inputs(self._vars_to_opt_param_dict)
if self.verbose:
print('Variables being optimized: ', list(vars_to_opt.keys()))
print('Initial seed is: ')
param_vals_rounded = {}
for k, vals in (
list(self._get_param_values(self._vars_to_opt_param_dict).items())):
param_vals_rounded[k] = np.round(vals, 5)
pprint.pprint(param_vals_rounded)
print('Parameterization types are: ')
for items in [(k, v['param_type']) for k, v in vars_to_opt.items()]:
print('%s: %s'%items)
# Constraints are created at the KitePose level, so we don't know what
# constraints the current model has until it is run. Run the model once to
# get the current active constraints.
loop = KiteLoop(
self.resource, self.config, self.v_w_at_h_ref,
self.v_w_hat, self.grav_mult,
opt=True, pose_states=self.pose_states,
path_shape_params=self.path_shape_params,
path_location_params=self.path_location_params,
opt_params=self.opt_params,
solver=self.solver)
loop.solve()
cons = ConstraintsWrapper(loop.constraints)
self.iter_count = 0
if self.verbose:
print('Max iter: %d, calculating iter %d..'%(maxiter, self.iter_count),
end='')
# Define objective function for optimizer.
def eval_loop_power(norm_args):
# Update parameterized args via optimizer provided normalized args.
self._update_param_args_w_norm_args(
self._vars_to_opt_param_dict, norm_args)
self._update_loop_inputs(self._vars_to_opt_param_dict)
loop = KiteLoop(
self.resource, self.config, self.v_w_at_h_ref,
self.v_w_hat, self.grav_mult,
opt=True, pose_states=self.pose_states,
path_shape_params=self.path_shape_params,
path_location_params=self.path_location_params,
opt_params=self.opt_params,
solver=self.solver)
loop.solve()
# Ramp in tension penalty if ratio of poses at rated power is above
# tension penalty cut in.
tension_mult = 0.
if loop.r_at_rated >= tension_penalty_cut_in:
ramp = interp1d([tension_penalty_cut_in, 1.], [0., 1.])
ramp_mult = ramp(loop.r_at_rated)
tension_mult = (tension_penalty_norm
* self.config['power_shaft_max']
* ramp_mult)
for pose in loop.poses:
pose._apply_opt_penalties(
constraint_penalty_mult, tension_mult, flap_m_mult,
flap_penalty_factor)
for con, constraint in zip(cons.constraints, loop.constraints):
con.update_constraint(constraint)
# Applying optimization penalties changes the power, so we must update
# the summary power again by running calc_loop_data.
loop._calc_loop_data()
# Record intermediary optimization inputs to see how it converged.
if store_converge_data:
self.convergence_data.append(
{'obj_out': loop.power, # Not necessarily power due to penalties.
'params': utils.deepcopy_lite(
self._get_param_values(self._vars_to_opt_param_dict)),
# Store a reduced set of constraint info for memory management.
'constraints': tuple(
{'name': c['name'],
'margin_norm': c['margin_norm'],
'value': c['value']} for c in cons.get_list())})
if self.verbose:
self.iter_count += 1
if self.iter_count % 100 == 0:
print('%d..'%self.iter_count, end='')
return -loop.power
self.optimizer_output = minimize(
eval_loop_power,
self._get_norm_param_args(self._vars_to_opt_param_dict),
method='cobyla',
constraints=cons.get_list(),
options={'tol':tol,
'maxiter':maxiter,
'catol': catol * constraint_stiffness,
'rhobeg':1.})
# Pose states and path params are updated on every call the
# optimizer makes, but we overwrite them with the optimizer output,
# which is in the normalized domain.
    # We do this because, if the optimizer fails, the final iteration may not
    # be the best run, whereas the optimizer output holds the best run found.
self._update_param_args_w_norm_args(
self._vars_to_opt_param_dict, self.optimizer_output['x'])
self._update_loop_inputs(self._vars_to_opt_param_dict)
self._initialize_poses()
# Store vars_to_opt final output in pose_state_param so user has them.
self.pose_states_param.update(
self._get_param_dict(self._vars_to_opt_param_dict))
if self.verbose:
end_t = time.time()
print('')
print('Optimization complete in %d iterations and %0.2fs.'
% (self.optimizer_output['nfev'],
(end_t - start_t)) + utils.TextFormat.BOLD)
if self.optimizer_output['success']:
print('Converged to solution.')
else:
if ((self.pose_solve_options['contol_norm'] * constraint_stiffness
- self.optimizer_output['maxcv']) >= 0.):
print('Converged to solution within pose constraints tolerance.')
print('Ignore constraint violations, within constraint tolerance '
'limit of %0.3f' % (self.pose_solve_options['contol_norm']))
else:
print('Did NOT converge to solution within constraints '
'tolerance of', catol)
print('Max violation is',
self.optimizer_output['maxcv'] / constraint_stiffness)
print('Constraints violated: ')
out = {}
for con in cons.constraints:
if con.constraint['opt'] < -catol * constraint_stiffness:
name = (
con.constraint['name'] + ' norm @ pos {:02d}'.format(
con.constraint['position']))
out[name] = round(con.constraint['margin_norm'], 4)
pprint.pprint(out)
print(utils.TextFormat.END
+ 'Results for optimization in normalized domain: ')
print([round(x,3) for x in self.optimizer_output['x']])
print('Results for optimization in parameter domain: ')
pprint.pprint(self._get_param_values(self._vars_to_opt_param_dict))
print('Results in pose domain: ')
pprint.pprint(self.pose_states)
print('Results in path domain: ')
pprint.pprint(self.path_shape_params)
pprint.pprint(self.path_location_params)
@staticmethod
def _get_norm_param_args(param_args):
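    """Concatenates the normalized values of all ParamArgs, iterating keys in
    sorted order so the layout matches _update_param_args_w_norm_args."""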
norm_args = []
for key in sorted(param_args.keys()):
norm_args.extend(param_args[key].get_norm_values())
return np.array(norm_args)
@staticmethod
def _update_param_args_w_norm_args(param_args, norm_args):
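    """Writes a flat array of normalized optimizer values back into each
    ParamArg, consuming num_args entries per key in sorted order."""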
idx_start = 0
for key in sorted(param_args.keys()):
idx_end = idx_start + param_args[key].num_args
param_args[key].set_norm_values(norm_args[idx_start:idx_end])
idx_start = idx_end
def _setup_param_args(self, param_dict, norm=False):
"""Initializes all parameterized args."""
param_args = {}
for key, args in param_dict.items():
output_xs = None
kwargs = {}
kwargs['param_type'] = args.get('param_type', 'value')
# All loop lookups are currently assumed to be in the "pose domain."
      # Assuming circular paths and evenly spaced poses, this can easily be
      # mapped, but the assumption breaks down with arbitrary paths and spacing.
# TODO: Add functionality to define things that vary around the
# path in something other than pose space, such as loop angle or similar.
if kwargs['param_type'] == 'spline':
num_ctrl_pts = len(args['values'])
end_offset = (self.num_poses) / (2. * num_ctrl_pts)
kwargs['ctrl_pts_xs'] = np.linspace(end_offset,
self.num_poses - end_offset,
num=num_ctrl_pts)
output_xs = list(range(self.num_poses))
elif kwargs['param_type'] == 'linear_interp':
num_ctrl_pts = len(args['values'])
kwargs['ctrl_pts_xs'] = (
np.linspace(0., self.num_poses, num_ctrl_pts))
output_xs = list(range(self.num_poses))
elif kwargs['param_type'] == 'constant':
output_xs = list(range(self.num_poses))
if norm:
param_args[key] = utils.NormParamArg(
args['values'], args['max_step'], **kwargs)
else:
param_args[key] = utils.ParamArg(args['values'], **kwargs)
# If output_xs was defined above based on parameterization type, then set
# it in the param_arg.
if output_xs is not None:
param_args[key].set_lookup_xs(output_xs)
return param_args
@staticmethod
def _get_param_values(param_args):
"""Returns a dictionary of all output values of parameterized args."""
output = {}
for key in sorted(param_args.keys()):
output[key] = param_args[key].get_param_values()
return output
@staticmethod
def _get_param_dict(param_args):
"""Returns an input dictionary format for a dict of ParamArg objects."""
out = {}
for k, param_arg in param_args.items():
out[k] = {
'param_type': param_arg.param_type,
'values': param_arg.get_param_values()}
return out
def _update_loop_inputs(self, param_args_dict):
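    """Copies the current parameterized arg values into pose_states, then moves
    any keys that belong to the path location or shape params into those dicts.
    """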
for key, param_arg in param_args_dict.items():
self.pose_states[key] = param_arg.get_values()
path_location_params = self.path_location_params
for k in path_location_params.keys():
if k in self.pose_states:
path_location_params[k] = self.pose_states.pop(k)
path_shape_params = self.path_shape_params
for k in path_shape_params.keys():
if k in self.pose_states:
path_shape_params[k] = self.pose_states.pop(k)
def plot_power_components(self, x='dist_norm', **kwargs):
"""
Plots components of total power measured along aerodynamic axis.
"""
figsize = kwargs.get('figsize', (9., 7.))
keys = ['aero_power', 'tether_power', 'gravity_power', 'accel_power',
'rotor_thrust_power']
plt.figure(figsize=figsize)
plt.title('Power Components vs %s @ %0.1fm/s' % (x, self.v_w_at_h_ref))
plt.ylabel('Power [kW]')
plt.xlabel(x)
for k in keys:
plt.plot(self.data_poses[x], self.data_poses[k]/1000., label=k)
plt.grid(linestyle=':', color='gray', linewidth=0.5)
plt.legend()
def plot_moments(self, x='dist_norm'):
"""Plots components of moments in body frame."""
for axis, ii in zip(['x', 'y', 'z'], [0, 1, 2]):
plt.figure()
for k in ['inertial', 'rotors', 'aero', 'gravity', 'tether', 'residual']:
plt.plot(
self.data_poses[x],
[p.state['moments_b']['type'][k][ii] for p in self.poses], label=k)
plt.ylabel('Nm of moment')
plt.title('Moments about %s axis'%axis)
plt.tight_layout()
plt.legend()
plt.grid(linewidth=0.5, linestyle=':')
def plot_flap_coeff(self, x='dist_norm'):
"""Plots flap aero coefficients required to meet moment balance."""
plt.figure()
plt.plot(self.data_poses[x],
[p.state['flap_aero_coeffs'] for p in self.poses])
plt.ylabel('Flap Aero Moment Coeff')
plt.xlabel('Normalized Distance around Loop')
plt.tight_layout()
plt.legend(['cl', 'cm', 'cn'])
plt.grid(linewidth=0.5, linestyle=':')
def plot_vectors(self, ys=['pqr', 'omega_hat'], ys_components=[['p','q','r']],
x='dist_norm', plot_kwargs={}, **kwargs):
"""Method for plotting 3-component vectors in the pose states.
Pass a list of corresponding component label lists. If empty or None,
['x','y','z'] is used for all. If length of 1, that entry is used for
all."""
figsize = kwargs.get('figsize', (9, 6))
fig = kwargs.get('fig', None)
base_label = kwargs.get('base_label', '')
label = kwargs.get('label','v_w_at_h_hub')
if not fig:
fig, axes = plt.subplots(figsize=(figsize[0],figsize[1] * len(ys)),
nrows=len(ys), ncols=1, dpi=100)
if ys_components is None or len(ys_components)==0:
ys_components = [['x', 'y', 'z']] * len(ys)
elif len(ys_components)==1:
ys_components = ys_components * len(ys)
assert len(ys_components)==len(ys), 'Cannot map list of ys_components to ys.'
for jj, (ax, y, y_components) in enumerate(
zip(fig.get_axes(), ys, ys_components)):
if len(y_components)==0:
y_components = ['x', 'y', 'z']
assert len(y_components) == 3, (
'This method only works for 3-component vectors.')
for ii, component in enumerate(y_components):
if type(y) is str:
data = [p.state[y][ii] for p in self.poses]
elif hasattr(y, '__iter__') and not isinstance(y, str):
# Extract data that's nested a few levels
data = [fun.nested_dict_traverse(y, p.state)[ii] for p in self.poses]
else:
assert(False), 'Failed to parse y: {}'.format(y)
ax.plot(self.data_poses[x], data,
label=component+base_label, **plot_kwargs)
ax.set_title('{} at {} of {} m/s'.format(y, label, self[label]))
ax.legend(bbox_to_anchor=(1.02, 1.0), loc=2, prop={'size':8})
ax.grid(linewidth=0.5, linestyle=':')
return fig
def plot_vectors_components(self, ys=['pqr', 'omega_hat'],
y_components=['p','q','r'], ys_labels=None, x='dist_norm',
plot_kwargs={}, **kwargs):
"""Method for plotting 3-component vectors in the pose states.
    All ys in the list will have their components plotted together;
    one subplot for each component."""
figsize = kwargs.get('figsize', (9, 6 * len(y_components)))
fig = kwargs.get('fig', None)
base_label = kwargs.get('base_label', '')
label = kwargs.get('label','v_w_at_h_hub')
if not fig:
fig, axes = plt.subplots(
figsize=figsize, ncols=1, nrows=len(y_components), dpi=100)
if y_components is None or len(y_components)==0:
y_components = ['x', 'y', 'z']
for jj, (ax, y_component) in enumerate(zip(fig.get_axes(), y_components)):
for ii, y in enumerate(ys):
if type(y) is str:
data = [p.state[y][jj] for p in self.poses]
elif hasattr(y, '__iter__') and not isinstance(y, str):
# Extract data that's nested a few levels
data = [fun.nested_dict_traverse(y, p.state)[jj] for p in self.poses]
else:
assert(False), 'Failed to parse y: {}'.format(y)
if ys_labels is not None:
y_label = '{}{}'.format(ys_labels[ii], base_label)
else:
y_label = '{}{}'.format(y, base_label)
ax.plot(self.data_poses[x], data,
label=y_label, **plot_kwargs)
ax.set_title(
'{} components at {} of {} m/s'.format(
y_component, label, self[label]))
ax.legend(bbox_to_anchor=(1.02, 1.0), loc=2, prop={'size':8})
ax.grid(linewidth=0.5, linestyle=':')
return fig
def _get_3d_plot_scale(self, poses):
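    """Returns x/y/z plot ranges forming a common bounding box around poses.
    Note: i, j, k are assumed to be module-level axis-index constants
    (0, 1, 2) defined earlier in this file."""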
xs = [pose.state['xyz'][i] for pose in poses]
ys = [pose.state['xyz'][j] for pose in poses]
zs = [pose.state['xyz'][k] for pose in poses]
x_min = min(xs)
y_min = min(ys)
z_min = min(zs)
x_max = max(xs)
y_max = max(ys)
z_max = max(zs)
# Finds the biggest dimension of positions in dataset, and sets range 1.1x
# larger than that.
scale = (
1.1 * max(
[x_max - x_min,
y_max - y_min,
z_max - z_min,
x_max - self.path.gs_position[i],
y_max - self.path.gs_position[j]])
/ 2.)
x_mid = (x_max + x_min) / 2.
y_mid = (y_max + y_min) / 2.
z_mid = (z_max + z_min) / 2.
x_range = [x_mid - scale, x_mid - scale, x_mid + scale]
y_range = [y_mid - scale, y_mid + scale, y_mid + scale]
z_range = [z_mid - scale, z_mid - scale, z_mid + scale]
return x_range, y_range, z_range
def plot_path_positions(self, **kwargs):
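    """Plots the crosswind-plane path with a colored marker at each pose."""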
fig = kwargs.get('fig', None)
figsize = kwargs.get('figsize', (7, 7))
label = kwargs.get('label', '{} m/s'.format(self.v_w_at_h_ref))
path_color = kwargs.get('path_color', 'black')
pose_colormap = kwargs.get('pose_colormap', 'rainbow')
with_azim_incl = kwargs.get('with_azim_incl', False)
if not fig:
fig, ax = plt.subplots(figsize=figsize)
ax = fig.get_axes()[0]
# Axis not provided for cycler, as colors are set by path color.
# Colors only needed for markers for poses.
colors = utils.set_rainbow_plot_cycler(n=self.num_poses, cmap=pose_colormap)
xs = self.path['crosswind_xy'].T[0]
ys = self.path['crosswind_xy'].T[1]
if with_azim_incl:
      # Simple Mercator projection of the path centroid location; this will
      # distort centroid locations, showing them further from each other
      # than they really are.
xs = xs - self.path_location_params['azim'] * self.config['l_tether']
ys = ys + self.path_location_params['incl'] * self.config['l_tether']
ax.set_title('Crosswind Plane Path & Poses,'
' with approximate azim and incl.')
ax.axvline(linewidth=1, label=None)
ax.axhline(linewidth=1, label=None)
else:
ax.set_title('Crosswind Plane Path & Poses')
ax.scatter(0, 0, marker='+', c='k', s=250, label=None)
ax.plot(xs, ys, c=path_color, linestyle='--', label=label, zorder=1)
ax.quiver(xs[-1], ys[-1], xs[0]-xs[-1], ys[0]-ys[-1],
angles='xy', scale_units='xy', scale=1.,
linestyle='--', label=None, zorder=1, color=path_color)
ax.scatter(xs, ys, s=75, c=colors, zorder=2)
ax.grid(linewidth=1, linestyle=':')
ax.axis('equal')
if with_azim_incl:
ax.set_ylim((-10, self.config['l_tether'] * np.pi / 3))
return fig
def plot_rotor_map(self, **kwargs):
"""Map out the loop on the rotor table plots."""
fig = kwargs.get('fig', None)
label = kwargs.get('label', '{} m/s'.format(self.v_w_at_h_ref))
path_color = kwargs.get('path_color', 'gray')
path_width = kwargs.get('path_width', 2)
pose_colormap = kwargs.get('pose_colormap', 'rainbow')
pose_markersize = kwargs.get('pose_markersize', 75)
x = kwargs.get('x', 'omega_rotor')
y = kwargs.get('y', 'v_a_along_rotor_axis')
zs = kwargs.get('zs', ['c_t', 'c_p', 'eta_rotors'])
if 'torque_shaft_max' in self.config:
torque_shaft_max = self.config['torque_shaft_max']
else:
torque_shaft_max = None
if fig is None:
fig = rotor_model_util.PlotRotor(
self.config['shaft_power_from_drag_power'],
rho=self.resource['rho'], c_sound=self.resource['c_sound'],
power_shaft_max=self.config['power_shaft_max'],
torque_shaft_max=torque_shaft_max,
x=x, y=y, zs=zs,
**kwargs)
colors = utils.set_rainbow_plot_cycler(n=self.num_poses, cmap=pose_colormap)
xs = [p.state[x] for p in self.poses]
ys = [p.state[y] for p in self.poses]
for ax in fig.get_axes():
ax.plot(
xs, ys, c=path_color, linestyle='-.', label=label, zorder=2,
linewidth=path_width)
ax.quiver(xs[-1], ys[-1], xs[0]-xs[-1], ys[0]-ys[-1],
angles='xy', scale_units='xy', scale=1., color=path_color,
linestyle='--', label=None, zorder=2)
ax.scatter(xs, ys, s=pose_markersize, c=colors, zorder=3)
return fig
def plot_convergence_data(self, **kwargs):
"""Plots optimization convergence data.
Only available if loop was optimized.
All variables being optimized and constraints are plotted.
Constraints are violated when margins are negative."""
# TODO: Add additional plotting kwargs.
figsize = kwargs.get('figsize', (7,5))
out = {}
for ii, d in enumerate(self.convergence_data):
for k, v in d['params'].items():
if k not in out:
out[k] = []
out[k].append(v)
constraints = {}
for c in d['constraints']:
if c['name'] not in constraints:
constraints[c['name']] = []
constraints[c['name']].append(c['margin_norm'])
for k, v in constraints.items():
if k not in out:
out[k] = []
out[k].append(constraints[k])
# Plot crosswind path to show color scheme for constraints.
fig = self.plot_path_positions()
fig.suptitle('Color Key of Poses for Constraints')
for k, v in out.items():
plt.figure(figsize=figsize)
ps = plt.plot(v)
utils.set_rainbow_plot_cycler(n=len(v[0]), ax=plt.gca())
plt.title(k)
plt.xlabel('Iteration number')
plt.legend(ps, [str(ii) for ii in range(len(ps))],
bbox_to_anchor=(1.02, 1.0), loc=2)
plt.grid(linewidth=0.5, linestyle=':')
plt.tight_layout()
def gen_loop_vec_plot_file(
self, location='plots/plot_loop_forces.json', var='force_types',
no_show=[], nth_point=1, slicr=None, animate=False):
"""
Takes all selected vector data in the loop and writes them out to a
json that is formatted for use in Plotly javascript library.
Kwargs:
location: String of file location where data is stored.
var: Specifies what vectors to plot, either 'all', 'force_types',
'speeds', or 'path_def'.
no_show: A list of strings of things not to show. Only thing currently
active is the (...) scaling object.
      nth_point: Only plots every nth point. Used to reduce large datasets
        to a manageable size.
slicr: Tuple of start and stop positions, normalized from 0:1 of data
to plot.
animate: Writes out each pose as a frame, which enables plotter to
animate poses.
    Colors are scaled from the 5th to the 95th percentile so that outliers
    do not skew the color scale.
"""
# Makes a file of the poses data, to be used for a single plot.
plot_dict = {}
# Maps the selected variable to the KiteLoop method that returns that
# variable.
methods = {'all': 'gen_all_plot_data',
# TODO: Force components is currently broken. Fix.
#'force_components': 'gen_force_plot_data',
'force_types': 'gen_force_type_data',
'speeds': 'gen_speeds_plot_data',
'path_def': 'gen_path_def_plot_data'}
if slicr is not None:
start = int(slicr[0] * len(self.poses))
end = int(slicr[1] * len(self.poses))
poses = self.poses[start:end]
else:
if animate:
poses = self.poses + [self.poses[0]]
else:
poses = self.poses
if animate:
frames = []
times = []
x_range, y_range, z_range = self._get_3d_plot_scale(poses)
for ii, pose in enumerate(poses):
if ii % nth_point == 0:
if '(...)' not in no_show and '(...)' not in plot_dict:
plot_dict.update({'(...)': {'name':'(...)',
'type': 'scatter3d',
'mode': 'lines',
'x': x_range,
'y': y_range,
'z': z_range,
'text': ['bounding box for scaling'],
'line':{'width': 0.001}
}})
pose_plots = getattr(pose, methods.get(var))(no_show=(no_show + ['(...)']))
for plot in pose_plots:
plot['text'] = [(p+'\r\nPosition: %d \r\nV_a: %0.1f \r\nV_k: %0.1f \r\nThrust Power: %0.1f'
% (ii, pose.state['v_a'], pose.state['v_k'], pose.state['power_thrust'])) for p in plot['text']]
key = plot['name']
if key in plot_dict:
plot_dict[key]['x'].extend([None]+ plot['x'])
plot_dict[key]['y'].extend([None] + plot['y'])
plot_dict[key]['z'].extend([None] + plot['z'])
plot_dict[key]['text'].extend([None] + plot['text'])
else:
plot_dict.update({plot['name']: plot})
if animate:
if ii == 0:
frames.append({'data': list(plot_dict.values()),
'name': str(ii)})
else:
num_ease = int(pose.state['segment_time'] / (1./20.)) # 20 frames a second
prev_frame = frames[-1]
for num in range(num_ease):
data = []
for prev_val, cur_val in zip(prev_frame['data'], list(plot_dict.values())):
vec = {}
for p in ['x', 'y', 'z']:
start = interp1d([0, num_ease], [prev_val[p][0], cur_val[p][0]])
end = interp1d([0, num_ease], [prev_val[p][-1], cur_val[p][-1]])
vec[p] = [start(num).tolist(), end(num).tolist()]
data.append(vec)
times.append(int(pose.state['segment_time']*1000./num_ease))
frames.append({'data': data})
plot_dict = {}
if animate:
output = {'frames': frames,
'times': times}
else:
output = list(plot_dict.values())
if location is not None:
with open(location,'w') as outfile:
json.dump(output, outfile)
print('File saved to %s.' % location)
else:
return output
def gen_loop_positions_plot_file(
self, location='plots/plot_loop_positions.json', label=None, no_show=[],
var_to_color='power_shaft', path_options=None):
"""
Takes all positions in the loop and writes them out to a json that is
formatted for use in Plotly javascript library.
Args:
location: File location where data is stored.
no_show: A list of strings of things not to show. Only thing currently
active is the (...) scaling object.
var_to_color: Searches the pose.state dictionary for the variable given
and colors the marker and the point according to the value.
    Colors are scaled from the 5th to the 95th percentile so that outliers
    do not skew the color scale.
"""
plot_list = []
var_list = []
text_list = []
xs = []
ys = []
zs = []
if label is None:
label = 'positions'
if var_to_color is not None:
for pose in self.poses:
var = pose.state[var_to_color]
var_list.append(var)
text_list.append(str(var))
prct95 = np.percentile(var_list, 95)
prct05 = np.percentile(var_list, 5)
color = var_list
mode = 'lines+markers'
marker_options = {'size':4.,
'color': color,
'cmin':prct05,
'cmax':prct95,
'colorscale':'Jet',
'showscale': True}
line_options_base = {'width':2.,
'color': color,
'cmin':prct05,
'cmax':prct95,
'colorscale':'Jet'}
else:
line_options_base = {'width':3.,
'color': 'red'}
marker_options = {}
mode = 'lines'
line_options = line_options_base
if path_options is not None:
line_options.update(path_options)
for pose in self.poses:
x, y, z = pose.position['xyz']
xs.append(x)
ys.append(y)
zs.append(z)
plot_list.append({ 'name': label,
'type': 'scatter3d',
'mode': mode,
'text': text_list,
'x': xs,
'y': ys,
'z': zs,
'marker': marker_options,
'line': line_options,
})
x_range, y_range, z_range = self._get_3d_plot_scale(self.poses)
if '(...)' not in no_show:
plot_list.append({'name':'(...)',
'type': 'scatter3d',
'mode': 'lines',
'x': x_range,
'y': y_range,
'z': z_range,
'line':{'width': 0.001}
})
with open(location,'w') as outfile:
json.dump(plot_list,outfile)
print('File saved to %s.' % location)
def plot_var_sweep(positions, resource, config,
var_to_sweep='v_a', var_range=(30.,90.,30.),
vars_to_plot=['power', 'tension'],
var_scales=None, var_units=None,
v_w_at_h_ref=7.0, alpha=5., beta=0., v_a=55.,
pqr=[0., 0., 0.], pqr_dot=[0., 0., 0.],
accel_along_path=None,
k_grav=0.5, grav_mult=1.0,
legend_key='loop_angle', every_nth=1,
ylims=None, plot_kwargs={},
**kwargs):
"""Takes a list of positions for a single kite loop, sweeps a variable for
each pose, and plots the results.
Each pose is evaluated completely separately. Accelerations (both rotational
  and translational) are not consistent - i.e., the accelerations applied to
  one pose will not get you to the next pose.
  This is a tool to explore the effect of changing an input while orienting
  the kite to best meet the force balance (a balance is not guaranteed).
Args:
positions: A list of position dicts. See KitePose positions attribute for
example of format.
resource: A dict that specifies a resource. See KitePowerCurve object for
details.
config: A dict that specifies an energy kite. See KitePowerCurve object for
details.
Kwargs:
var_to_sweep: Name of the variable to sweep. Needs to be a valid input to
KitePose.
var_range: Sets up range for var_to_sweep.
Specified as (<start_value>, <end_value>, <num_steps>).
vars_to_plot: Variable to plot. Each gets its own figure. Must be available
in KitePose.state dict.
var_scales: Float that scales the y-axis for each vars_to_plot.
var_units: String that is appended to y-axis label.
v_w_at_h_ref: Float of wind speed at reference height.
alpha: Kite alpha, in deg. Can be a list of values specifying alpha for each
position.
beta: Kite beta, in deg. Can be a list of values specifying beta for each
position.
v_a: Kite airspeed. Can be a list of values specifying for each position.
pqr: Array-like vector of rotational body rates. Can be a list of vectors
specifying pqr for each position.
pqr_dot: Array-like vector of rotational body accelerations. Can be a list
of vectors specifying pqr_dot for each position.
k_grav: Scalar that specifies acceleration along path for each pose.
Represents how much of gravity acceleration along path is applied to each
pose. If not None, this overwrites the accel_along_path variable.
grav_mult: Multiplier applied to gravity.
legend_key: Variable in pose.state that is used to differentiate poses in
legend.
every_nth: Only plot every nth position in positions.
ylims: List-like of (<y_min>, <y_max>) for each variable in vars_to_plot.
plot_kwargs: Dict of kwargs to pass to matplotlib plot function.
"""
# TODO: Add more support for kwargs, such as adding to an existing
# figure.
figsize = kwargs.get('figsize', (10,6))
var_range = np.linspace(var_range[0], var_range[1], var_range[2])
if var_scales is None:
var_scales = [1.0] * len(vars_to_plot)
if var_units is None:
var_units = [('','')] * len(vars_to_plot)
  if ylims is None:
ylims = [None] * len(vars_to_plot)
poses_data = []
for ii, position in enumerate(positions):
if ii % every_nth == 0:
pose_data = {'valid': {var_to_sweep: []},
'invalid': {var_to_sweep: []},
legend_key: None}
for key in vars_to_plot:
pose_data['valid'][key] = []
pose_data['invalid'][key] = []
# Setup inputs.
input_vars = {
'alpha': alpha,
'beta': beta,
'v_a': v_a,
'accel_along_path': accel_along_path,
'pqr': pqr,
'pqr_dot': pqr_dot,
'v_w_at_h_ref': v_w_at_h_ref}
if 'aero_device' in config:
input_vars.update(
{'aero_device_scale': kwargs.get('aero_device_scale', 0.)})
# Parse the inputs to determine things that are global or per pose.
input_dict = {}
uniques = {}
for k, v in input_vars.items():
# If k_grav is provided, it steps on all accel_along_path inputs.
if k == 'accel_along_path':
if k_grav is not None:
v = np.dot(
np.array([0., 0., -1.]) * utils.Const.G * k_grav,
position['e_path_tangent'])
if hasattr(v, '__iter__'):
if len(v) == len(positions):
input_dict[k] = v[ii]
uniques[k] = True
else:
uniques[k] = False
input_dict[k] = v
else:
uniques[k] = False
input_dict[k] = v
# Cannot specify both v_a and v_k, so if v_k is the variable we sweep,
# remove v_a from the inputs.
if var_to_sweep == 'v_k':
input_dict.pop('v_a')
# Step through the variable range.
for v in var_range:
# Define a function to return the net force residual.
# This will be used to optimize each pose to best meet a force balance.
def eval_net(lift_roll_angle):
input_dict[var_to_sweep] = v
input_dict['lift_roll_angle'] = lift_roll_angle
pose = kite_pose.KitePose(
position, resource, config,
grav_mult=grav_mult, **input_dict)
pose.solve()
for c in pose.state['constraints']:
if c['name'] == 'net_margin':
out = c['value']
return out
# Optimize to find the lift_roll_angle that best meets the force
# balance. The bracket specifies the initial steps.
result = minimize_scalar(
eval_net, bounds=(-math.pi, math.pi), tol=0.0005,
bracket=(-0.5, 0.), options={'maxiter': 12})
# Create the final pose to extract the data out of.
pose = kite_pose.KitePose(
position, resource, config,
grav_mult=grav_mult, **input_dict)
pose.solve()
# Save legend key value.
# Unnecessarily updated every iteration.
pose_data[legend_key] = pose.state[legend_key]
if pose.valid:
pose_data['valid'][var_to_sweep].append(v)
pose_data['invalid'][var_to_sweep].append(None)
else:
pose_data['valid'][var_to_sweep].append(None)
pose_data['invalid'][var_to_sweep].append(v)
for iii, key in enumerate(vars_to_plot):
pose_data['valid'][key].append(pose.state[key] / var_scales[iii])
pose_data['invalid'][key].append(pose.state[key] / var_scales[iii])
poses_data.append(pose_data)
# Plot everything.
for key, ylim, units in zip(vars_to_plot, ylims, var_units):
fig = plt.figure(figsize=figsize)
ax = fig.gca()
ax.set_title(
'%s vs %s for Various Positions @ %0.1f m/s'
% (key, var_to_sweep, v_w_at_h_ref), fontsize=15)
ax.set_xlabel(var_to_sweep + ' ' + units[0], fontsize=14)
ax.set_ylabel(key + ' ' + units[1], fontsize=14)
if ylim is not None:
ax.set_ylim(ylim)
max_legend_val = max([p[legend_key] for p in poses_data])
min_legend_val = min([p[legend_key] for p in poses_data])
for p in poses_data:
color = cm.rainbow(
(p[legend_key] - min_legend_val)/(max_legend_val - min_legend_val))
ax.plot(p['valid'][var_to_sweep],
p['valid'][key],
color=color, label=legend_key + ': %0.2f' % p[legend_key],
**plot_kwargs)
ax.plot(p['invalid'][var_to_sweep],
p['invalid'][key],
color=color,
linestyle='--', **plot_kwargs)
state_rnd = utils.deepcopy_lite(input_dict)
for k, v in state_rnd.items():
state_rnd[k] = np.round(v, 2)
if hasattr(state_rnd[k], 'tolist'):
state_rnd[k] = state_rnd[k].tolist()
state_rnd.pop(var_to_sweep)
state_rnd.pop('accel_along_path')
state_rnd.pop('lift_roll_angle')
state_str = ''
for ii, (k, v) in enumerate(state_rnd.items()):
if k_grav is not None and k == 'accel_along_path':
continue
if uniques[k]:
v = 'varies'
if ii % 2 == 0 and ii != 0:
state_str += '\n'
state_str += str(k).replace('\'', '') + ':' + str(v) + ' '
if k_grav is not None:
state_str += '\nk_grav: %0.2f' % k_grav
ax.text(1.02, 0.03, state_str
+ '\nInvalid = dashed line.\n'
+ 'Poses individually evaluated.',
fontsize=10, transform=plt.gca().transAxes, ha='left')
ax.legend(bbox_to_anchor=(1.01, 1.), loc=2, fontsize=14)
ax.grid(linestyle=':', linewidth=0.5)
fig.tight_layout()
return fig
| apache-2.0 |
AIML/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
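    """Opens f for binary reading: f may be an integer file descriptor or a
    path string (paths ending in .gz or .bz2 are decompressed on the fly)."""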
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
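    Examples
    --------
    A minimal, hypothetical usage sketch (values are made up)::
        import numpy as np
        X = np.array([[0., 1.], [2., 0.]])
        y = np.array([0, 1])
        dump_svmlight_file(X, y, "dump.svmlight", zero_based=True)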
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |
florian-f/sklearn | sklearn/cluster/tests/test_spectral.py | 2 | 7966 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances, adjusted_rand_score
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_lobpcg_mode():
# Test the lobpcg mode of SpectralClustering
# We need a fairly big data matrix, as lobpcg does not work with
# small data matrices
centers = np.array([
[0., 0.],
[10., 10.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=.1, random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="lobpcg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    # We need a large matrix, or the lobpcg solver will fall back to its
# non-sparse and buggy mode
S = np.array([[1, 5, 2, 2, 1, 0, 0, 0, 0, 0],
[5, 1, 3, 2, 1, 0, 0, 0, 0, 0],
[2, 3, 1, 1, 1, 0, 0, 0, 0, 0],
[2, 2, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 2, 1, 1, 1],
[0, 0, 0, 0, 1, 2, 2, 3, 3, 2],
[0, 0, 0, 0, 2, 2, 3, 3, 3, 4],
[0, 0, 0, 0, 1, 3, 3, 1, 2, 4],
[0, 0, 0, 0, 1, 3, 3, 2, 1, 4],
[0, 0, 0, 0, 1, 2, 4, 4, 4, 1],
])
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
if labels[0] == 0:
labels = 1 - labels
assert_greater(np.mean(labels == [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]), .89)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=40, random_state=2, centers=[[1, 1], [-1, -1]],
cluster_std=0.4)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.todense()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
c-benko/HHG_phasematching_fsEC | test/testrun.py | 1 | 1170 | # testrun.py
import sys, os
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(here, '../src')))
from phasematching import *
import matplotlib.pyplot as plt
from pylab import *
sim = phase_matching('Ar', 100, 88 , 45e-15, 40e-6, 1070e-9, 1, 20e-3, 300, 0, .015, 0, 'on')
# sim = phase_matching('Xe', 13, 35 , 300e-15, 22e-6, 1070e-9, .1, .3e-3, 200, 0, .015, 0, 'on')
close('all')
t, y1 , y2, y3, y4, y5, y6, y7,y8 = sim.harmonic_yield()
fig, ax1 = plt.subplots()
ax1.plot(t, y1/max(y1),'b-', label = 'Pulse: ' + str(max(y1)))
ax1.plot(t, y3,'b--', label = 'Ionization Fraction')
ax1.plot(t, y4/max(y4),'b.-', label = 'Harmonic Yield')
ax1.set_xlabel('time (s)')
ax1.set_ylabel('Ionization fraction/normalized intensity', color='b')
plt.legend( loc = 1)
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
plot(t, y2 * 10 ** 3,'r-', label = 'L coh [mm]' )
ax2.set_ylabel('Coherence Length [mm]', color='r')
ax2.grid(False)
for tl in ax2.get_yticklabels():
tl.set_color('r')
plt.legend(loc = 3)
fig, ax = subplots()
ax.plot(t, y6)
fig, ax = subplots()
ax.plot(t[:-1], y7)
plt.show()
| mit |
nrego/westpa | lib/examples/stringmethodexamples/examples/plotmueller.py | 1 | 1470 | import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import h5py
from IPython import embed
from fasthist import normhistnd
def mueller(x, y):
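    # Mueller-Brown test potential: a sum of four anisotropic Gaussian terms.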
aa = [-1, -1, -6.5, 0.7]
bb = [0, 0, 11, 0.6]
cc = [-10, -10, -6.5, 0.7]
AA = [-200, -100, -170, 15]
XX = [1, 0, -0.5, -1]
YY = [0, 0.5, 1.5, 1]
V1 = 0
for j in range(4):
V1 += AA[j] * np.exp(aa[j] * (x - XX[j])**2 + \
bb[j] * (x - XX[j]) * (y - YY[j]) + cc[j] * (y - YY[j])**2)
return V1
def plotmueller(beta=1, axis=None):
xx, yy = np.mgrid[-1.5:1.2:0.01, -0.2:2.0:0.01]
v = mueller(xx, yy)
v -= v.min()
#embed()
if axis==0:
probs = np.exp(-v)
probs = probs.sum(axis=1)
plt.plot(xx[:,0], -np.log(probs))
elif axis==1:
probs = np.exp(-v)
probs = probs.sum(axis=0)
plt.plot(yy[0,:], -np.log(probs))
else:
plt.contourf(xx, yy, v.clip(max=200), 40)
def calculate_length(x):
dd = x - np.roll(x, 1, axis=0)
dd[0,:] = 0.0
return np.cumsum(np.sqrt((dd*dd).sum(axis=1)))
f = h5py.File('strings.h5')
try:
all_strings = f['strings'][...]
for i in range(all_strings.shape[0]):
if (i) % (10) == 0:
string = all_strings[i]
plt.plot(string[:,0], string[:,1], '-o', label='{}'.format(i))
except:
pass
axis=None
plotmueller(axis=axis)
plt.legend()
if axis is None:
plt.colorbar()
plt.show()
| gpl-3.0 |
datachand/h2o-3 | h2o-py/tests/testdir_golden/pyunit_svd_1_golden.py | 5 | 2184 | import sys
sys.path.insert(1, "../../")
import h2o, tests
def svd_1_golden():
print "Importing USArrests.csv data..."
arrestsH2O = h2o.upload_file(h2o.locate("smalldata/pca_test/USArrests.csv"))
print "Compare with SVD"
fitH2O = h2o.svd(x=arrestsH2O[0:4], nv=4, transform="NONE", max_iterations=2000)
print "Compare singular values (D)"
h2o_d = fitH2O._model_json['output']['d']
r_d = [1419.06139509772, 194.825846110138, 45.6613376308754, 18.0695566224677]
print "R Singular Values: {0}".format(r_d)
print "H2O Singular Values: {0}".format(h2o_d)
for r, h in zip(r_d, h2o_d): assert abs(r - h) < 1e-6, "H2O got {0}, but R got {1}".format(h, r)
print "Compare right singular vectors (V)"
h2o_v = fitH2O._model_json['output']['v']
r_v = [[-0.04239181, 0.01616262, -0.06588426, 0.99679535],
[-0.94395706, 0.32068580, 0.06655170, -0.04094568],
[-0.30842767, -0.93845891, 0.15496743, 0.01234261],
[-0.10963744, -0.12725666, -0.98347101, -0.06760284]]
print "R Right Singular Vectors: {0}".format(r_v)
print "H2O Right Singular Vectors: {0}".format(h2o_v)
for rl, hl in zip(r_v, h2o_v):
for r, h in zip(rl, hl): assert abs(abs(r) - abs(h)) < 1e-5, "H2O got {0}, but R got {1}".format(h, r)
print "Compare left singular vectors (U)"
h2o_u = h2o.as_list(h2o.get_frame(fitH2O._model_json['output']['u_key']['name']), use_pandas=False)
h2o_u.pop(0)
r_u = [[-0.1716251, 0.096325710, 0.06515480, 0.15369551],
[-0.1891166, 0.173452566, -0.42665785, -0.17801438],
[-0.2155930, 0.078998111, 0.02063740, -0.28070784],
[-0.1390244, 0.059889811, 0.01392269, 0.01610418],
[-0.2067788, -0.009812026, -0.17633244, -0.21867425],
[-0.1558794, -0.064555293, -0.28288280, -0.11797419]]
print "R Left Singular Vectors: {0}".format(r_u)
print "H2O Left Singular Vectors: {0}".format(h2o_u)
for rl, hl in zip(r_u, h2o_u):
for r, h in zip(rl, hl): assert abs(abs(r) - abs(float(h))) < 1e-5, "H2O got {0}, but R got {1}".format(h, r)
if __name__ == "__main__":
tests.run_test(sys.argv, svd_1_golden)
| apache-2.0 |
yunfeilu/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
xiangdal/TrajectoryNet | load_data.py | 1 | 1386 | import cPickle, gzip, numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import LogisticRegression
# For ploting
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from itertools import compress
from numpy import genfromtxt
vesselnum = 5
print('load data...')
train_set = genfromtxt('/users/grad/xjiang/code/'+str(vesselnum)+'/trainoutput-99.csv', delimiter=',')
test_set = genfromtxt('/users/grad/xjiang/code/'+str(vesselnum)+'/testoutput-99.csv', delimiter=',')
valid_set = genfromtxt('/users/grad/xjiang/code/'+str(vesselnum)+'/valoutput-99.csv', delimiter=',')
def get_in_out(data):
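    # Splits a matrix into features (all but the last two columns) and
    # integer class labels (the second-to-last column).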
x = data[:,0:(data.shape[1]-2)]
y = data[:, (data.shape[1]-2):(data.shape[1]-1)]
y = y.astype(int)
y = y.flatten()
return (x,y)
train_x, train_y = get_in_out(train_set)
test_x, test_y = get_in_out(test_set)
valid_x, valid_y = get_in_out(valid_set)
train_set_x = theano.shared(numpy.array(train_x, dtype='float32'))
test_set_x = theano.shared(numpy.array(test_x, dtype='float32'))
valid_set_x = theano.shared(numpy.array(valid_x, dtype='float32'))
train_set_y = theano.shared(numpy.array(train_y, dtype='int32'))
test_set_y = theano.shared(numpy.array(test_y, dtype='int32'))
valid_set_y = theano.shared(numpy.array(valid_y, dtype='int32'))
print('data loaded...')
| apache-2.0 |
surligas/gnuradio | gr-filter/examples/synth_filter.py | 58 | 2552 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
for fi in freqs:
s = analog.sig_source_c(fs, analog.GR_SIN_WAVE, fi, 1)
sigs.append(s)
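    # Prototype low-pass filter taps; args are (gain, sample rate, cutoff,
    # transition width, stop-band attenuation in dB).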
taps = filter.firdes.low_pass_2(len(freqs), fs,
fs/float(nchans)/2, 100, 100)
print "Num. Taps = %d (taps per filter = %d)" % (len(taps),
len(taps)/nchans)
filtbank = filter.pfb_synthesizer_ccf(nchans, taps)
head = blocks.head(gr.sizeof_gr_complex, N)
snk = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(filtbank, head, snk)
for i,si in enumerate(sigs):
tb.connect(si, (filtbank, i))
tb.run()
if 1:
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(snk.data()[1000:])
fftlen = 2048
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
winfunc = scipy.blackman
s2.psd(snk.data()[10000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
nvoron23/statsmodels | statsmodels/tsa/vector_ar/tests/test_var.py | 23 | 18346 | """
Test VAR Model
"""
from __future__ import print_function
# pylint: disable=W0612,W0231
from statsmodels.compat.python import (iteritems, StringIO, lrange, BytesIO,
range)
from nose.tools import assert_raises
import nose
import os
import sys
import numpy as np
import statsmodels.api as sm
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tools.data as data_util
from statsmodels.tsa.vector_ar.var_model import VAR
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
assert_allclose)
DECIMAL_12 = 12
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
class CheckVAR(object):
# just so pylint won't complain
res1 = None
res2 = None
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_neqs(self):
assert_equal(self.res1.neqs, self.res2.neqs)
def test_nobs(self):
assert_equal(self.res1.avobs, self.res2.nobs)
def test_df_eq(self):
assert_equal(self.res1.df_eq, self.res2.df_eq)
def test_rmse(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].mse_resid**.5,
eval('self.res2.rmse_'+str(i+1)), DECIMAL_6)
def test_rsquared(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].rsquared,
eval('self.res2.rsquared_'+str(i+1)), DECIMAL_3)
def test_llf(self):
results = self.res1.results
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)
for i in range(len(results)):
assert_almost_equal(results[i].llf,
eval('self.res2.llf_'+str(i+1)), DECIMAL_2)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic)
def test_hqic(self):
assert_almost_equal(self.res1.hqic, self.res2.hqic)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe)
def test_detsig(self):
assert_almost_equal(self.res1.detomega, self.res2.detsig)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def get_macrodata():
data = sm.datasets.macrodata.load().data[['realgdp','realcons','realinv']]
names = data.dtype.names
nd = data.view((float,3))
nd = np.diff(np.log(nd), axis=0)
return nd.ravel().view(data.dtype)
def generate_var():
from rpy2.robjects import r
import pandas.rpy.common as prp
r.source('tests/var.R')
return prp.convert_robj(r['result'], use_pandas=False)
def write_generate_var():
result = generate_var()
np.savez('tests/results/vars_results.npz', **result)
class RResults(object):
"""
Simple interface with results generated by "vars" package in R.
"""
def __init__(self):
#data = np.load(resultspath + 'vars_results.npz')
from .results.results_var_data import var_results
data = var_results.__dict__
self.names = data['coefs'].dtype.names
self.params = data['coefs'].view((float, len(self.names)))
self.stderr = data['stderr'].view((float, len(self.names)))
self.irf = data['irf'].item()
self.orth_irf = data['orthirf'].item()
self.nirfs = int(data['nirfs'][0])
self.nobs = int(data['obs'][0])
self.totobs = int(data['totobs'][0])
crit = data['crit'].item()
self.aic = crit['aic'][0]
self.sic = self.bic = crit['sic'][0]
self.hqic = crit['hqic'][0]
self.fpe = crit['fpe'][0]
self.detomega = data['detomega'][0]
self.loglike = data['loglike'][0]
self.nahead = int(data['nahead'][0])
self.ma_rep = data['phis']
self.causality = data['causality']
def close_plots():
try:
import matplotlib.pyplot as plt
plt.close('all')
except ImportError:
pass
_orig_stdout = None
def setup_module():
global _orig_stdout
_orig_stdout = sys.stdout
sys.stdout = StringIO()
def teardown_module():
sys.stdout = _orig_stdout
close_plots()
def have_matplotlib():
try:
import matplotlib
return True
except ImportError:
return False
class CheckIRF(object):
ref = None; res = None; irf = None
k = None
#---------------------------------------------------------------------------
# IRF tests
def test_irf_coefs(self):
self._check_irfs(self.irf.irfs, self.ref.irf)
self._check_irfs(self.irf.orth_irfs, self.ref.orth_irf)
def _check_irfs(self, py_irfs, r_irfs):
for i, name in enumerate(self.res.names):
ref_irfs = r_irfs[name].view((float, self.k))
res_irfs = py_irfs[:, :, i]
assert_almost_equal(ref_irfs, res_irfs)
def test_plot_irf(self):
if not have_matplotlib():
raise nose.SkipTest
self.irf.plot()
self.irf.plot(plot_stderr=False)
self.irf.plot(impulse=0, response=1)
self.irf.plot(impulse=0)
self.irf.plot(response=0)
self.irf.plot(orth=True)
self.irf.plot(impulse=0, response=1, orth=True)
close_plots()
def test_plot_cum_effects(self):
if not have_matplotlib():
raise nose.SkipTest
self.irf.plot_cum_effects()
self.irf.plot_cum_effects(plot_stderr=False)
self.irf.plot_cum_effects(impulse=0, response=1)
self.irf.plot_cum_effects(orth=True)
self.irf.plot_cum_effects(impulse=0, response=1, orth=True)
close_plots()
class CheckFEVD(object):
fevd = None
#---------------------------------------------------------------------------
# FEVD tests
def test_fevd_plot(self):
if not have_matplotlib():
raise nose.SkipTest
self.fevd.plot()
close_plots()
def test_fevd_repr(self):
self.fevd
def test_fevd_summary(self):
self.fevd.summary()
def test_fevd_cov(self):
# test does not crash
# not implemented
# covs = self.fevd.cov()
pass
class TestVARResults(CheckIRF, CheckFEVD):
@classmethod
def setupClass(cls):
cls.p = 2
cls.data = get_macrodata()
cls.model = VAR(cls.data)
cls.names = cls.model.endog_names
cls.ref = RResults()
cls.k = len(cls.ref.names)
cls.res = cls.model.fit(maxlags=cls.p)
cls.irf = cls.res.irf(cls.ref.nirfs)
cls.nahead = cls.ref.nahead
cls.fevd = cls.res.fevd()
def test_constructor(self):
# make sure this works with no names
ndarr = self.data.view((float, 3))
model = VAR(ndarr)
res = model.fit(self.p)
def test_names(self):
assert_equal(self.model.endog_names, self.ref.names)
model2 = VAR(self.data)
assert_equal(model2.endog_names, self.ref.names)
def test_get_eq_index(self):
assert(type(self.res.names) is list)
for i, name in enumerate(self.names):
idx = self.res.get_eq_index(i)
idx2 = self.res.get_eq_index(name)
assert_equal(idx, i)
assert_equal(idx, idx2)
assert_raises(Exception, self.res.get_eq_index, 'foo')
def test_repr(self):
# just want this to work
foo = str(self.res)
bar = repr(self.res)
def test_params(self):
assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)
def test_cov_params(self):
# do nothing for now
self.res.cov_params
def test_cov_ybar(self):
self.res.cov_ybar()
def test_tstat(self):
self.res.tvalues
def test_pvalues(self):
self.res.pvalues
def test_summary(self):
summ = self.res.summary()
def test_detsig(self):
assert_almost_equal(self.res.detomega, self.ref.detomega)
def test_aic(self):
assert_almost_equal(self.res.aic, self.ref.aic)
def test_bic(self):
assert_almost_equal(self.res.bic, self.ref.bic)
def test_hqic(self):
assert_almost_equal(self.res.hqic, self.ref.hqic)
def test_fpe(self):
assert_almost_equal(self.res.fpe, self.ref.fpe)
def test_lagorder_select(self):
ics = ['aic', 'fpe', 'hqic', 'bic']
for ic in ics:
res = self.model.fit(maxlags=10, ic=ic, verbose=True)
assert_raises(Exception, self.model.fit, ic='foo')
def test_nobs(self):
assert_equal(self.res.nobs, self.ref.nobs)
def test_stderr(self):
assert_almost_equal(self.res.stderr, self.ref.stderr, DECIMAL_4)
def test_loglike(self):
assert_almost_equal(self.res.llf, self.ref.loglike)
def test_ma_rep(self):
ma_rep = self.res.ma_rep(self.nahead)
assert_almost_equal(ma_rep, self.ref.ma_rep)
#--------------------------------------------------
# Lots of tests to make sure stuff works...need to check correctness
def test_causality(self):
causedby = self.ref.causality['causedby']
for i, name in enumerate(self.names):
variables = self.names[:i] + self.names[i + 1:]
result = self.res.test_causality(name, variables, kind='f')
assert_almost_equal(result['pvalue'], causedby[i], DECIMAL_4)
rng = lrange(self.k)
rng.remove(i)
result2 = self.res.test_causality(i, rng, kind='f')
assert_almost_equal(result['pvalue'], result2['pvalue'], DECIMAL_12)
# make sure works
result = self.res.test_causality(name, variables, kind='wald')
# corner cases
_ = self.res.test_causality(self.names[0], self.names[1])
_ = self.res.test_causality(0, 1)
assert_raises(Exception,self.res.test_causality, 0, 1, kind='foo')
def test_select_order(self):
result = self.model.fit(10, ic='aic', verbose=True)
result = self.model.fit(10, ic='fpe', verbose=True)
# bug
model = VAR(self.model.endog)
model.select_order()
def test_is_stable(self):
# may not necessarily be true for other datasets
assert(self.res.is_stable(verbose=True))
def test_acf(self):
# test that it works...for now
acfs = self.res.acf(10)
# defaults to nlags=lag_order
acfs = self.res.acf()
assert(len(acfs) == self.p + 1)
def test_acorr(self):
acorrs = self.res.acorr(10)
def test_forecast(self):
point = self.res.forecast(self.res.y[-5:], 5)
def test_forecast_interval(self):
y = self.res.y[:-self.p:]
point, lower, upper = self.res.forecast_interval(y, 5)
def test_plot_sim(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plotsim(steps=100)
close_plots()
def test_plot(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot()
close_plots()
def test_plot_acorr(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot_acorr()
close_plots()
def test_plot_forecast(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot_forecast(5)
close_plots()
def test_reorder(self):
#manually reorder
data = self.data.view((float,3))
names = self.names
data2 = np.append(np.append(data[:,2,None], data[:,0,None], axis=1), data[:,1,None], axis=1)
names2 = []
names2.append(names[2])
names2.append(names[0])
names2.append(names[1])
res2 = VAR(data2).fit(maxlags=self.p)
#use reorder function
res3 = self.res.reorder(['realinv','realgdp', 'realcons'])
#check if the main results match
assert_almost_equal(res2.params, res3.params)
assert_almost_equal(res2.sigma_u, res3.sigma_u)
assert_almost_equal(res2.bic, res3.bic)
assert_almost_equal(res2.stderr, res3.stderr)
def test_pickle(self):
fh = BytesIO()
#test wrapped results load save pickle
self.res.save(fh)
fh.seek(0,0)
res_unpickled = self.res.__class__.load(fh)
assert_(type(res_unpickled) is type(self.res))
class E1_Results(object):
"""
    Results from Lutkepohl (2005) using E1 dataset
"""
def __init__(self):
# Lutkepohl p. 120 results
# I asked the author about these results and there is probably rounding
# error in the book, so I adjusted these test results to match what is
# coming out of the Python (double-checked) calculations
self.irf_stderr = np.array([[[.125, 0.546, 0.664 ],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.129, 0.547, 0.663],
[0.032, 0.134, 0.163],
[0.026, 0.108, 0.131]],
[[0.084, .385, .479],
[.016, .079, .095],
[.016, .078, .103]]])
self.cum_irf_stderr = np.array([[[.125, 0.546, 0.664 ],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.149, 0.631, 0.764],
[0.044, 0.185, 0.224],
[0.033, 0.140, 0.169]],
[[0.099, .468, .555],
[.038, .170, .205],
[.033, .150, .185]]])
self.lr_stderr = np.array([[.134, .645, .808],
[.048, .230, .288],
[.043, .208, .260]])
basepath = os.path.split(sm.__file__)[0]
resultspath = basepath + '/tsa/vector_ar/tests/results/'
def get_lutkepohl_data(name='e2'):
lut_data = basepath + '/tsa/vector_ar/data/'
path = lut_data + '%s.dat' % name
return util.parse_lutkepohl_data(path)
def test_lutkepohl_parse():
files = ['e%d' % i for i in range(1, 7)]
for f in files:
get_lutkepohl_data(f)
class TestVARResultsLutkepohl(object):
"""
Verify calculations using results from Lutkepohl's book
"""
def __init__(self):
self.p = 2
sdata, dates = get_lutkepohl_data('e1')
data = data_util.struct_to_ndarray(sdata)
adj_data = np.diff(np.log(data), axis=0)
# est = VAR(adj_data, p=2, dates=dates[1:], names=names)
self.model = VAR(adj_data[:-16], dates=dates[1:-16], freq='Q')
self.res = self.model.fit(maxlags=self.p)
self.irf = self.res.irf(10)
self.lut = E1_Results()
def test_approx_mse(self):
# 3.5.18, p. 99
mse2 = np.array([[25.12, .580, 1.300],
[.580, 1.581, .586],
[1.300, .586, 1.009]]) * 1e-4
assert_almost_equal(mse2, self.res.forecast_cov(3)[1],
DECIMAL_3)
def test_irf_stderr(self):
irf_stderr = self.irf.stderr(orth=False)
for i in range(1, 1 + len(self.lut.irf_stderr)):
assert_almost_equal(np.round(irf_stderr[i], 3),
self.lut.irf_stderr[i-1])
def test_cum_irf_stderr(self):
stderr = self.irf.cum_effect_stderr(orth=False)
for i in range(1, 1 + len(self.lut.cum_irf_stderr)):
assert_almost_equal(np.round(stderr[i], 3),
self.lut.cum_irf_stderr[i-1])
def test_lr_effect_stderr(self):
stderr = self.irf.lr_effect_stderr(orth=False)
orth_stderr = self.irf.lr_effect_stderr(orth=True)
assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)
def test_get_trendorder():
results = {
'c' : 1,
'nc' : 0,
'ct' : 2,
'ctt' : 3
}
for t, trendorder in iteritems(results):
assert(util.get_trendorder(t) == trendorder)
def test_var_constant():
# see 2043
import datetime
from pandas import DataFrame, DatetimeIndex
series = np.array([[2., 2.], [1, 2.], [1, 2.], [1, 2.], [1., 2.]])
data = DataFrame(series)
d = datetime.datetime.now()
delta = datetime.timedelta(days=1)
index = []
for i in range(data.shape[0]):
index.append(d)
d += delta
data.index = DatetimeIndex(index)
model = VAR(data)
assert_raises(ValueError, model.fit, 1)
def test_var_trend():
# see 2271
data = get_macrodata().view((float,3))
model = sm.tsa.VAR(data)
results = model.fit(4) #, trend = 'c')
irf = results.irf(10)
data_nc = data - data.mean(0)
model_nc = sm.tsa.VAR(data_nc)
results_nc = model_nc.fit(4, trend = 'nc')
assert_raises(ValueError, model.fit, 4, trend='t')
def test_irf_trend():
# test for irf with different trend see #1636
# this is a rough comparison by adding trend or subtracting mean to data
# to get similar AR coefficients and IRF
data = get_macrodata().view((float,3))
model = sm.tsa.VAR(data)
results = model.fit(4) #, trend = 'c')
irf = results.irf(10)
data_nc = data - data.mean(0)
model_nc = sm.tsa.VAR(data_nc)
results_nc = model_nc.fit(4, trend = 'nc')
irf_nc = results_nc.irf(10)
assert_allclose(irf_nc.stderr()[1:4], irf.stderr()[1:4], rtol=0.01)
trend = 1e-3 * np.arange(len(data)) / (len(data) - 1)
# for pandas version, currently not used, if data is a pd.DataFrame
#data_t = pd.DataFrame(data.values + trend[:,None], index=data.index, columns=data.columns)
data_t = data + trend[:,None]
model_t = sm.tsa.VAR(data_t)
results_t = model_t.fit(4, trend = 'ct')
irf_t = results_t.irf(10)
assert_allclose(irf_t.stderr()[1:4], irf.stderr()[1:4], rtol=0.03)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
| bsd-3-clause |
rosenbrockc/acorn | tests/test_0sklearn.py | 1 | 5013 | """Tests the decoration and logging of the sklearn package by running some
common code lines and checking that the logged entries make sense. Also, checks
the analysis functions for the fit and predict methods to make sure they are
working.
"""
import pytest
import six
@pytest.fixture(scope="module", autouse=True)
def acorndb(request, dbdir):
"""Creates a sub-directory in the temporary folder for the `numpy` package's
database logging. Also sets the package and task to `acorn` and `numpy`
respectively.
Returns:
(py.path.local): representing the sub-directory for the packages JSON
files.
"""
from db import db_init
return db_init("sklearn", dbdir)
def test_decorate():
"""Tests the decoration of the full numpy module. Since the module can
change, the exact number of methods and objects decorated will be constantly
changing. Instead, we just make sure that some were decorated, skipped and
N/A in the module statistics.
"""
import acorn.sklearn as skl
from db import decorate_check
decorate_check("sklearn")
from acorn.analyze.sklearn import set_auto_print, set_auto_predict
set_auto_predict(True)
set_auto_print(True)
def test_classify():
"""Tests classification using several common classifiers; also tests
:func:`sklearn.datasets.make_classification`.
"""
from sklearn.datasets import make_classification
kwds = {'n_clusters_per_class': 1, 'n_informative': 2,
'random_state': 1, 'n_features': 2, 'n_redundant': 0}
X, y = make_classification(**kwds)
import acorn.numpy as np
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
from sklearn.svm import SVC
svc = SVC()
svc.fit(X_train, y_train)
    yL = svc.predict(X_test)
from db import db_entries
sentries, uuids = db_entries("sklearn")
#Unfortunately, py2 and py3 are getting different stack lengths on the
#calls, so we have to handle them (almost) separately. Since this is the
#first test in the module, we don't have to look backwards. Just take the
#entries as they are
ue = sentries
u0, e0 = sentries[0] #make classification
u1, e1 = sentries[1] #RandomState constructor
#There are two calls to random seed; skip one of them.
us, es = sentries[3] #Random seed (sub-call of u1)
usu, esu = sentries[4] #Random.uniform instance method call.
u2, e2 = sentries[5] #multiply by 2
u3, e3 = sentries[6] #iadd on X
u4, e4 = sentries[7] #Standard scaler constructor.
#There are also two calls to standard scaler constructor. Ignore the second.
u5, e5 = sentries[9] #Standard scaler fit_transform.
u6, e6 = sentries[10] #train_test_split
if len(sentries) == 14:
ui = 11
else:
print(len(sentries))
ui = 12
u7, e7 = sentries[ui] #SVC constructor
u8, e8 = sentries[ui+1] #SVC fit
u9, e9 = sentries[ui+2] #SVC predict
assert e0["m"] == "sklearn.datasets.samples_generator.make_classification"
assert e0["a"]["_"] == []
for kw, val in kwds.items():
assert kw in e0["a"]
assert e0["a"][kw] == val
assert len(u0) == 2
if six.PY3:
randclass = "numpy.random.mtrand"
else:
randclass = "numpy.random"
assert e1["m"] == "{}.RandomState.__new__".format(randclass)
assert e1["a"]["_"] == [2]
assert e1["r"] == u1
assert es["m"] == "numpy.random.mtrand.RandomState.seed"
assert es["a"]["_"] == [u1, 2]
assert esu["m"] == "numpy.random.mtrand.RandomState.uniform"
assert esu["a"]["_"] == [u1]
assert "'tuple'> len=2 min=2 max=100" in esu["a"]["size"]
usr = esu["r"]
assert e2["m"] == "numpy.ufunc.multiply"
assert e2["a"]["_"] == [2, usr]
assert e3["m"] == "numpy.ufunc.add"
assert e3["a"]["_"] == [u0[0], u2, u0[0]]
assert e4["m"] == "sklearn.preprocessing.data.StandardScaler.__new__"
assert e4["r"] == u4
assert e5["m"] == "sklearn.base.fit_transform"
assert e5["a"]["_"] == [u4, u3]
#fit_transform was an instance method on the standard scaler, so the actual
#transformed matrix will show up in the return value.
uft = e5["r"]
assert e6["m"] == "sklearn.cross_validation.train_test_split"
assert e6["a"]["_"] == [uft, u0[1]]
assert e7["m"] == "sklearn.svm.classes.SVC.__new__"
assert e7["r"] == u7
assert e8["m"] == "sklearn.svm.base.fit"
assert e8["a"]["_"] == [u7, u6[0], u6[2]]
assert '%' in e8["z"]
assert e8["z"]['%'] > 0
assert "e" in e8
assert isinstance(e8["e"], float)
assert e9["m"] == "sklearn.svm.base.predict"
assert e9["a"]["_"] == [u7, u6[1]]
assert '%' in e9["z"]
assert e9["z"]['%'] > 0
| mit |
neuropoly/spinalcordtoolbox | testing/test_sct_dmri_moco.py | 1 | 3176 | #!/usr/bin/env python
#########################################################################################
#
# Test function for sct_dmri_moco
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2017 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Julien Cohen-Adad
#
# About the license: see the file LICENSE.TXT
#########################################################################################
from pandas import read_csv
from numpy import allclose
from spinalcordtoolbox.utils import run_proc
def init(param_test):
"""
Initialize class: param_test
"""
# Reorient image to sagittal for testing another orientation (and crop to save time)
run_proc('sct_image -i dmri/dmri.nii.gz -setorient AIL -o dmri/dmri_AIL.nii', verbose=0)
run_proc('sct_crop_image -i dmri/dmri_AIL.nii -zmin 19 -zmax 21 -o dmri/dmri_AIL_crop.nii', verbose=0)
# Create Gaussian mask for testing
run_proc('sct_create_mask -i dmri/dmri_T0000.nii.gz -p center -size 5mm -f gaussian -o dmri/mask.nii', verbose=0)
# initialization
default_args = [
'-i dmri/dmri.nii.gz -bvec dmri/bvecs.txt -g 3 -x nn -ofolder dmri_test1 -r 0',
'-i dmri/dmri.nii.gz -bvec dmri/bvecs.txt -g 3 -m dmri/mask.nii -ofolder dmri_test2 -r 0',
'-i dmri/dmri_AIL_crop.nii -bvec dmri/bvecs.txt -x nn -ofolder dmri_test3 -r 0',
]
# Output moco param files
param_test.file_mocoparam = [
'dmri_test1/moco_params.tsv',
'dmri_test2/moco_params.tsv',
None,
]
# Ground truth value for integrity testing (corresponds to X motion parameters column)
param_test.groundtruth = [
[0.00047529041677414337, -1.1970542445283172e-05, -1.1970542445283172e-05, -1.1970542445283172e-05, -0.1296642741802682,
-0.1296642741802682, -0.1296642741802682],
[0.008032332623754357, 0.0037734940916436697, 0.0037734940916436697, 0.0037734940916436697,
-0.01502861167728611, -0.01502861167728611, -0.01502861167728611],
None,
]
# assign default params
if not param_test.args:
param_test.args = default_args
return param_test
def test_integrity(param_test):
"""
Test integrity of function
"""
# find which test is performed
index_args = param_test.default_args.index(param_test.args)
# Open motion parameters and compare with ground truth
# NB: We skip the test for sagittal images (*_AIL) because there is no output moco params
if param_test.file_mocoparam[index_args] is not None:
df = read_csv(param_test.file_mocoparam[index_args], sep="\t")
lresults = list(df['X'][:])
lgroundtruth = param_test.groundtruth[index_args]
if allclose(lresults, lgroundtruth):
param_test.output += "\n--> PASSED"
else:
param_test.output += "\nMotion parameters do not match: " \
" results: {}" \
" ground truth: {}".format(lresults, lgroundtruth)
param_test.status = 99
param_test.output += "\n--> FAILED"
return param_test
| mit |
MathieuLeocmach/colloids | python/colloids/ddm.py | 1 | 3933 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Mathieu Leocmach
#
# This file is part of Colloids.
#
# Colloids is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Colloids is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Colloids. If not, see <http://www.gnu.org/licenses/>.
#
# Implementation of the basics of Differential Dynamics Microscopy
# Cerbino, R. & Trappe, V. Differential dynamic microscopy: Probing wave vector dependent dynamics with a microscope. Phys. Rev. Lett. 100, 1–4 (2008).
import numpy as np
from matplotlib.image import imread
import numexpr
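# Illustrative use of this module (a sketch, not part of the original code): load a frame
# stack with readImages() and feed it to ddm(), e.g.
#   images = readImages('directory/images_{:06d}.tif', 1000)
#   dts, D = ddm(images, navmax=100, num=50)
# D[i] is then the radially averaged difference spectrum at lag dts[i] frames.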
def readImages(path, N, t0=1):
"""Read N successive images using the given and stock them in a numpy array
NxHxW (image shape is HxW) of type np.uint8.
The path is a string to be formatted like 'directory/images_{:06d}.tif'
so that path.format(124) -> 'directory/images_000124.tif'.
"""
#get the images shape while checking that the last image do exist
H, W = imread(path.format(N-1+t0)).shape[:-1] #imread makes 4 channels out of one
images = np.zeros([N, H, W], np.uint8)
for t in range(N):
images[t] = imread(path.format(t+t0))[:,:,0]
return images
def spectreDiff(im0, im1):
"""Compute the squared modulus of the 2D Fourier Transform of the difference between im0 and im1"""
return numexpr.evaluate(
'real(abs(f))**2',
{'f': np.fft.fft2(im1-im0.astype(float))}
)
class RadialAverager(object):
"""Radial average of a 2D array centred on (0,0), like the result of fft2d."""
def __init__(self, shape):
assert len(shape)==2
self.dists = np.sqrt(np.fft.fftfreq(shape[0])[:,None]**2 + np.fft.fftfreq(shape[1])[None,:]**2)
self.bins = np.fft.fftfreq(max(shape))[:max(shape)/2]
self.hd = np.histogram(self.dists, self.bins)[0]
def __call__(self, im):
assert im.shape == self.dists.shape
hw = np.histogram(self.dists, self.bins, weights=im)[0]
return hw/self.hd
def radialAverage(im):
"""Radial average of a 2D array centred on (0,0), like the result of fft2d."""
dists = np.sqrt(np.fft.fftfreq(im.shape[0])[:,None]**2 + np.fft.fftfreq(im.shape[1])[None,:]**2)
bins = np.fft.fftfreq(max(im.shape))[:max(im.shape)/2]
hd = np.histogram(dists, bins)[0]
hw = np.histogram(dists, bins, weights=im)[0]
return hw/hd
def timeAveraged(images, dt, navmax=100):
"""Does at most navmax spectreDiff on regularly spaced couples of images.
Separation within couple is dt."""
result = np.zeros(images.shape[1:])
step = max([(len(images)-dt)/navmax, 1])
couples = np.arange(0, len(images)-dt, step)
#print step, len(couples)
for t in couples:
result += spectreDiff(images[t], images[t+dt])
return result / len(couples)
def logSpaced(L, num=50):
"""Generate an array of log spaced integers smaller than L"""
return np.unique(np.logspace(
start=0, stop=np.log(L)/np.log(2),
num=num, base=2, endpoint=False
).astype(int))
def ddm(images, navmax=100, num=50):
"""Does timeAveraged and radialAverage for log-spaced time intervals.
Returns (intervals, ddm)"""
dts = logSpaced(len(images), num)
ra = RadialAverager(images.shape[1:])
D = np.zeros((len(dts), len(ra.hd)))
for i, dt in enumerate(dts):
D[i] = ra(timeAveraged(images, dt, navmax))
return dts, D
| gpl-3.0 |
wangkua1/sportvu | sportvu/train-seq2seq.py | 1 | 10913 | """train-seq2seq.py
Usage:
train-seq2seq.py <fold_index> <f_data_config> <f_model_config>
train-seq2seq.py --test <fold_index> <f_data_config> <f_model_config>
Arguments:
<f_data_config> example ''data/config/train_rev0.yaml''
<f_model_config> example 'model/config/conv2d-3layers.yaml'
Example:
    python train-seq2seq.py 0 data/config/train_rev0.yaml model/config/conv2d-3layers.yaml
    python train-seq2seq.py 0 data/config/train_rev0_vid.yaml model/config/conv3d-1.yaml
Options:
--negative_fraction_hard=<percent> [default: 0]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# model
import tensorflow as tf
optimize_loss = tf.contrib.layers.optimize_loss
import sys
import os
if os.environ['HOME'] == '/u/wangkua1': # jackson guppy
sys.path.append('/u/wangkua1/toolboxes/resnet')
else:
sys.path.append('/ais/gobi4/slwang/sports/sportvu/resnet')
sys.path.append('/ais/gobi4/slwang/sports/sportvu')
from sportvu.model.seq2seq import Seq2Seq
from sportvu.model.encdec import EncDec
# data
from sportvu.data.dataset import BaseDataset
from sportvu.data.extractor import Seq2SeqExtractor, EncDecExtractor
from sportvu.data.loader import Seq2SeqLoader
# concurrent
from resnet.utils.concurrent_batch_iter import ConcurrentBatchIterator
from tqdm import tqdm
from docopt import docopt
import yaml
import gc
from utils import truncated_mean, experpolate_position
from vis_utils import make_sequence_prediction_image
import cPickle as pkl
# import matplotlib.pylab as plt
# plt.ioff()
# fig = plt.figure()
def train(data_config, model_config, exp_name, fold_index, init_lr, max_iter, best_acc_delay, testing=False):
# Initialize dataset/loader
dataset = BaseDataset(data_config, fold_index, load_raw=False)
extractor = eval(data_config['extractor_class'])(data_config)
if 'negative_fraction_hard' in data_config:
nfh = data_config['negative_fraction_hard']
else:
nfh = 0
loader = Seq2SeqLoader(dataset, extractor, data_config[
'batch_size'], fraction_positive=0.5,
negative_fraction_hard=nfh, move_N_neg_to_val=1000)
Q_size = 100
N_thread = 4
# cloader = ConcurrentBatchIterator(
# loader, max_queue_size=Q_size, num_threads=N_thread)
cloader = loader
net = eval(model_config['class_name'])(model_config['model_config'])
net.build()
# build loss
y_ = tf.placeholder(tf.float32,
[model_config['model_config']['batch_size'],
model_config['model_config']['decoder_time_size'],
2])
learning_rate = tf.placeholder(tf.float32, [])
# euclid_loss = tf.reduce_mean(tf.pow(net.output() - y_, 2))
euclid_loss = tf.reduce_mean(tf.pow(tf.reduce_sum(tf.pow(net.output() - y_, 2), axis=-1),.5))
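    # i.e. the batch/time average of the per-step Euclidean distance between predicted and
    # target (x, y) positions (contrast with the plain squared-error version commented out above).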
global_step = tf.Variable(0)
# train_step = optimize_loss(euclid_loss, global_step, learning_rate,
# optimizer=lambda lr: tf.train.AdamOptimizer(lr),
# clip_gradients=0.01)
train_step = optimize_loss(euclid_loss, global_step, learning_rate,
optimizer=lambda lr: tf.train.RMSPropOptimizer(lr),
clip_gradients=0.01)
# train_step = optimize_loss(cross_entropy, global_step, learning_rate,
# optimizer=lambda lr: tf.train.MomentumOptimizer(lr, .9))
# # testing
# if testing:
# saver = tf.train.Saver()
# sess = tf.InteractiveSession()
# ckpt_path = os.path.join("./saves/", exp_name + '.ckpt.best')
# saver.restore(sess, ckpt_path)
# feed_dict = net.input(val_x, 1, False)
# feed_dict[y_] = val_t
# ce, val_accuracy = sess.run([cross_entropy, accuracy], feed_dict=feed_dict)
# print ('Best Validation CE: %f, Acc: %f' % (ce, val_accuracy))
# sys.exit(0)
# checkpoints
if not os.path.exists('./saves'):
os.mkdir('./saves')
# tensorboard
if not os.path.exists('./logs'):
os.mkdir('./logs')
v_loss = tf.Variable(tf.constant(0.0), trainable=False)
v_loss_pl = tf.placeholder(tf.float32, shape=[], name='v_loss_pl')
update_v_loss = tf.assign(v_loss, v_loss_pl, name='update_v_loss')
v_rloss = tf.Variable(tf.constant(0.0), trainable=False)
v_rloss_pl = tf.placeholder(tf.float32, shape=[])
update_v_rloss = tf.assign(v_rloss, v_rloss_pl)
tf.summary.scalar('euclid_loss', euclid_loss)
tf.summary.scalar('valid_loss', v_loss)
tf.summary.scalar('real_valid_loss', v_rloss)
# tf.summary.image('encoder_input', tf.transpose(
# tf.reduce_sum(net.tf_enc_input, 2), (0, 2, 3, 1))[:,:,:,:-1], max_outputs=4)
# tf.summary.image('decoder_input', tf.transpose(
# tf.reduce_sum(net.tf_dec_input, 2), (0, 2, 3, 1))[:,:,:,:-1], max_outputs=4)
# TODO
# tf.summary.image('prediction', tf.reduce_sum(net.x, 1), max_outputs=4)
# tf.summary.image('groundtruth', tf.reduce_sum(net.x, 1), max_outputs=4)
merged = tf.summary.merge_all()
log_folder = os.path.join('./logs', exp_name)
saver = tf.train.Saver()
best_saver = tf.train.Saver()
sess = tf.InteractiveSession()
# remove existing log folder for the same model.
if os.path.exists(log_folder):
import shutil
shutil.rmtree(log_folder)
train_writer = tf.summary.FileWriter(
os.path.join(log_folder, 'train'), sess.graph)
val_writer = tf.summary.FileWriter(
os.path.join(log_folder, 'val'), sess.graph)
tf.global_variables_initializer().run()
# Train
best_val_teacher_forced_loss = np.inf
best_val_real_loss = np.inf
best_not_updated = 0
lrv = init_lr
tfs = model_config['model_config']['decoder_time_size']
train_loss = []
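    # Training loop: the teacher-forcing horizon tfs shrinks by 5 every 5000 iterations, the
    # learning rate is cut 10x at iteration 2000, and validation (teacher-forced and
    # free-running losses, plus prediction dumps) runs every 1000 iterations.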
for iter_ind in tqdm(range(max_iter)):
best_not_updated += 1
loaded = cloader.next()
if loaded is not None:
            dec_input, dec_output, enc_input, _ = loaded
else:
cloader.reset()
continue
if iter_ind>0 and iter_ind % 5000 == 0:
tfs -= 5
feed_dict = net.input(dec_input,
teacher_forcing_stop=np.max([1, tfs]),
enc_input=enc_input
)
feed_dict[y_] = dec_output
        if iter_ind == 2000:
lrv *= .1
feed_dict[learning_rate] = lrv
summary, tl = sess.run([merged, train_step], feed_dict=feed_dict)
# print (tl)
train_loss.append(tl)
train_writer.add_summary(summary, iter_ind)
# Validate
if iter_ind % 1000 == 0:
val_tf_loss = []
val_real_loss = []
## loop throught valid examples
while True:
loaded = cloader.load_valid()
if loaded is not None:
dec_input, dec_output, enc_input, (meta) = loaded
history, pid = meta
else: ## done
# print ('...')
break
## teacher-forced loss
feed_dict = net.input(dec_input,
teacher_forcing_stop=None,
enc_input=enc_input,
enc_keep_prob = 1.,
decoder_noise_level = 0.,
decoder_input_keep_prob = 1.
)
feed_dict[y_] = dec_output
val_loss = sess.run(euclid_loss, feed_dict = feed_dict)
val_tf_loss.append(val_loss)
## real-loss
feed_dict = net.input(dec_input,
teacher_forcing_stop=1,
enc_input=enc_input,
enc_keep_prob = 1.,
decoder_noise_level = 0.,
decoder_input_keep_prob = 1.
)
feed_dict[y_] = dec_output
val_loss = sess.run(euclid_loss, feed_dict = feed_dict)
val_real_loss.append(val_loss)
### plot
pred = sess.run(net.output(), feed_dict = feed_dict)
gt_future = experpolate_position(history[:,pid,-1], dec_output)
pred_future = experpolate_position(history[:,pid,-1], pred)
# imgs = make_sequence_prediction_image(history, gt_future, pred_future, pid)
pkl.dump((history, gt_future, pred_future, pid),
open(os.path.join("./logs/"+exp_name, 'iter-%g.pkl'%(iter_ind)),'wb'))
# for i in xrange(5):
# plt.imshow(imgs[i])
# plt.savefig(os.path.join("./saves/", exp_name +'iter-%g-%g.png'%(iter_ind,i)))
## TODO: evaluate real-loss on training set
val_tf_loss = np.mean(val_tf_loss)
val_real_loss = np.mean(val_real_loss)
print ('[Iter: %g] Train Loss: %g, Validation TF Loss: %g | Real Loss: %g ' %(iter_ind,np.mean(train_loss),val_tf_loss, val_real_loss))
train_loss = []
feed_dict[v_loss_pl] = val_tf_loss
feed_dict[v_rloss_pl] = val_real_loss
_,_, summary = sess.run([update_v_loss,update_v_rloss, merged], feed_dict=feed_dict)
val_writer.add_summary(summary, iter_ind)
if val_real_loss < best_val_real_loss:
best_not_updated = 0
p = os.path.join("./saves/", exp_name + '.ckpt.best')
print ('Saving Best Model to: %s' % p)
save_path = best_saver.save(sess, p)
best_val_real_loss = val_real_loss
if iter_ind % 2000 == 0:
save_path = saver.save(sess, os.path.join(
"./saves/", exp_name + '%d.ckpt' % iter_ind))
# if best_not_updated == best_acc_delay:
# break
return _
if __name__ == '__main__':
arguments = docopt(__doc__)
print ("...Docopt... ")
print(arguments)
print ("............\n")
f_data_config = arguments['<f_data_config>']
f_model_config = arguments['<f_model_config>']
data_config = yaml.load(open(f_data_config, 'rb'))
model_config = yaml.load(open(f_model_config, 'rb'))
model_name = os.path.basename(f_model_config).split('.')[0]
data_name = os.path.basename(f_data_config).split('.')[0]
exp_name = '%s-X-%s' % (model_name, data_name)
fold_index = int(arguments['<fold_index>'])
init_lr = 1e-4
max_iter = 100000
best_acc_delay = 3000
testing = arguments['--test']
train(data_config, model_config, exp_name, fold_index,
init_lr, max_iter, best_acc_delay, testing)
| mit |
DCSaunders/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 13 | 2641 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(features, layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
ctogle/dilapidator | test/geometry/tform_tests.py | 1 | 1264 | from dilap.geometry.vec3 import vec3
from dilap.geometry.quat import quat
from dilap.geometry.tform import tform
import dilap.geometry.tools as dpr
import matplotlib.pyplot as plt
import unittest,numpy,math
import pdb
#python3 -m unittest discover -v ./ "*tests.py"
class test_tform(unittest.TestCase):
def setUp(self):
a1 = dpr.PI2
v1,v2,v3 = vec3(1,0,0),vec3(0,1,0),vec3(0,0,1)
q0 = quat(0,0,0,0).av( 0,v3)
q1 = quat(0,0,0,0).av(a1,v1)
q2 = quat(0,0,0,0).av(a1,v2)
q3 = quat(0,0,0,0).av(a1,v3)
self.tf1 = tform(vec3(1,1,0),q3.cp(),vec3(1,2,1))
self.tf2 = tform(vec3(1,1,0),q3.cp(),vec3(1,2,1))
self.tf3 = tform(vec3(0,1,0),q1,vec3(1,1,1))
a2 = dpr.PI
q4 = quat(0,0,0,0).av(a2,v3)
self.tf4 = tform(vec3(0,2,0),q4,vec3(1,4,1))
def test_cp(self):
self.assertTrue(self.tf1 is self.tf1)
self.assertFalse(self.tf1 is self.tf1.cp())
self.assertTrue(self.tf1 == self.tf1.cp())
self.assertFalse(self.tf1 is self.tf2)
self.assertTrue(self.tf1 == self.tf2)
def test_true(self):
tf4 = self.tf1.true(self.tf2)
self.assertEqual(self.tf4,tf4)
if __name__ == '__main__':
unittest.main()
| mit |
redhat-openstack/rdo-infra | ci-scripts/infra-setup/roles/rrcockpit/files/telegraf/openstack_infra_status.py | 2 | 1592 | #!/usr/bin/env python
import re
from datetime import datetime
import influxdb_utils
import pandas as pd
import requests
from bs4 import BeautifulSoup
infra_status_regexp = re.compile(
'^ *([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}) *UTC *(.+)$')
infra_status_url = 'https://wiki.openstack.org/wiki/Infrastructure_Status'
infra_status_utc_format = '%Y-%m-%d %H:%M:%S'
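# Wiki entries are expected to be a UTC timestamp followed by free-form text, e.g.
# (illustrative example only): "2019-03-01 12:34:56 UTC Restarted gerrit to apply a config change"
# The regexp above splits such a line into the timestamp and the issue description.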
pd.set_option('display.max_colwidth', -1)
def to_infra_date(date_str):
return datetime.strptime(date_str, infra_status_utc_format)
def get_infra_issues():
infra_status = requests.get(infra_status_url)
infra_status_soup = BeautifulSoup(infra_status.content, 'html.parser')
raw_issues = infra_status_soup.find_all('li')
times = []
issues = []
for ts_and_issue in raw_issues:
m = infra_status_regexp.match(ts_and_issue.get_text())
if m:
times.append(to_infra_date(m.group(1)))
issues.append(m.group(2))
time_and_issue = pd.DataFrame({'time': times, 'issue': issues})
return time_and_issue.set_index('time')
def convert_to_influxdb_lines(infra_issues):
formatted = ""
    # TODO: Filter to issues of interest
for index, infra_issue in infra_issues.head().iterrows():
ts = influxdb_utils.format_ts_from_date(index)
issue = infra_issue['issue'].replace('"', "'")
formatted += "openstack-infra-issues issue=\"{}\" {}\n".format(
issue.encode('utf-8'), ts)
return formatted
def main():
print(convert_to_influxdb_lines(get_infra_issues()))
if __name__ == '__main__':
main()
| apache-2.0 |
ElvisLouis/code | work/ML/tensorflow/separa/lda_rf.py | 1 | 1969 | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
import tool as tool
import numpy as np
SIZE = 828
IMAGE_SIZE = 224
NUM_CHANNEL = 1
LABEL = 6
BASE_DIVIDE = 128
ACCELERATION_FACTOR = 256
FEATURE = 42
FILENAME = "/home/elvis/work/ML/tensorflow/separa/lda_rf.txt"
util = tool.tool()
data, label = tool.parse_data(SIZE, IMAGE_SIZE, NUM_CHANNEL, LABEL)
data, label = tool.alignment_data(data=data, label=label, LABEL=LABEL, BASE_DIVIDE=BASE_DIVIDE)
data = tool.min_max_normalization(data)
np.set_printoptions(threshold='nan')
label = np.argmax(label, 1)
SIZE = label.shape[0]
data = np.reshape(data, [SIZE, data.shape[1] ** 2])
TRAIN_SIZE = SIZE - ACCELERATION_FACTOR
EVAL_SIZE = SIZE - TRAIN_SIZE
train_data, train_label, eval_data, eval_label = tool.random_sample(data, label, ACCELERATION_FACTOR, LABEL)
train_data, train_label = tool.random_shuffle(train_data, train_label)
eval_data, eval_label = tool.random_shuffle(eval_data, eval_label)
print ("lda ready")
lda = LinearDiscriminantAnalysis()
lda.fit(train_data, train_label)
print ("lda done")
fake_train_data = np.ndarray([TRAIN_SIZE, LABEL-1])
fake_eval_data = np.ndarray([EVAL_SIZE, LABEL-1])
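# Project each sample onto the LDA discriminant axes; scikit-learn's transform() expects a
# (1, n_features) row vector per call and returns LABEL-1 components per sample.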
for x in range(TRAIN_SIZE):
    data_T = np.reshape(train_data[x], [1, -1])
    fake_train_data[x] = lda.transform(data_T)
for x in range(EVAL_SIZE):
    data_T = np.reshape(eval_data[x], [1, -1])
    fake_eval_data[x] = lda.transform(data_T)
train_data = fake_train_data
eval_data = fake_eval_data
clf = RandomForestClassifier(n_estimators=200)
clf.fit(train_data, train_label)
print (train_data.shape)
print (eval_data.shape)
print (eval_label)
train_predict = clf.predict(train_data)
eval_predict = clf.predict(eval_data)
eval_result = np.sum(eval_predict == eval_label) / float(eval_label.shape[0])
train_result = np.sum(train_predict == train_label) / float(train_label.shape[0])
print (eval_predict)
print (eval_result)
print (train_result) | gpl-2.0 |
toastedcornflakes/scikit-learn | examples/neural_networks/plot_mlp_alpha.py | 58 | 4088 | """
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different values for regularization parameter 'alpha' on
synthetic datasets. The plot shows that different alphas yield different
decision functions.
Alpha is a parameter for the regularization term, aka penalty term, that combats
overfitting by constraining the size of the weights. Increasing alpha may fix
high variance (a sign of overfitting) by encouraging smaller weights, resulting
in a decision boundary plot that appears with lesser curvatures.
Similarly, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in a more complicated
decision boundary.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
h = .02 # step size in the mesh
alphas = np.logspace(-5, 3, 5)
names = []
for i in alphas:
names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
classifiers.append(MLPClassifier(alpha=i, random_state=1))
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
phockett/ePSproc | epsproc/sphCalc.py | 1 | 27587 | # -*- coding: utf-8 -*-
"""
ePSproc spherical function calculations.
Collection of functions for calculating Spherical Tensors: Ylm, wignerD etc.
For spherical harmonics, currently using scipy.special.sph_harm
For other functions, using Moble's spherical_functions package
https://github.com/moble/spherical_functions
See tests/Spherical function testing Aug 2019.ipynb
04/12/19 Added `setPolGeoms()` to define frames as Xarray.
Added `setADMs()` to define ADMs as Xarray
02/12/19 Added basic TKQ multipole frame rotation routine.
27/08/19 Added wDcalc for Wigner D functions.
14/08/19    v1  Implemented sphCalc
"""
# Imports
import numpy as np
import pandas as pd
import xarray as xr
from scipy.special import sph_harm, lpmv
import spherical_functions as sf
import quaternion
import string
try:
from sympy.physics.quantum.spin import Rotation # For basic frame rotation code, should update to use sf
except ImportError as e:
if e.msg != "No module named 'sympy'":
raise
print('* Sympy not found, some (legacy) sph functions may not be available. ')
# Master function for setting geometries/frame rotations
def setPolGeoms(eulerAngs = None, quat = None, labels = None, vFlag = 2):
"""
Generate Xarray containing polarization geometries as Euler angles and corresponding quaternions.
Define LF > MF polarization geometry/rotations.
Provide either eulerAngs or quaternions, but not both (supplied quaternions only will be used in this case).
For default case (eulerAngs = None, quat = None), 3 geometries are calculated,
corresponding to z-pol, x-pol and y-pol cases.
Defined by Euler angles:
(p,t,c) = [0 0 0] for z-pol,
(p,t,c) = [0 pi/2 0] for x-pol,
(p,t,c) = [pi/2 pi/2 0] for y-pol.
Parameters
----------
eulerAngs : list or np.array of Euler angles (p(hi), t(heta), c(hi)), optional.
List or array [p,t,c...], shape (Nx3).
List or array including set labels, [label,p,t,c...], shape (Nx4)
quat : list or np.array of quaternions, optional.
labels : list of labels, one per set of angles. Optional.
If not set, states will be labelled numerically.
vFlag : version of routine to use, optional, default = 2
Options:
- 1, use labels as sub-dimensional coord.
- 2, set labels as non-dimensional coord.
Returns
-------
RX : Xarray of quaternions, with Euler angles as dimensional params.
To do
-----
- Better label handling, as dictionary? With mixed-type array may get issues later.
(sf.quaternion doesn't seem to have an issue however.)
- Xarray MultiIndex with mixed types?
Tested with pd - not supported:
>>> eulerInd = pd.MultiIndex.from_arrays([eulerAngs[:,0].T, eulerAngs[:,1:].T.astype('float')], names = ['Label','P','T','C'])
# Gives error:
# NotImplementedError: > 1 ndim Categorical are not supported at this time
Examples
--------
>>> # Defaults
>>> RXdefault = setPolGeoms()
>>> print(RXdefault)
>>> # Pass Eulers, no labels
>>> pRot = [1.666, 0, np.pi/2]
>>> tRot = [0, np.pi/2, np.pi/2]
>>> cRot = [-1.5, 0, 0]
>>> eulerAngs = np.array([pRot, tRot, cRot]).T
>>> RXePass = setPolGeoms(eulerAngs = eulerAngs)
>>> print(RXePass)
>>> # Pass labels separately
>>> RXePass = setPolGeoms(eulerAngs = eulerAngs, labels = ['1','23','ff'])
>>> print(RXePass)
>>> # Pass Eulers with existing labels
>>> labels = ['A','B','C']
>>> eulerAngs = np.array([labels, pRot, tRot, cRot]).T
>>> RXePass = setPolGeoms(eulerAngs = eulerAngs)
>>> print(RXePass)
>>> # Pass Quaternions and labels
>>> RXqPass = setPolGeoms(quat = RXePass, labels = labels)
>>> print(RXqPass)
>>> # Pass both - only quaternions will be used in this case, and warning displayed.
>>> RXqeTest = setPolGeoms(eulerAngs = eulerAngs, quat = RXePass, labels = labels)
>>> print(RXqeTest)
"""
# Default case, set (x,y,z) geometries
if (eulerAngs is None) and (quat is None):
# As arrays, with labels
pRot = [0, 0, np.pi/2]
tRot = [0, np.pi/2, np.pi/2]
cRot = [0, 0, 0]
labels = ['z','x','y']
eulerAngs = np.array([labels, pRot, tRot, cRot]).T # List form to use later, rows per set of angles
# Get quaternions from Eulers, if provided or as set above for default case.
if eulerAngs is not None:
if type(eulerAngs) is not np.ndarray:
eulerAngs = np.asarray(eulerAngs)
        if eulerAngs.shape[1] == 3:
if labels is None:
# Set labels if missing, alphabetic or numeric
if eulerAngs.shape[0] < 27:
labels = list(string.ascii_uppercase[0:eulerAngs.shape[0]])
else:
labels = np.arange(1,eulerAngs.shape[0]+1)
eulerAngs = np.c_[labels, eulerAngs]
# If quaternions are passed, set corresponding Eulers
if quat is not None:
eulerFromQuat = quaternion.as_euler_angles(quat) # Set Eulers from quaternions
if labels is None:
# Set labels if missing
labels = np.arange(1,eulerFromQuat.shape[0]+1)
if eulerAngs is not None:
print('***Warning: Euler angles and Quaternions passed, using Quaternions only.')
eulerAngs = np.c_[labels, eulerFromQuat]
# Otherwise set from Eulers
else:
quat = quaternion.from_euler_angles(eulerAngs[:,1:]) # Convert Eulers to quaternions
#*** Set up Xarray
if vFlag == 1:
# v1 keep Labels as subdim.
# This works, and allows selection by label, but Euler coords may be string type
# Set Pandas MultiIndex - note transpose for eulerAngs to (angs,set) order
eulerInd = pd.MultiIndex.from_arrays(eulerAngs.T, names = ['Label','P','T','C'])
# Create Xarray
RX = xr.DataArray(quat, coords={'Euler':eulerInd}, dims='Euler')
RX.attrs['dataType'] = 'Euler'
elif vFlag == 2:
# v2 Labels as non-dim coords.
# Doesn't allow selection, but keeps Euler coords as floats in all cases.
Euler = pd.MultiIndex.from_arrays(eulerAngs[:,1:].T.astype('float'), names = ['P','T','C'])
RX = xr.DataArray(quat, coords={'Euler':Euler,'Labels':('Euler',eulerAngs[:,0].T)}, dims='Euler')
RX.attrs['dataType'] = 'Euler'
else:
print('***Version not recognized')
return RX
# Create Xarray from set of ADMs - adapted from existing blmXarray()
def setADMs(ADMs = [0,0,0,1], KQSLabels = None, t = None, addS = False):
"""
Create Xarray from ADMs, or create default case ADM(K,Q,S) = [0,0,0,1].
Parameters
----------
ADMs : list or np.array, default = [0,0,0,1]
Set of ADMs = [K, Q, S, ADM].
If multiple ADMs are provided per (K,Q,S) index, they are set to the t axis (if provided), or indexed numerically.
KQSLabels : list or np.array, optional, default = None
        If passed, assume ADMs are unlabelled, and use (K,Q,S) indices provided here.
t : list or np.array, optional, default = None
If passed, use for dimension defining ADM sets (usually time).
Defaults to numerical label if not passed, t = np.arange(0,ADMs.shape[1])
addS : bool, default = False
If set, append S = 0 to ADMs.
This allows for passing of [K,Q,ADM] type values (e.g. for symmetric top case)
Returns
-------
ADMX : Xarray
ADMs in Xarray format, dims as per :py:func:`epsproc.utils.ADMdimList()`
Examples
---------
>>> # Default case
>>> ADMX = setADMs()
>>> ADMX
>>> # With full N2 rotational wavepacket ADM set from demo data (ePSproc\data\alignment), where modPath defines root...
>>> # Load ADMs for N2
>>> from scipy.io import loadmat
>>> ADMdataFile = os.path.join(modPath, 'data', 'alignment', 'N2_ADM_VM_290816.mat')
>>> ADMs = loadmat(ADMdataFile)
>>> ADMX = setADMs(ADMs = ADMs['ADM'], KQSLabels = ADMs['ADMlist'], addS = True)
>>> ADMX
"""
# Check size of passed set of ADMs
# For ease of manipulation, just change to np.array if necessary!
if isinstance(ADMs, list):
ADMs = np.array(ADMs, ndmin = 2)
# Set lables explicitly if not passed, and resize ADMs
if KQSLabels is None:
if addS:
KQSLabels = ADMs[:,0:2]
KQSLabels = np.c_[KQSLabels, np.zeros(KQSLabels.shape[0])]
ADMs = ADMs[:,2:]
else:
KQSLabels = ADMs[:,0:3]
ADMs = ADMs[:,3:]
else:
if addS:
KQSLabels = np.c_[KQSLabels, np.zeros(KQSLabels.shape[0])] # Add S for labels passed case
# Set indexing, default to numerical
if t is None:
t = np.arange(0,ADMs.shape[1])
# Set up Xarray
    QNs = pd.MultiIndex.from_arrays(KQSLabels.real.T.astype('int8'), names = ['K','Q','S']) # Set labels, enforce type
ADMX = xr.DataArray(ADMs, coords={'ADM':QNs,'t':t}, dims = ['ADM','t'])
ADMX.attrs['dataType'] = 'ADM'
return ADMX
# Calculate a set of sph function
def sphCalc(Lmax, Lmin = 0, res = None, angs = None, XFlag = True, fnType = 'sph', convention = 'phys'):
'''
Calculate set of spherical harmonics Ylm(theta,phi) on a grid.
Parameters
----------
Lmax : int
Maximum L for the set. Ylm calculated for Lmin:Lmax, all m.
Lmin : int, optional, default 0
Min L for the set. Ylm calculated for Lmin:Lmax, all m.
res : int, optional, default None
(Theta, Phi) grid resolution, outputs will be of dim [res,res].
angs : list of 2D np.arrays, [thetea, phi], optional, default None
If passed, use these grids for calculation
XFlag : bool, optional, default True
Flag for output. If true, output is Xarray. If false, np.arrays
fnType : str, optional, default = 'sph'
Currently can set to 'sph' for SciPy spherical harmonics, or 'lg' for SciPy Legendre polynomials.
More backends to follow.
Note that either res OR angs needs to be passed.
Outputs
-------
- if XFlag -
YlmX
3D Xarray, dims (lm,theta,phi)
- else -
Ylm, lm
3D np.array of values, dims (lm,theta,phi), plus list of lm pairs
Methods
-------
Currently set for scipy.special.sph_harm as calculation routine. Note (theta, phi) definition, and normalisation.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.sph_harm.html
Example
-------
>>> YlmX = sphCalc(2, res = 50)
'''
# Set coords based on inputs
# TODO: better code here (try/fail?)
# TODO: 03/09/20 checking/testing coords defns, needs a tidy up (or just remove)
if angs is None and res:
if convention == 'maths':
# theta, phi = np.meshgrid(np.linspace(0,2*np.pi,res),np.linspace(0,np.pi,res))
TP = np.meshgrid(np.linspace(0,2*np.pi,res),np.linspace(0,np.pi,res))
elif convention == 'phys':
# phi, theta = np.meshgrid(np.linspace(0,2*np.pi,res),np.linspace(0,np.pi,res))
TP = np.meshgrid(np.linspace(0,np.pi,res),np.linspace(0,2*np.pi,res))
elif res is None and angs:
theta = angs[0]
phi = angs[1]
else:
print('Need to pass either res or angs.')
return False
# Loop over lm and calculate
lm = []
Ylm = []
for l in np.arange(Lmin,Lmax+1):
for m in np.arange(-l,l+1):
lm.append([l, m])
            if fnType == 'sph':
if convention == 'maths':
# Ylm.append(sph_harm(m,l,theta,phi))
Ylm.append(sph_harm(m,l,TP[0],TP[1])) # For SciPy.special.sph_harm() 'maths' convention is enforced.
elif convention == 'phys':
# Ylm.append(sph_harm(m,l,phi,theta))
Ylm.append(sph_harm(m,l,TP[1],TP[0]))
# Ylm.append(sph_harm(m,l,TP[0],TP[1])) # Pass arrays by ind to allow for different conventions above.
            elif fnType == 'lg':
# Ylm.append(lpmv(m,l,np.cos(phi)))
if convention == 'maths':
Ylm.append(lpmv(m,l,np.cos(TP[1]))) # For SciPy.special.lpmv() 'maths' convention is enforced.
elif convention == 'phys':
Ylm.append(lpmv(m,l,np.cos(TP[0])))
else:
print(f"fnType {fnType} not supported.")
# Return as Xarray or np arrays.
if XFlag:
# Set indexes
QNs = pd.MultiIndex.from_arrays(np.asarray(lm).T, names = ['l','m'])
# YlmX = xr.DataArray(np.asarray(Ylm), coords=[('LM',QNs), ('Theta',theta[0,:]), ('Phi',phi[:,0])])
YlmX = xr.DataArray(np.asarray(Ylm), coords=[('LM',QNs), ('Theta', TP[0][0,:]), ('Phi', TP[1][:,0])])
return YlmX
else:
return np.asarray(Ylm), np.asarray(lm)
# Calculate wignerD functions
# Adapted directly from Matlab code,
# via Jupyter test Notebook "Spherical function testing Aug 2019.ipynb"
def wDcalc(Lrange = [0, 1], Nangs = None, eAngs = None, R = None, XFlag = True, QNs = None, dlist = ['lp','mu','mu0'], eNames = ['P','T','C'], conjFlag = False):
'''
Calculate set of Wigner D functions D(l,m,mp; R) on a grid.
Parameters
----------
Lrange : list, optional, default [0, 1]
Range of L to calculate parameters for.
If len(Lrange) == 2 assumed to be of form [Lmin, Lmax], otherwise list is used directly.
For a given l, all (m, mp) combinations are calculated.
QNs : np.array, optional, default = None
List of QNs [l,m,mp] to compute Wigner D terms for.
If supplied, use this instead of Lrange setting.
Options for setting angles (use one only):
Nangs : int, optional, default None
If passed, use this to define Euler angles sampled.
Ranges will be set as (theta, phi, chi) = (0:pi, 0:pi/2, 0:pi) in Nangs steps.
eAngs : np.array, optional, default None
If passed, use this to define Euler angles sampled.
Array of angles, [theta,phi,chi], in radians
R : np.array, optional, default None
If passed, use this to define Euler angles sampled.
Array of quaternions, as given by quaternion.from_euler_angles(eAngs).
XFlag : bool, optional, default True
Flag for output. If true, output is Xarray. If false, np.arrays
dlist : list, optional, default ['lp','mu','mu0']
Labels for Xarray QN dims.
eNames : list, optional, default ['P','T','C']
Labels for Xarray Euler dims.
conjFlag : bool, optional, default = False
        If true, return complex conjugate values.
Outputs
-------
- if XFlag -
wDX
Xarray, dims (lmmp,Euler)
- else -
wD, R, lmmp
np.arrays of values, dims (lmmp,Euler), plus list of angles and lmmp sets.
Methods
-------
Uses Moble's spherical_functions package for wigner D function.
https://github.com/moble/spherical_functions
Moble's quaternion package for angles and conversions.
https://github.com/moble/quaternion
For testing, see https://epsproc.readthedocs.io/en/latest/tests/Spherical_function_testing_Aug_2019.html
Examples
--------
>>> wDX1 = wDcalc(eAngs = np.array([0,0,0]))
>>> wDX2 = wDcalc(Nangs = 10)
'''
# Set QNs for calculation, (l,m,mp)
if len(Lrange) == 2:
Ls = np.arange(Lrange[0], Lrange[1]+1)
else:
Ls = Lrange
# Set QNs based on Lrange if not passed to function.
if QNs is None:
QNs = []
for l in Ls:
for m in np.arange(-l, l+1):
for mp in np.arange(-l, l+1):
QNs.append([l, m, mp])
QNs = np.array(QNs)
# Set angles - either input as a range, a set or as quaternions
if Nangs is not None:
        # Set a range of Euler angles for testing
pRot = np.linspace(0,np.pi,Nangs)
tRot = np.linspace(0,np.pi/2,Nangs)
cRot = np.linspace(0,np.pi,Nangs)
eAngs = np.array([pRot, tRot, cRot,]).T
if eAngs is not None:
if eAngs.shape[-1] != 3: # Check dims, should be (N X 3) for quaternion... but transpose for pd.MultiIndex
eAngs = eAngs.T
else:
if R is not None:
eAngs = quaternion.as_euler_angles(R) # Set Eulers from quaternions
if R is None:
# Convert to quaternions
R = quaternion.from_euler_angles(eAngs)
# Calculate WignerDs
# sf.Wigner_D_element is vectorised for QN OR angles
# Here loop over QNs for a set of angles R
wD = []
lmmp = []
for n in np.arange(0, QNs.shape[0]):
lmmp.append(QNs[n,:])
if conjFlag:
wD.append(sf.Wigner_D_element(R, QNs[n,0], QNs[n,1], QNs[n,2]).conj())
else:
wD.append(sf.Wigner_D_element(R, QNs[n,0], QNs[n,1], QNs[n,2]))
# Return values as Xarray or np.arrays
if XFlag:
# Put into Xarray
#TODO: this will currently fail for a single set of QNs.
QNs = pd.MultiIndex.from_arrays(np.asarray(lmmp).T, names = dlist)
if (eAngs is not None) and (eAngs.size == 3): # Ugh, special case for only one set of angles.
Euler = pd.MultiIndex.from_arrays([[eAngs[0]],[eAngs[1]],[eAngs[2]]], names = eNames)
wDX = xr.DataArray(np.asarray(wD), coords=[('QN',QNs)])
wDX = wDX.expand_dims({'Euler':Euler})
else:
Euler = pd.MultiIndex.from_arrays(eAngs.T, names = eNames)
wDX = xr.DataArray(np.asarray(wD), coords=[('QN',QNs), ('Euler',Euler)])
return wDX
else:
return wD, R, np.asarray(lmmp).T
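# --- Hedged usage sketch (added for illustration; the helper name is hypothetical) ---
# Quick sanity check on wDcalc as defined above: for l = 0 the Wigner D element is 1
# for any rotation, so selecting (lp, mu, mu0) = (0, 0, 0) should return ones.
def _wDcalcExample():
    wDX = wDcalc(Lrange = [0, 2], Nangs = 5)                # Xarray, dims (QN, Euler)
    D0 = wDX.unstack('QN').sel(lp = 0, mu = 0, mu0 = 0)     # D^0_{0,0}(R) = 1 for all R
    return wDX, D0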
#*** Basic frame rotation code, see https://github.com/phockett/Quantum-Metrology-with-Photoelectrons/blob/master/Alignment/Alignment-1.ipynb
# Define frame rotation of state multipoles.
# Eqn. 4.41 in Blum (p127)
# Currently a bit ugly!
# Also set for numerical output only, although uses Sympy functions which can be used symbolically.
# Pass TKQ np.array [K,Q,TKQ], eAngs list of Euler angles (theta,phi,chi) to define rotation.
def TKQarrayRot(TKQ,eAngs):
r"""
Frame rotation for multipoles $T_{K,Q}$.
Basic frame rotation code, see https://github.com/phockett/Quantum-Metrology-with-Photoelectrons/blob/master/Alignment/Alignment-1.ipynb for examples.
Parameters
----------
TKQ : np.array
Values defining the initial distribution, [K,Q,TKQ]
eAngs : list or np.array
List of Euler angles (theta,phi,chi) defining rotated frame.
Returns
-------
TKQRot : np.array
Multipoles $T'_{K,Q}$ in rotated frame, as an np.array [K,Q,TKQ].
TODO: redo with Moble's functions, and Xarray input & output.
Formalism
----------
For the state multipoles, frame rotations are fairly straightforward
(Eqn. 4.41 in Blum):
.. math::
\begin{equation}
\left\langle T(J',J)_{KQ}^{\dagger}\right\rangle =\sum_{q}\left\langle T(J',J)_{Kq}^{\dagger}\right\rangle D(\Omega)_{qQ}^{K*}
\end{equation}
Where $D(\Omega)_{qQ}^{K*}$ is a Wigner rotation operator, for a
rotation defined by a set of Euler angles $\Omega=\{\theta,\phi,\chi\}$.
Hence the multipoles transform, as expected, as irreducible tensors,
i.e. components $q$ are mixed by rotation, but terms of different
rank $K$ are not.
"""
TKQRot = []
thres = 1E-5
Kmax = 6
# Easy way - loop over possible output values & sum based on input TKQ. Can probably do this in a smarter way.
for K in range(0,Kmax+1):
for q in range(-K,K+1):
# Set summation variable and add relevant terms from summation
TKQRotSum = 0.0
for row in range(TKQ.shape[0]):
Kin = TKQ[row][0]
Qin = TKQ[row][1]
if Kin == K:
Dval = Rotation.D(K,Qin,q,eAngs[0],eAngs[1],eAngs[2])
TKQRotSum += conjugate(Dval.doit())*TKQ[row][2]
else:
pass
if np.abs(N(TKQRotSum)) > thres:
TKQRot.append([K,q,N(TKQRotSum)]) # Use N() here to ensure Sympy numerical output only
return np.array(TKQRot)
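# --- Hedged usage sketch (added for illustration; the helper name is hypothetical) ---
# Input rows follow the [K, Q, TKQ] convention noted above: an isotropic K=0 term plus
# a K=2 alignment term, rotated by 90 degrees. dtype=object keeps K and Q as Python
# ints for the Sympy Rotation.D call.
def _TKQarrayRotExample():
    TKQ = np.array([[0, 0, 1.0], [2, 0, 0.5]], dtype=object)
    eAngs = [0.0, np.pi/2, 0.0]     # Euler angles (theta, phi, chi) in radians
    return TKQarrayRot(TKQ, eAngs)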
# 05/12/19 Rewriting with new eAngs and ADM defns... (Xarrays)
def TKQarrayRotX(TKQin, RX, form = 2):
r"""
Frame rotation for multipoles $T_{K,Q}$.
Basic frame rotation code, see https://github.com/phockett/Quantum-Metrology-with-Photoelectrons/blob/master/Alignment/Alignment-1.ipynb for examples.
Parameters
----------
TKQin : Xarray
Values defining the initial distribution, [K,Q,TKQ]. Other dimensions will be propagated.
RX : Xarray defining frame rotations, from :py:func:`epsproc.setPolGeoms()`
List of Euler angles (theta,phi,chi) and corresponding quaternions defining rotated frame.
Returns
-------
TKQRot : Xarray
Multipoles $T'_{K,Q}$ in rotated frame, as an np.array [K,Q,TKQ].
Formalism
----------
For the state multipoles, frame rotations are fairly straightforward
(Eqn. 4.41 in Blum):
.. math::
\begin{equation}
\left\langle T(J',J)_{KQ}^{\dagger}\right\rangle =\sum_{q}\left\langle T(J',J)_{Kq}^{\dagger}\right\rangle D(\Omega)_{qQ}^{K*}
\end{equation}
Where $D(\Omega)_{qQ}^{K*}$ is a Wigner rotation operator, for a
rotation defined by a set of Euler angles $\Omega=\{\theta,\phi,\chi\}$.
Hence the multipoles transform, as expected, as irreducible tensors,
i.e. components $q$ are mixed by rotation, but terms of different
rank $K$ are not.
Examples
--------
>>> vFlag = 2
>>> RX = ep.setPolGeoms(vFlag = vFlag) # Package version
>>> RX
>>> testADMX = ep.setADMs(ADMs=[[0,0,0,1],[2,0,0,0.5]])
>>> testADMX
>>> testADMrot, wDX, wDXre = TKQarrayRotX(testADMX, RX)
>>> testADMrot
>>> testADMrot.attrs['dataType'] = 'ADM'
>>> sph, _ = sphFromBLMPlot(testADMrot, facetDim = 'Euler', plotFlag = True)
"""
# Check dataType and rename if required
    if TKQin.dataType == 'ADM':
TKQ = TKQin.copy()
    elif TKQin.dataType == 'BLM':
TKQ = TKQin.copy().unstack('BLM').rename({'l':'K','m':'Q'}).stack({'ADM':('K','Q')})
else:
print('***TKQ dataType not recognized, skipping frame rotation.')
return None, None, None
# Test if S is set, and flag for later
# Better way to get MultiIndexes here?
if 'S' in TKQ.unstack().dims:
# incS = TKQ.S.pipe(np.abs).values.max() > 0
incS = True
else:
incS = False
# If S = 0, apply basic TKQ transformation
# if not incS:
#*** Formulate using existing style (looped)
# # Loop over rotations, extract quaternion value from Xarray (better way to do this...?)
# # Note this will fail for looping over RX, then taking values - seems to give size=1 array which throws errors... weird...
# for R in RX.values:
# # Loop over input K values, for all Q
# for Kin in ADMX.K:
# # Set QNs
# # Rotation matrix elements
# sf.Wigner_D_element(R, QNs)
#*** Formulate using existing wDX code, then multiply - should be faster and transparent (?), and allow multiple dims
# Calculate Wigner Ds
wDX = wDcalc(Lrange = np.unique(TKQ.K.values), R = RX.values) # NOTE - alternatively can pass angles as Eulers, but may need type conversion for RX.Euler depending on format, and/or loop over angle sets.
# Rename coords, use dataType for this
# dtList = ep.dataTypesList()
# dtDims = dtList[TKQ.dataType]['dims'] # Best way to get relevant QNs here...? Maybe need to start labelling these in dataTypesList?
# Rename for ADMs for testing...
    # ... then multiply (with existing dims), resort & sum over Q
if incS:
# Test cases
if form == 1:
wDXre = wDX.unstack('QN').rename({'lp':'K','mu':'Q','mu0':'Qp'}).expand_dims({'S':[0]}).stack({'ADM':('K','Q','S')}) # Restack according to D^l_{mu,mu0} > D^K_{Q,Qp}
# This matches Blum 4.41 for CONJ(wD) and conj(TKQ)
# Here formulated as TKQp* = sum_Q(TKQ* x D^K_{Q,Qp}*)
TKQrot = (TKQ.conj() * wDXre.conj()).unstack('ADM').sum('Q').rename({'Qp':'Q'}).stack({'ADM':('K','Q','S')}).conj()
# Gives *no difference* between (x,y) cases? Should be phase rotation?
if form == 2: # ******************* THINK THIS is the correct case.
wDXre = wDX.unstack('QN').rename({'lp':'K','mu':'Qp','mu0':'Q'}).expand_dims({'S':[0]}).stack({'ADM':('K','Q','S')}) # Restack according to D^l_{mu,mu0} > D^K_{Qp,Q}
# This matches Zare, eqn. 3.83, for D*xTKQ
# Here formulated as TKQrot = sum_q(TKq x D^K_{q,Q}*)
TKQrot = (TKQ * wDXre.conj()).unstack('ADM').sum('Q').rename({'Qp':'Q'}).stack({'ADM':('K','Q','S')})
# Gives re/im difference between (x,y) cases? Should be phase rotation?
if form == 3:
wDXre = wDX.unstack('QN').rename({'lp':'K','mu':'Q','mu0':'Qp'}).expand_dims({'S':[0]}).stack({'ADM':('K','Q','S')}) # Restack according to D^l_{mu,mu0} > D^K_{Q,Qp}
# This matches Zare, eqn. 5.8, for sum over Q and REAL wD
# Here formulated as TKQp = sum_Q(TKQ x D^K_{Q,Qp})
TKQrot = (TKQ * wDXre).unstack('ADM').sum('Q').rename({'Qp':'Q'}).stack({'ADM':('K','Q','S')})
# TKQrot = (wDXre * TKQ).unstack('ADM').sum('Q').rename({'Qp':'Q'}).stack({'ADM':('K','Q','S')})
# Gives *no difference* between (x,y) cases? Should be phase rotation?
else:
# wDXre = wDX.unstack('QN').rename({'lp':'K','mu':'Q','mu0':'Qp'}).stack({'ADM':('K','Q')}) # Restack according to D^l_{mu,mu0} > D^K_{Q,Qp}
# TKQrot = (TKQ * wDXre).unstack('ADM').sum('Q').rename({'Qp':'Q'}).stack({'ADM':('K','Q')})
# form = 2 case only.
# wDXre = wDX.unstack('QN').rename({'lp':'K','mu':'Qp','mu0':'Q'}).expand_dims({'S':[0]}).stack({'ADM':('K','Q','S')}) # Restack according to D^l_{mu,mu0} > D^K_{Qp,Q}
wDXre = wDX.unstack('QN').rename({'lp':'K','mu':'Qp','mu0':'Q'}).stack({'ADM':('K','Q')})
# This matches Zare, eqn. 3.83, for D*xTKQ
# Here formulated as TKQrot = sum_q(TKq x D^K_{q,Q}*)
TKQrot = (TKQ * wDXre.conj()).unstack('ADM').sum('Q').rename({'Qp':'Q'}).stack({'ADM':('K','Q')})
    #*** Multiply (with existing dims), then resort & sum over Q
# NOW INCLUDED ABOVE for different test cases
# Propagate frame labels & attribs
# TODO: fix Labels propagation - this seems to drop sometimes, dim issue?
# TKQrot['Labels'] = RX.Labels
TKQrot['Labels']=('Euler',RX.Labels.values) # This seems to work...
TKQrot.attrs = TKQ.attrs
# For BLM data, rename vars.
    if TKQin.dataType == 'BLM':
TKQrot = TKQrot.unstack('ADM').rename({'K':'l','Q':'m'}).stack({'BLM':('l','m')})
return TKQrot, wDX, wDXre
| gpl-3.0 |
wathen/PhD | MHD/FEniCS/MHD/CG/PicardIter_Direct/DecoupleTest/MHDnewcastle.py | 1 | 11326 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time as t
import BiLinear as forms
import DirectOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
# locals()
#Modules([forms,Iter,MO,CP,ExactSol,IO,common])
#ff
m = 5
Type = ['Full','MD','CD']
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
nn = 2
mm = 4
MUsave = np.zeros((mm*3,1))
MUit = np.zeros((m-1,mm*3))
print MUit[0,0]
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
R = 010.0
jj = 0
for yy in xrange(1,mm+1):
MU =(R*10**(-yy))
print "++++++++",MU
for ii in xrange(0,3):
jj += 1
MUsave[jj-1] = MU
for xx in xrange(1,m):
IterType = Type[ii]
print xx
level[xx-1] = xx+2
nn = 2**(level[xx-1])
print "==================================",IterType
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
parameters["form_compiler"]["quadrature_degree"] = -1
mesh = UnitSquareMesh(nn,nn)
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
# plot(interpolate(u0,Velocity))
p0 = interpolate(p0,Pressure)
p0.vector()[:] -= np.max(p0.vector().array() )/2
# plot(interpolate(p0,Pressure))
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 1.0
Mu_m =10.0
# MU = 1.0
print "================================",MU
F_NS = -MU*Laplacian + Advection + gradPres - kappa*NS_Couple
F_M = Mu_m*kappa*CurlCurl + gradR - kappa*M_Couple
params = [kappa,Mu_m,MU]
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,Neumann=Expression(("0","0")),options ="New")
# plot(u_k)
VelPres = Velocitydim[xx-1]+Pressuredim[xx-1]
# t.sleep(10)
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
pConst = -assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += pConst
x = Iter.u_prev(u_k,p_k,b_k,r_k)
# plot(b_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W, F_M, F_NS, u_k, b_k, params, IterType)
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params)
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-5 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
parameters['linear_algebra_backend'] = 'uBLAS'
p = forms.Preconditioner(mesh,W,u_k, b_k,params,IterType)
# PP,Pb = assemble_system(p, Lns,bcs)
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b,u = Iter.RemoveRowCol(AA,bb,VelPres)
while eps > tol and iter < maxiter:
iter += 1
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
A,b,u = Iter.RemoveRowCol(AA,bb,VelPres)
else:
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b,u = Iter.RemoveRowCol(AA,bb,VelPres)
ksp = PETSc.KSP().create()
pc = ksp.getPC()#.PC().create()
# P = MO.shift(A,0.000001)
ksp.setOperators(A )
del A
OptDB = PETSc.Options()
OptDB["ksp_type"] = "preonly"
OptDB["pc_type"] = "lu"
OptDB["pc_factor_mat_ordering_type"] = "rcm"
OptDB["pc_factor_mat_solver_package"] = "mumps"
# OptDB["pc_factor_shift_amount"] = 2
ksp.setFromOptions()
tic()
ksp.solve(b, u)
time = toc()
print time
SolutionTime = SolutionTime +time
del ksp, pc
u, p, b, r, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p.vector()[:] += - assemble(p*dx)/assemble(ones*dx)
u_k.assign(u)
p_k.assign(p)
b_k.assign(b)
r_k.assign(r)
# plot(u_k)
# plot(p_k)
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
if eps > 1e10:
iter = 0
break
# u_k,b_k,epsu,epsb=Iter.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
MUit[xx-1,jj-1]= iter
# SolTime[xx-1] = SolutionTime/iter
ue =u0
pe = p0
be = b0
re = r0
ExactSolution = [ue,pe,be,re]
# errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim)
# if xx == 1:
# l2uorder[xx-1] = 0
# else:
# l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
# H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
# l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
# l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
# Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
# l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
# H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
print MUit
import pandas as pd
LatexTitles = ["l","DoF"]
for x in xrange(1,mm+1):
LatexTitles.extend(["it","it","it"])
LatexValues = np.concatenate((level,Wdim,MUit), axis=1)
title = np.concatenate((np.array([[0,0]]),MUsave.T),axis=1)
MU = ["0","0"]
for x in xrange(1,mm+1):
MU.extend(["Full","MD","CD"])
LatexValues = np.vstack((title,LatexValues))
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
print LatexTable.to_latex()
# LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
# LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
# LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
# pd.set_option('precision',3)
# LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
# print LatexTable.to_latex()
# print "\n\n Magnetic convergence"
# MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
# MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
# MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
# pd.set_option('precision',3)
# MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
# MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
# print MagneticTable.to_latex()
# print "\n\n Lagrange convergence"
# LagrangeTitles = ["l","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
# LagrangeValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
# LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
# pd.set_option('precision',3)
# LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,"H1-order","%1.2f")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'L2-order',"%1.2f")
# print LagrangeTable.to_latex()
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(b_k)
# plot(r_k)
# plot(p_k)
# # # plot(ba)
# plot(interpolate(p0,Pressure))
# # plot(ra)
# plot(interpolate(re,Lagrange))
# interactive()
interactive()
def ModuleCheck():
modulenames = set(sys.modules)&set(globals())
print modulenames
ModuleCheck()
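# --- Hedged sketch (added for illustration, not called by the script) ---
# The direct-solve setup used inside the Picard loop above, pulled out as a helper:
# a "preonly" KSP applies a single LU factorisation (via MUMPS) as the whole solve.
# Assumes petsc4py Mat/Vec arguments as produced by the assembly/conversion steps above.
def _direct_solve(A, b, u):
    ksp = PETSc.KSP().create()
    OptDB = PETSc.Options()
    OptDB["ksp_type"] = "preonly"                       # no Krylov iterations
    OptDB["pc_type"] = "lu"                             # sparse direct factorisation
    OptDB["pc_factor_mat_solver_package"] = "mumps"     # MUMPS backend
    ksp.setOperators(A)
    ksp.setFromOptions()
    ksp.solve(b, u)
    return u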
| mit |
manazhao/tf_recsys | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 79 | 2464 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 |
brodoll/sms-tools | lectures/03-Fourier-properties/plots-code/symmetry-real-even.py | 26 | 1150 | import matplotlib.pyplot as plt
import numpy as np
import sys
import math
from scipy.signal import triang
from scipy.fftpack import fft, fftshift
M = 127
N = 128
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
x = triang(M)
fftbuffer = np.zeros(N)
fftbuffer[:hM1] = x[hM2:]
fftbuffer[N-hM2:] = x[:hM2]
X = fftshift(fft(fftbuffer))
mX = abs(X)
pX = np.unwrap(np.angle(X))
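# Hedged sanity check (added for illustration): the zero-phase buffer built above is
# real and even, so its DFT should be purely real up to floating-point noise.
assert np.allclose(np.imag(X), 0, atol=1e-10)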
plt.figure(1, figsize=(9.5, 4))
plt.subplot(311)
plt.title('x[n]')
plt.plot(np.arange(-hM2, hM1, 1.0), x, 'b', lw=1.5)
plt.axis([-hM2, hM1, 0, 1])
plt.subplot(323)
plt.title('real(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.real(X), 'r', lw=1.5)
plt.axis([-N/2, N/2, min(np.real(X)), max(np.real(X))])
plt.subplot(324)
plt.title('im(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.imag(X), 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(325)
plt.title('abs(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), mX, 'r', lw=1.5)
plt.axis([-N/2,N/2,min(mX),max(mX)])
plt.subplot(326)
plt.title('angle(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), pX, 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.tight_layout()
plt.savefig('symmetry-real-even.png')
plt.show()
| agpl-3.0 |
nomed/cashup | cashup/controllers/root.py | 1 | 116017 | # -*- coding: utf-8 -*-
"""Main Controller"""
import uuid
from datetime import datetime
from tg import TGController
from tg import tmpl_context
from tg import request, response
from tg import expose, flash, require, url, lurl, request, redirect, validate, predicates, override_template
from tg.i18n import ugettext as _, lazy_ugettext as l_
from tgext.pluggable import app_model
from cashup.model import *
from cashup.model.models import _get_role
from cashup.lib import set_value
import transaction
from tgext.admin.tgadminconfig import BootstrapTGAdminConfig as TGAdminConfig
from tgext.admin.controller import AdminController
from tgext.crud import CrudRestController
from sprox.tablebase import TableBase
from sprox.formbase import EditableForm, AddRecordForm
from sprox.fillerbase import TableFiller, EditFormFiller
from datetime import datetime, timedelta
from lilliput.commands.micros_llpt import dly_salesmix_query, dly_sales_query, wly_cogs_query, dly_sales_query_fp
from lilliput.olap import data
from lilliput.lib import get_cols as _get_cols
from cashup.controllers.reports import CashupReportController
import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
srv = config.get('olap.server', 'http://localhost:5000')
__all__ = ['ModalController', 'RootController', 'EODController']
def get_dateobj(**kw):
start = kw.get('start')
end = kw.get('end')
if start:
kw['start'] = datetime.strptime(start, '%Y/%m/%d')
if end:
kw['end'] = datetime.strptime(end, '%Y/%m/%d')
return kw
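# Hedged usage note (added): get_dateobj expects 'start'/'end' strings in the
# %Y/%m/%d form used by the date filters below, e.g.
#   kw = get_dateobj(start='2015/06/01', end='2015/06/30')
# turns both values into datetime objects and leaves any other keys untouched.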
"""
from lilliput.model import *
dbobj=FactMicrosSALES
sm=DBSession.query(dbobj).filter(and_(dbobj.adj==1)).all()
for s in sm:
d = DBSession.query(DimDate).filter_by(date=s.date).one()
s.date_id=d.date_id
DBSession.add(s)
"""
class ModalController(TGController):
"""
Modal Controller
"""
#allow_only = predicates.not_anonymous(msg='Only logged in users can read this post')
#allow_only = predicates.not_anonymous(msg='Only logged in users can read this post')
@expose('cashup.templates.modal.add_cogs')
def add_cogs(self, team_alias, year, week):
""""""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
mjs = DBSession.query(app_model.DimMajorgroup).filter(app_model.DimMajorgroup.default==1).all()
if user in users:
return dict(team=team, year=year, week=week, mjs=mjs)
@expose('cashup.templates.modal.add_report')
def add_report(self, team_alias):
""""""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
reports = team.reports
reports.reverse()
users = [ i.user for i in team.members ]
year_opt = [(x + 2000, x + 2000) for x in range(20)]
month_opt = [(x + 1, x +1) for x in range(12)]
day_opt = [(x + 1, x + 1) for x in range(31)]
today = datetime.now()
day = today.day
month = today.month
year = today.year
if user in users:
return dict(user=user, team=team, year_opt=year_opt,
month_opt=month_opt, day_opt=day_opt,
day=day, month=month, year=year)
@expose('cashup.templates.modal.add_banking')
def add_banking(self, team_alias, report_id):
"""
action="/cashup/add_banking"
"""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
year_opt = [(x + 2000, x + 2000) for x in range(20)]
month_opt = [(x + 1, x +1) for x in range(12)]
day_opt = [(x + 1, x + 1) for x in range(31)]
today = datetime.now()
day = today.day
month = today.month
year = today.year
if user in users:
report = DBSession.query(Report).filter_by(report_id=report_id).one()
return dict(user=user, team=team, report=report, year_opt=year_opt,
month_opt=month_opt, day_opt=day_opt,
day=day, month=month, year=year )
@expose('cashup.templates.modal.add_cash_report')
def add_cash_report(self, team_alias, report_id):
"""
action="/cashup/add_cash_report"
"""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(report_id=report_id).one()
return dict(user=user, team=team, report=report )
@expose('cashup.templates.modal.add_banking2')
def add_banking2(self, team_alias, bankingcurrency_id, report_id):
""""""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
bankingcurrency = DBSession.query(BankingCurrencyTotal).filter_by(bankingcurrency_id=bankingcurrency_id).one()
report = DBSession.query(Report).filter_by(report_id=report_id).one()
return dict(user=user, team=team, bankingcurrency=bankingcurrency, report=report )
@expose('cashup.templates.modal.add_banking3')
def add_banking3(self, team_alias, bankingcurrency_id):
""""""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
bankingcurrency = DBSession.query(BankingCurrencyTotal).filter_by(bankingcurrency_id=bankingcurrency_id).one()
return dict(user=user, team=team, bankingcurrency=bankingcurrency)
@expose('cashup.templates.modal.edit_mgrcomments')
def edit_mgrcomments(self, team_alias, report_id, key, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).get(report_id)
return dict(user=user, team=team, report=report, key=key)
@expose('cashup.templates.modal.edit_cash_report')
def edit_cash_report(self, team_alias, cash_report_id, key, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report = DBSession.query(CashReport).filter_by(cash_report_id=cash_report_id).one()
return dict(user=user, team=team, cash_report=cash_report, key=key)
@expose('cashup.templates.modal.edit_salesmix')
def edit_salesmix(self, team_alias,rvc, report_id, mj_code, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(report_id=report_id).one()
cols, data = report.by_mi_rvc(date_from=report.date, date_to=report.date,rvcs=[rvc], location_codes=[team.location_code], mj_codes=[mj_code])
ret = dict(zip(cols, data[0]))
print ret
return dict(user=user, team=team, report=report, rvc=rvc, mj_code=mj_code, ret=ret)
@expose('cashup.templates.modal.edit_sales')
def edit_sales(self, team_alias,rvc, report_id, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(report_id=report_id).one()
cols, data = report.by_rvc(date_from=report.date, date_to=report.date,rvcs=[rvc], location_codes=[team.location_code])
return dict(user=user, team=team, report=report, rvc=rvc, cols=cols, data=data[0])
@expose('cashup.templates.modal.edit_changerate')
def edit_changerate(self, team_alias, report_id, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(report_id=report_id).one()
return dict(user=user, team=team, report=report)
@expose('cashup.templates.modal.add_relation')
def add_relation(self, team_alias, cash_report_id, key, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report = DBSession.query(CashReport).filter_by(cash_report_id=cash_report_id).one()
return dict(user=user, team=team, cash_report=cash_report, key=key)
@expose('cashup.templates.modal.cashier_add_relation')
def cashier_add_relation(self, team_alias, cash_report_id, key, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report = DBSession.query(CashReport).filter_by(cash_report_id=cash_report_id).one()
return dict(user=user, team=team, cash_report=cash_report, key=key)
@expose('cashup.templates.modal.publish_report')
def publish_report(self, team_alias, report_id, notify, **kw):
#opath = config.get('jde.export')
#ofile = config.get('jde.export.file')
report = DBSession.query(Report).filter_by(report_id=report_id).one()
notify=int(notify)
return dict(value=dict(team_alias=team_alias,report_id=report_id, notify=notify))
@expose('cashup.templates.modal.cashier_publish_report')
def cashier_publish_report(self, team_alias, cash_report_id, notify, **kw):
#opath = config.get('jde.export')
#ofile = config.get('jde.export.file')
#report = DBSession.query(CashReport).get(cash_report_id)
notify=int(notify)
return dict(value=dict(team_alias=team_alias,cash_report_id=cash_report_id, notify=notify))
@expose('cashup.templates.modal.validate_report')
def validate_report(self, team_alias, report_id, **kw):
return dict(value=dict(team_alias=team_alias,report_id=report_id))
@expose('cashup.templates.modal.unpublish_report')
def unpublish_report(self, team_alias, report_id, **kw):
return dict(value=dict(team_alias=team_alias,report_id=report_id))
@expose('cashup.templates.modal.show_check')
def show_check(self, location_code, check_id):
""""""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.location_code==location_code).one()
users = [ i.user for i in team.members ]
if user in users:
ret = DBSession.query(app_model.FactMicrosCHK).filter_by(check_id=check_id).one()
return dict(user=user, team=team, check=ret)
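# --- Hedged sketch (added for illustration; the helper name is hypothetical) ---
# The handlers above repeat the same membership check; a shared helper using only
# the queries already present in this module could look like this.
def _team_member_or_none(team_alias):
    user = DBSession.query(app_model.User).filter(
        app_model.User.user_name == request.identity['repoze.who.userid']).one()
    team = DBSession.query(app_model.DimLocation).filter(
        app_model.DimLocation.team_alias == team_alias).one()
    if user in [m.user for m in team.members]:
        return user, team
    return None, None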
class EODController(TGController):
@expose('json')
def post(self,fact):
data = dict()
if fact == "vat":
Eod = VatTTL
elif fact == "tmed":
Eod = TmedTTL
content = request.json
for cobj in content:
if not cobj["date"] in data:
data[cobj["date"]] = dict()
if not cobj["location_code"] in data[cobj["date"]]:
data[cobj["date"]][cobj["location_code"]] = []
data[cobj["date"]][cobj["location_code"]].append(cobj)
for strdate, locdict in data.iteritems():
objdate = datetime.strptime(strdate , '%Y-%m-%d')
for loccode in locdict.keys():
ret = DBSession.query(Eod).filter(
Eod.location_code==loccode,
Eod.date==objdate).delete()
#DBSession.commit()
for cobj in content:
cobj["date"] = datetime.strptime(cobj["date"] , '%Y-%m-%d')
eod = Eod(**cobj)
DBSession.add(eod)
#DBSession.commit()
return dict(d=data)
@expose('json')
def last(self,fact, location_id):
if fact == "vat":
Eod = VatTTL
elif fact == "tmed":
Eod = TmedTTL
ret = DBSession.query(Eod).filter_by(location_code =location_id).order_by(Eod.date.desc()).first()
if not ret:
ret = "1970-01-01"
else:
            ret = ret.date + timedelta(days=1)
ret = ret.strftime("%Y-%m-%d")
return ret
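# --- Hedged client sketch (added for illustration; the mount point and extra row
# columns are assumptions, only "date", "location_code" and the fact name come from
# EODController.post above). post() deletes any existing rows for each
# (date, location_code) pair in the payload and then re-inserts them, so a resend
# for the same day simply replaces that day's data.
def _post_eod_example(base_url, rows, fact='vat'):
    import requests
    # rows: list of dicts with "date" (YYYY-MM-DD), "location_code" and the remaining
    # VatTTL / TmedTTL columns (not shown in this file, so assumed by the caller).
    return requests.post("%s/post/%s" % (base_url, fact), json=rows)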
class AdminUserController(TGController):
allow_only =predicates.has_permission('admin', msg=l_('Only for managers'))
@expose('cashup.templates.users.index')
def index(self, **kw):
username = kw.get('username')
if username:
users = DBSession.query(app_model.User).filter(app_model.User.user_name.like("%"+username+"%")).all()
else:
users= DBSession.query(app_model.User).order_by(app_model.User.display_name).all()
#if len(user.teams) == 1:
# team = user.teams[0]
# return redirect(self.root_url+'/show/%s'%(team.team_alias))
#else:
# return dict(page='index', user=user, root_url = self.root_url)
return dict(users=users, kw=kw)
def update_user(self, user_id, **kw):
if user_id:
user = DBSession.query(app_model.User).filter(app_model.User.user_id==user_id).one()
else:
user = app_model.User()
locations = DBSession.query(app_model.DimLocation).all()
tmp_groups=[]
for key, val in kw.iteritems():
val = val.strip()
if key == 'password':
if len(val) > 0:
user.password = val
if key.startswith('group.'):
g = DBSession.query(app_model.Group).filter(app_model.Group.group_id==val).one()
user.groups.append(g)
tmp_groups.append(g)
if key.startswith('user.'):
setattr(user, key.split('.')[1], val)
if key.startswith('m_'):
prefix , team_id, user_id, m_id = key.split('_')
if m_id != 'NEW':
mobj = DBSession.query(Membership).filter(Membership.membership_id==m_id).one()
if len(val) > 0:
setattr(mobj, 'role', val)
DBSession.add(mobj)
else:
DBSession.delete(mobj)
else:
if len(val) > 0:
mobj = Membership()
mobj.team_id = team_id
mobj.user_id = user.user_id
mobj.role = val
DBSession.add(mobj)
for g in user.groups:
if g not in tmp_groups:
user.groups.remove(g)
DBSession.add(user)
DBSession.flush()
user = DBSession.query(app_model.User).filter(app_model.User.user_id==user.user_id).one()
return user, locations, kw
@expose('cashup.templates.users.edit')
def edit(self, user_id=None, **kw):
user, locations, kw = self.update_user(user_id,**kw)
return dict(user=user, locations=locations, kw=kw, action='edit')
@expose()
def set(self, **kw):
user, locations, kw = self.update_user(None,**kw)
redirect('/cashup/adminusers/edit/%s'%user.user_id)
@expose()
def delete(self, user_id):
from lilliput.model.auth import user_group_table
DBSession.query(app_model.User).filter(app_model.User.user_id==user_id).delete()
DBSession.query(Membership).filter(Membership.user_id==user_id).delete()
DBSession.execute(user_group_table.delete().where(user_group_table.c.user_id==user_id))
DBSession.flush()
redirect('/cashup/adminusers/')
@expose('cashup.templates.users.edit')
def add(self, **kw):
class NewUser(object):
password=''
email_address=''
display_name = ''
user_id = ''
user_name = ''
groups = []
return dict(user=NewUser(), kw=kw, action='set' )
"""
</div>
</div>
class AddLocationForm(AddRecordForm):
__model__ = app_model.DimLocation
__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
add_location_form = AddLocationForm(app_model.DBSession)
class EditLocationForm(EditableForm):
__model__ = app_model.DimLocation
__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
edit_location_form = EditLocationForm(app_model.DBSession)
class UserController(CrudRestController):
model = app_model.User
class new_form_type(AddRecordForm):
__model__ = app_model.User
#__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
class edit_form_type(EditableForm):
__model__ = app_model.User
#__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
class edit_filler_type(EditFormFiller):
__model__ = app_model.User
class table_type(TableBase):
__model__ = app_model.User
#__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
class table_filler_type(TableFiller):
__model__ = app_model.User
class RvcController(CrudRestController):
model = app_model.DimRvc
class new_form_type(AddRecordForm):
__model__ = app_model.DimRvc
__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
class edit_form_type(EditableForm):
__model__ = app_model.DimRvc
__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
class edit_filler_type(EditFormFiller):
__model__ = app_model.DimRvc
class table_type(TableBase):
__model__ = app_model.DimRvc
__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
class table_filler_type(TableFiller):
__model__ = app_model.DimRvc
class LocationController(CrudRestController):
model = app_model.DimLocation
class new_form_type(AddRecordForm):
__model__ = app_model.DimLocation
__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
class edit_form_type(EditableForm):
__model__ = app_model.DimLocation
__omit_fields__ = ['location_id', 'reports','wtrs', 'services','cashes']
class edit_filler_type(EditFormFiller):
__model__ = app_model.DimLocation
class table_type(TableBase):
__model__ = app_model.DimLocation
__omit_fields__ = ['location_id', 'members', 'reports','wtrs', 'services','cashes']
class table_filler_type(TableFiller):
__model__ = app_model.DimLocation
class CashCurrencyController(CrudRestController):
model = CashCurrency
class new_form_type(AddRecordForm):
__model__ = CashCurrency
__omit_fields__ = ['changerates', 'bankingcurrencytotals', 'cashcurrencytotals']
class edit_form_type(EditableForm):
__model__ = CashCurrency
__omit_fields__ = ['changerates', 'bankingcurrencytotals', 'cashcurrencytotals']
class edit_filler_type(EditFormFiller):
__model__ = CashCurrency
class table_type(TableBase):
__model__ = CashCurrency
__omit_fields__ = ['changerates', 'bankingcurrencytotals', 'cashcurrencytotals']
class table_filler_type(TableFiller):
__model__ = CashCurrency
class CreditCardController(CrudRestController):
model = CreditCard
class new_form_type(AddRecordForm):
__model__ = CreditCard
__omit_fields__ = ['creditcardtotals']
class edit_form_type(EditableForm):
__model__ = CreditCard
__omit_fields__ = ['creditcardtotals']
class edit_filler_type(EditFormFiller):
__model__ = CreditCard
class table_type(TableBase):
__model__ = CreditCard
__omit_fields__ = ['creditcardtotals']
class table_filler_type(TableFiller):
__model__ = CreditCard
class DebitCardController(CrudRestController):
model = DebitCard
class new_form_type(AddRecordForm):
__model__ = CreditCard
__omit_fields__ = ['debitcardtotals']
class edit_form_type(EditableForm):
__model__ = DebitCard
__omit_fields__ = ['debitcardtotals']
class edit_filler_type(EditFormFiller):
__model__ = DebitCard
class table_type(TableBase):
__model__ = DebitCard
__omit_fields__ = ['debitcardtotals']
class table_filler_type(TableFiller):
__model__ = DebitCard
class TicketController(CrudRestController):
model = Ticket
class new_form_type(AddRecordForm):
__model__ = Ticket
__omit_fields__ = ['tickettotals']
class edit_form_type(EditableForm):
__model__ = Ticket
__omit_fields__ = ['tickettotals']
class edit_filler_type(EditFormFiller):
__model__ = Ticket
class table_type(TableBase):
__model__ = Ticket
__omit_fields__ = ['tickettotals']
class table_filler_type(TableFiller):
__model__ = Ticket
class CashController(CrudRestController):
model = Cash
class new_form_type(AddRecordForm):
__model__ = Cash
__omit_fields__ = ['cash_reports']
class edit_form_type(EditableForm):
__model__ = Cash
__omit_fields__ = ['cash_reports']
class edit_filler_type(EditFormFiller):
__model__ = Cash
class table_type(TableBase):
__model__ = Cash
__omit_fields__ = ['cash_reports']
class table_filler_type(TableFiller):
__model__ = Cash
class VatController(CrudRestController):
model = Vat
class new_form_type(AddRecordForm):
__model__ = Vat
__omit_fields__ = ['vatfees','cashes']
class edit_form_type(EditableForm):
__model__ = Vat
__omit_fields__ = ['vatfees', 'cashes']
class edit_filler_type(EditFormFiller):
__model__ = Vat
class table_type(TableBase):
__model__ = Vat
__omit_fields__ = ['vatfees','cashes']
class table_filler_type(TableFiller):
__model__ = Vat
class AdminCashupController(TGController):
location = LocationController(app_model.DBSession)
cash = CashController(app_model.DBSession)
vat = VatController(app_model.DBSession)
rvc = RvcController(app_model.DBSession)
user = UserController(app_model.DBSession)
cashcurrency = CashCurrencyController(app_model.DBSession)
creditcard = CreditCardController(app_model.DBSession)
debitcard = DebitCardController(app_model.DBSession)
ticket = TicketController(app_model.DBSession)
@expose('cashup.templates.cashup_admin_index')
def index(self):
locations = DBSession.query(app_model.DimLocation).all()
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
return dict(locations=locations, user=user)
@expose('cashup.templates.cashup_admin_new_location')
def new_location(self, location_id=None, **kw):
tmpl_context.widget = location_form
return dict(value=kw)
@validate(location_form, error_handler=new_location)
@expose()
def insert_location(self,**kw):
print kw
del kw['sprox_id']
location = app_model.DimLocation(**kw)
DBSession.add(location)
flash('your location was successfully added')
redirect('/cashup/admin/')
@expose('cashup.templates.cashup_admin_new_location')
def edit_location(self, location_id, **kw):
tmpl_context.widget = location_form
location=DBSession.query(app_model.DimLocation).filter_by(location_id=location_id).one()
return dict(value=location)
@validate(location_form, error_handler=edit_location)
@expose()
def update_location(self,location_id,**kw):
del kw['sprox_id']
location = DBSession.query(all_model.DimLocation).filter_by(location_id=location_id).one()
for k, v in kw.iteritems():
if hasattr(location, k):
setattr(location, k, v)
DBSession.add(location)
flash('your location was successfully updated')
redirect('/cashup/admin/')
"""
from cashup import model
admin_model = [model.CashCurrency,
model.Ticket,
model.CreditCard,
model.DebitCard,
model.FieldOther,
model.Vat,
app_model.DimLocation,
]
class MyAdminController(AdminController):
allow_only = predicates.has_permission('admin')
class WitnessController(TGController):
"""
"""
allow_only = predicates.not_anonymous(msg='Only logged in users can read this post')
@expose('json')
def index(self, location_id=None, *kw, **args):
"""Handle the front-page."""
users=DBSession.query(Membership).filter(Membership.team_id==location_id).all()
users = [dict(user_id=i.user.user_id, display_name=i.user.display_name) for i in users]
return dict(data=users)
class RootController(TGController):
admin = MyAdminController(admin_model, app_model.DBSession, config_type=TGAdminConfig)
admin_hidden = MyAdminController(model, app_model.DBSession, config_type=TGAdminConfig)
adminusers = AdminUserController()
modal = ModalController()
witness = WitnessController()
allow_only = predicates.not_anonymous(msg='Only logged in users can read this post')
olap = CashupReportController()
@expose('cashup.templates.modal.cashier_add_report')
def cashier_add_report(self, team_alias):
""""""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
#reports = team.reports
#reports.reverse()
users = [ i.user for i in team.members ]
year_opt = [(x + 2000, x + 2000) for x in range(20)]
month_opt = [(x + 1, x +1) for x in range(12)]
day_opt = [(x + 1, x + 1) for x in range(31)]
today = datetime.now()
day = today.day
month = today.month
year = today.year
if user in users:
return dict(user=user, team=team, year_opt=year_opt,
month_opt=month_opt, day_opt=day_opt,
day=day, month=month, year=year)
@expose()
def cashier_add_from_report(self, report_id):
report = DBSession.query(Report).filter(Report.report_id==report_id).one()
kw=dict(team_alias=report.team.team_alias,
year=report.date.year,
month=report.date.month,
day=report.date.day)
return self.cashier_add(**kw)
@expose()
def cashier_add(self, *args, **kw):
"""
team_alias
year
month
day
"""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==kw.get('team_alias')).one()
users = [ i.user for i in team.members ]
if user in users:
report_id = self._add(team, **kw)
#try:
# cash_report = DBSession.query(CashReport).filter(and_(
# CashReport.report_id == report_id,
# CashReport.user_id == user.user_id,
# CashReport.cashier == True,
# )).one()
# cash_report_id = cash_report.cash_report_id
#except:
kw['report_id'] = report_id
kw['code'] = "Chiusura Cassiere: %s"%user.display_name
report_id, cash_report_id = self._add_cash_report(team, **kw)
cash_report = DBSession.query(CashReport).get(cash_report_id)
cash_report.user_id = user.user_id
cash_report.cashier = True
DBSession.add(cash_report)
DBSession.flush()
return redirect('/cashup/cashier_show/'+team.team_alias+'/'+str(cash_report_id ))
@expose('cashup.templates.cashier_index')
def cashier_index(self, team_alias, *args, **kw):
"""Handle the front-page."""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
return dict(team=team, user=user)
@expose('cashup.templates.cashier_show')
def cashier_show(self, team_alias, cash_report_id, *args, **kw):
"""Handle the front-page."""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report = DBSession.query(CashReport).filter(and_(
CashReport.cash_report_id==cash_report_id,
CashReport.user_id==user.user_id)).one()
return dict(team=team, cash_report=cash_report)
@expose('cashup.templates.cashup_export_weekly')
def export_week(self):
locations = DBSession.query(app_model.DimLocation).all()
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
return dict(locations=locations, user=user)
@expose(content_type='text/csv')
def export(self, year=None, week=None, saletype=None, week_end=None):
location_codes = []
kw =dict(
year = year,
week=week,
week_end=week_end,
location_codes=location_codes,
by_day=False,
by_location=True,
by_rvc=True,
by_daypart=True,
items=True,
only_revenue=True
)
if saletype == 'sales':
qfun = dly_sales_query
if saletype == 'sales_new':
qfun = dly_sales_query_fp
elif saletype=='salesmix':
qfun = dly_salesmix_query
elif saletype=='cogs':
qfun = wly_cogs_query
kw = dict(
year = year,
week=week,
week_end=week_end,
location_codes=location_codes,
)
#if location_code:
# location_codes = location_code.split(',')
# location_codes = [i.strip() for i in location_codes]
query_lst, groupby, fltr, ret, cols = qfun( DBSession,**kw)
ret2 = []
for r in ret:
t = []
for i in r:
if i == None:
t.append('')
else:
t.append(unicode(i).encode('utf-8').replace(',','.'))
ret2.append(t)
csvcontent=""
for r in ret2:
csvcontent=csvcontent + ",".join(r) + "\n"
fname="%s_%s_%s.csv"%(saletype, year, week)
if week_end:
fname=u"%s_%s_%s-%s.csv"%(saletype, year, week,week_end)
#response.headerlist.append((u'Content-Disposition','attachment;filename=%s'%fname))
return unicode(csvcontent.decode('latin-1'))
@expose('cashup.templates.index')
def index(self):
locations = DBSession.query(app_model.DimLocation).all()
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
#if len(user.teams) == 1:
# team = user.teams[0]
# return redirect(self.root_url+'/show/%s'%(team.team_alias))
#else:
# return dict(page='index', user=user, root_url = self.root_url)
return dict(locations=locations, user=user)
@expose('cashup.templates.cashup_banking_cashier')
def banking_cashier(self, team_alias=None, **kw):
locations_all = False
team = None
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
if team_alias:
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if True:#user in users:
start = kw.get('start')
end = kw.get('end')
start = kw.get('start')
end = kw.get('end')
if not end:
end = datetime.now()
if not start:
start = end - timedelta(days=35)
validated = int(kw.get('validated',0))
published = int(kw.get('published',0))
kw["published"] = published
acquired = int(kw.get('acquired',0))
qrand = []
qror = []
loc_ids = kw.get('location[]', [])
if not loc_ids:
locations_all = True
if team_alias and not kw.get('location[]'):
loc_ids=[team.location_id]
if not loc_ids:
loc_ids = [t.team.location_id for t in user.teams ]
if not type(loc_ids) == list:
loc_ids = [loc_ids]
kw['location[]'] = []
for i in loc_ids:
kw['location[]'].append(int(i))
if loc_ids:
for i in loc_ids:
qror.append(Report.team_id == int(i))
qrand = [or_(*qror)]
qrand2 = [
CashCurrencyTotal.cash_report_id == CashReport.cash_report_id,
#BankingCurrencyTotal.published == published,
#BankingCurrencyTotal.validated == validated,
#BankingCurrencyTotal.acquired == acquired,
]
if published == 1:
qrand2 += [CashReport.published == 1]
if published == 2:
qrand2 += [or_(CashReport.published == None,
CashReport.published == 0 )]
qrand = qrand + qrand2
if start:
qrand.append(Report.date >= start)
if end:
qrand.append(Report.date<=end)
bsuspended = DBSession.query(CashCurrencyTotal).outerjoin(CashReport, CashReport.cash_report_id==CashCurrencyTotal.cash_report_id)
bsuspended = bsuspended.outerjoin(Report, CashReport.report_id==Report.report_id).filter(
and_(*qrand)).order_by(Report.date)
if not start :
bsuspended=bsuspended.order_by(Report.date)
bsuspended = bsuspended.all()
bsuspended.reverse()
if locations_all:
kw['location[]'] = ''
return dict(team=team, user=user, bsuspended=bsuspended, kw=kw, action='_cashier')
@expose('cashup.templates.cashup_banking')
def banking(self, team_alias=None, **kw):
locations_all = False
team = None
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
if team_alias:
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if True:#user in users:
start = kw.get('start')
end = kw.get('end')
start = kw.get('start')
end = kw.get('end')
if not end:
end = datetime.now()
if not start:
start = end - timedelta(days=35)
validated = int(kw.get('validated',0))
published = int(kw.get('published',0))
acquired = int(kw.get('acquired',0))
qrand = []
qror = []
loc_ids = kw.get('location[]', [])
if not loc_ids:
locations_all = True
if team_alias and not kw.get('location[]'):
loc_ids=[team.location_id]
if not loc_ids:
loc_ids = [t.team.location_id for t in user.teams if not t.role == 'cashier']
if not type(loc_ids) == list:
loc_ids = [loc_ids]
kw['location[]'] = []
for i in loc_ids:
kw['location[]'].append(int(i))
if loc_ids:
for i in loc_ids:
qror.append(Report.team_id == int(i))
qrand = [or_(*qror)]
qrand2 = [
BankingCurrencyTotal.report_id == Report.report_id,
#BankingCurrencyTotal.published == published,
#BankingCurrencyTotal.validated == validated,
#BankingCurrencyTotal.acquired == acquired,
]
if published == 1:
qrand2 += [BankingCurrencyTotal.report_id2 > 0]
if published == 2:
qrand2 += [BankingCurrencyTotal.report_id2 == None]
qrand = qrand + qrand2
if start:
qrand.append(BankingCurrencyTotal.date >= start)
if end:
qrand.append(BankingCurrencyTotal.date<=end)
bsuspended = DBSession.query(BankingCurrencyTotal).outerjoin(Report, Report.report_id==BankingCurrencyTotal.report_id).filter(
and_(*qrand)).order_by(BankingCurrencyTotal.date2).order_by(BankingCurrencyTotal.date)
if not start :
                bsuspended=bsuspended.order_by(BankingCurrencyTotal.date2).order_by(BankingCurrencyTotal.date)
print bsuspended
bsuspended = bsuspended.all()
bsuspended.reverse()
if locations_all:
kw['location[]'] = ''
return dict(team=team, user=user, bsuspended=bsuspended, kw=kw,action='')
#@paginate('reports', items_per_page=31)
@expose('cashup.templates.report_show_all')
def show_all(self, *args, **kw):
        """Handle the front-page."""
        kw = get_dateobj(**kw)
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
teams = [Report.team_id==m.team.location_id for m in user.teams ]
print user.teams
start = kw.get('start')
end = kw.get('end')
if not end:
end = datetime.now()
if not start:
start = end - timedelta(days=10)
qrand = [or_(*teams)]
if start:
qrand.append(Report.date >= start)
if end:
qrand.append(Report.date<=end)
reports = DBSession.query(Report).filter(and_(*qrand))
reports = reports.outerjoin(app_model.DimLocation, app_model.DimLocation.location_id==Report.team_id)
reports = reports.order_by(Report.date.asc(),app_model.DimLocation.location_code.asc()).all()
reports.reverse()
"""
for r in reports:
for t in totkeys:
if not totals.has_key(t):
totals[t] = 0
if hasattr(r,t):
totals[t] += getattr(r,t)
"""
flash_active=config.get("module.flash")
if flash_active == 'true':
flash_active=True
else:
flash_active=False
"""
report.dly = {'dsc_item': -93.89999771118164, 'dsc_sbtl': 0.25, 'covers': 350.0, 'location_code': u'0001',
'srv': 63.54999923706055, 'date': datetime.datetime(2015, 6, 5, 0, 0), 'sales_total': 1905.0499877929688,
'sales_srv_total': 1968.5999870300293, 'checks': 457.0}
"""
return dict(reports=reports,filter_action="/cashup/show_all",flash_active=flash_active)
#@paginate('reports', items_per_page=31)
@expose('cashup.templates.report_index')
def show(self, team_alias, *args, **kw):
kw = get_dateobj(**kw)
"""Handle the front-page."""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
role = _get_role(user.user_id, team.location_id)
if role == 'cashier':
return redirect('/cashup/cashier_index/%s'%(team_alias))
start = kw.get('start')
end = kw.get('end')
if not end:
end = datetime.now()
if not start:
start = end - timedelta(days=40)
qrand = [Report.team_id == team.location_id]
if start:
qrand.append(Report.date >= start)
if end:
qrand.append(Report.date<=end)
reports = DBSession.query(Report).filter(and_(*qrand))
reports = reports.order_by(Report.date.asc()).all()
reports.reverse()
totals = {}
totkeys = [ #'tot_invoices',
'fee', 'tot_input_errors', 'ticket','pos', 'credit_card', 'levy', 'tot_suspended', 'tot_others']
for r in reports:
for t in totkeys:
if not totals.has_key(t):
totals[t] = 0
if hasattr(r,t):
totals[t] += getattr(r,t)
flash_active=config.get("module.flash")
if flash_active == 'true':
flash_active=True
else:
flash_active=False
"""
report.dly = {'dsc_item': -93.89999771118164, 'dsc_sbtl': 0.25, 'covers': 350.0, 'location_code': u'0001',
'srv': 63.54999923706055, 'date': datetime.datetime(2015, 6, 5, 0, 0), 'sales_total': 1905.0499877929688,
'sales_srv_total': 1968.5999870300293, 'checks': 457.0}
"""
net_tot = 0
gross_tot = 0
for i in reports:
net_tot += i.net_total
gross_tot += i.total
return dict(team=team, reports=reports, filter_action="/cashup/show/%s"%team.team_alias, totals=totals, totkeys=totkeys, flash_active=flash_active, net_tot=net_tot, gross_tot=gross_tot)
@expose()
def cashier_publish_report(self, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==kw.get('team_alias')).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report = DBSession.query(CashReport).filter_by(
cash_report_id=kw.get('cash_report_id'),
user_id=user.user_id,
).one()
cash_report.cash_report_serial=kw.get('cash_report_serial')
cash_report.published=True
print "==============", kw
DBSession.add(cash_report)
DBSession.flush()
return redirect('/cashup/cashier_show/%s/%s'%(kw.get('team_alias'), kw.get('cash_report_id')))
@expose()
def publish_report(self, **kw):
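        # Publish a daily report: make sure the DimDate and FactDHH rows for the date exist,
        # pull the day's flash totals via QueryWTR to set gross/net totals according to the
        # team's VAT settings, optionally trigger the DHO and Fourth FnB FTP exports, then
        # mark the report published and roll its cash totals into end-of-day banking rows.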
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==kw.get('team_alias')).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(team_id=team.location_id,
report_id=kw.get('report_id')).one()
#report.published=True
flash_report=None
keys = []
flash_active=config.get("module.flash")
fnb_active=config.get('module.fourthfnbftp')
dho_active=config.get('module.dlydho')
if dho_active == 'false':
dho_active=False
try:
dateobj = DBSession.query(app_model.DimDate).filter_by(date=report.date).one()
except:
#raise
datedict = get_dim_date(report.year, report.month, report.day)
dateobj = app_model.DimDate()
                for k,v in datedict.iteritems():
                    setattr(dateobj, k, v)
                DBSession.add(dateobj)
DBSession.flush()
try:
dhhobj = DBSession.query(app_model.FactDHH).filter(and_(
app_model.FactDHH.date==report.date,
app_model.FactDHH.location==team.team_alias)).one()
except:
dhhobj = app_model.FactDHH()
dhhobj.date = report.date
dhhobj.location = team.team_alias
DBSession.add(dhhobj)
DBSession.flush()
dhhobj.date_id = dateobj.date_id
logger.info(dhhobj)
if True: #flash_active == 'true':
flash_active=True
flash_report=QueryWTR.get(team.location_code, report.year, date=report.date)
day=flash_report['days'][0]
keys=flash_report['sales_keys']
report.total_fiscal = report.tot_fee
if not team.net_sales:
report.total = flash_report['dlyrvc_sales'][0][day][keys[2]] or 0
report.net_total = flash_report['dlyrvc_taxrate_ttl'][0][day]['net_total']
elif team.dtr_vat:
report.total = float(report.tot_fee_dict2['gross'])
report.net_total = float(report.tot_fee_dict2['net'])
else:
report.total = float(report.tot_fee)
report.net_total = float(report.total-report.srv)*100/(100+float(team.net_sales))
#flash_report['dlyrvc_salesmix_ttl'][0][day]['net_total_ad'] or 0
#report.net_total = flash_report['dlyrvc_salesmix'][0][day][keys[1]]
#user.published_reports.append(report)
#alert_others(team=team, user=user, report=report)
#alert_diff(team=team, user=user, report=report)
#alert_published_report(team=team, user=user, report=report)
#alert_dhh(team=team, user=user, report=report)
notify=kw.get('notify')
if report.notified:
notify = 0
if dho_active:
QueryDLY().get_dho(report.report_id, notify=notify)
report.notified=1
if fnb_active:
try:
QueryDLY().put_ftpfnb(team.location_code,dhhobj.date)
except:
pass
report.published=True
DBSession.add(report)
DBSession.add(dhhobj)
DBSession.flush()
self.add_banking_partial(team_alias=team.team_alias, report_id=report.report_id, publish=True)
return redirect('/cashup/report_show/%s/%s'%(kw.get('team_alias'), kw.get('report_id')))
@expose()
def validate_report(self, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==kw.get('team_alias')).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(team_id=team.location_id,
report_id=kw.get('report_id')).one()
report.validated=True
#user.validated_reports.append(report)
#alert_validated_report(team=team, user=user, report=report)
DBSession.flush()
DBSession.add(report)
return redirect('/cashup/report_show/%s/%s'%(kw.get('team_alias'), kw.get('report_id')))
@expose()
def unpublish_report(self, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==kw.get('team_alias')).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(team_id=team.location_id,
report_id=kw.get('report_id')).one()
report.published=False
report.validated=False
#user.published_reports.append(report)
#alert_others(team=team, user=user, report=report)
#alert_diff(team=team, user=user, report=report)
#alert_published_report(team=team, user=user, report=report)
#alert_dhh(team=team, user=user, report=report)
DBSession.flush()
DBSession.add(report)
return redirect('/cashup/report_show/%s/%s'%(kw.get('team_alias'), kw.get('report_id')))
def _add(self, team, session=DBSession,**kw):
"""
{u'team_alias': u'LECCIO-FIRENZE', u'month': u'6', u'day': u'16', u'year': u'2014'}
"""
coins = session.query(Coin).all()
date = datetime(int(kw.get('year')), int(kw.get('month')), int(kw.get('day')))
try:
report = session.query(Report).filter_by(team_id=team.location_id, date=date).one()
report_id = report.report_id
except:
report = Report()
report = set_value(report, kw)
report.date = date
report_currency = DBSession.query(CashCurrency).all()
# add change rates for current report
for rc in report_currency:
cr = ChangeRate()
cr.changerate = rc.rate
rc.changerates.append(cr)
report.changerates.append(cr)
session.add(cr)
"""
for cash in team.cashes:
cash_report = CashReport()
#print "==="*10, dir(cash)
for vat in cash.vats:
vatfee = VatFee()
vatfee.amount = 0
vatfee.vat_amount = vat.amount
vatfee.code = vat.code
vatfee.vat_id = vat.vat_id
cash_report.vatfees.append(vatfee)
for coin in coins:
coinpz = CoinPz()
coinpz.amount = 0
coinpz.coin_amount = coin.amount
cash_report.coinspz.append(coinpz)
cash.cash_reports.append(cash_report)
report.cash_reports.append(cash_report)
"""
team.reports.append(report)
session.add(report)
session.flush()
report_id = report.report_id
#transaction.commit()
return report_id
def _add_cash_report(self, team, **kw):
"""
{u'team_alias': u'LECCIO-FIRENZE', u'month': u'6', u'day': u'16', u'year': u'2014'}
"""
coins = DBSession.query(Coin).all()
#date = datetime(int(kw.get('year')), int(kw.get('month')), int(kw.get('day')))
report = DBSession.query(Report).filter_by(report_id=kw['report_id']).one()
report_id = report.report_id
cash_report = CashReport()
#print "==="*10, dir(cash)
vats = DBSession.query(Vat).all()
cash = Cash()
cash.code = kw.get('code')
cash.team_id = team.location_id
#DBSession.flush()
for vat in vats:
vat.cashes.append(cash)
vatfee = VatFee()
vatfee.amount = 0
vatfee.vat_amount = vat.amount
vatfee.code = vat.code
vatfee.vat_id = vat.vat_id
cash_report.vatfees.append(vatfee)
for coin in coins:
coinpz = CoinPz()
coinpz.amount = 0
coinpz.coin_amount = coin.amount
cash_report.coinspz.append(coinpz)
cash.cash_reports.append(cash_report)
report.cash_reports.append(cash_report)
team.reports.append(report)
DBSession.add(report)
DBSession.flush()
report_id = report.report_id
#transaction.commit()
return report_id, cash_report.cash_report_id
@expose()
def add(self, **kw):
team_alias = kw['team_alias']
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report_id = self._add(team, **kw)
return redirect('/cashup/show/'+team_alias+'/'+str(report_id))
@expose()
def add_cash_report(self, **kw):
team_alias = kw['team_alias']
report_id = kw['report_id']
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report_id, cash_report_id = self._add_cash_report(team, **kw)
return redirect('/cashup/report_show/'+team_alias+'/'+str(report_id)+'#%s'%(cash_report_id))
@expose()
def add_banking_partial(self, team_alias=None, report_id=None, publish=False,**kw):
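        # Group the report's CashCurrencyTotal rows by currency into BankingCurrencyTotal
        # records. With publish=True an end-of-day (eod) banking row per currency is reused
        # or created; otherwise a fresh non-eod row is created. Only totals not yet attached
        # to a banking row are collected, plus those already linked to the reused eod row.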
print "@@@@@@@@" , [kw], team_alias, report_id
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
ids = kw.get('ids')
if not ids:
ids = []
elif not type(ids) == list:
ids = [ids]
if user in users:
report = DBSession.query(Report).filter_by(report_id=report_id).one()
filter_or = [
CashCurrencyTotal.cashcurrencytotal_id == i for i in ids]
ret = DBSession.query(CashCurrencyTotal)
ret = ret.join(CashReport, CashReport.cash_report_id == CashCurrencyTotal.cash_report_id)
ret = ret.join(Report, Report.report_id == CashReport.report_id)
if filter_or:
ret = ret.filter(or_(*filter_or))
ret = ret.filter(and_(Report.report_id==report_id))# CashCurrencyTotal.bankingcurrency_id==None))
#ret = ret.all()
print "======== add banking partial", ret
banking = dict()
for b in ret:
if not b.cashcurrency_id in banking:
banking[b.cashcurrency_id] = []
banking[b.cashcurrency_id].append(b)
ccurrencies = [c.cashcurrency_id for c in DBSession.query(CashCurrency).all()]
for key in ccurrencies:
ret_curr = ret.filter(CashCurrencyTotal.cashcurrency_id==key)
filter_curr_or = [CashCurrencyTotal.bankingcurrency_id==None]
create_bnkobj = False
eod = False
if publish:
try:
bnkobj = DBSession.query(BankingCurrencyTotal).filter(
and_(
BankingCurrencyTotal.report_id==report_id,
BankingCurrencyTotal.date==report.date,
BankingCurrencyTotal.cashcurrency_id == key,
BankingCurrencyTotal.eod==True
)
).one()
filter_curr_or.append(CashCurrencyTotal.bankingcurrency_id==bnkobj.bankingcurrency_id)
eod = True
except:
create_bnkobj = True
eod = True
else:
create_bnkobj = True
eod = False
if create_bnkobj:
bnkobj = BankingCurrencyTotal(report_id=report_id,
date=report.date,
cashcurrency_id = key)
bnkobj.eod=eod
bnkobj.amount = 0
#banking.changerate = 0
#banking.amount2 = 0
for cctobj in ret_curr.filter(or_(*filter_curr_or)).all():
bnkobj.cashcurrencytotals.append(cctobj)
bnkobj.currency = cctobj.changerate.cashcurrency.alias
bnkobj.amount += cctobj.amount
#banking.amount2 += cctobj.amount2
#banking.changerate = cctobj.changerate
DBSession.add(bnkobj)
return redirect('/cashup/report_show/'+team_alias+'/'+str(report_id))
@expose()
def add_banking(self, team_alias=None, report_id=None, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(report_id=report_id).one()
"""
report.banking:
{1: {'amount2': 5149.8099999999995,
'amount': 5149.8099999999995,
'changerate': 1.0,
'currency': u'EUR'},
2: {'amount2': 0.0,
'amount': 2.0,
'changerate': 0.0,
'currency': u'USD'}}
"""
for key, values_dict in report.banking.iteritems():
try:
banking = DBSession.query(BankingCurrencyTotal).filter(
and_(
BankingCurrencyTotal.report_id == report.report_id,
BankingCurrencyTotal.date == report.date,
BankingCurrencyTotal.cashcurrency_id == key,
BankingCurrencyTotal.eod==True,
)).one()
except:
banking = BankingCurrencyTotal(report_id=report_id,
date=report.date,
cashcurrency_id = key,
eod=True)
for k, val in values_dict.iteritems():
if hasattr(banking, k ):
try:
setattr(banking, k, val)
except:
pass
DBSession.add(banking)
report.export
#report.published = 1
DBSession.add(report)
return redirect('/cashup/report_show/'+team_alias+'/'+str(report_id))
def add_banking_uuid(self, team_alias=None, banking_id=None, report_id=None, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(report_id=report_id).one()
@expose()
def add_banking2(self, team_alias=None, banking_id=None, report_id=None, **kw):
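        # Attach paying-in details (three-part serial, banking UUID) to an existing
        # BankingCurrencyTotal, stamp it with the report date/id and mark it published.
        # Totals in the team currency (or any cashup.more_currency entry) are auto-validated.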
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report = DBSession.query(Report).filter_by(report_id=report_id).one()
banking = DBSession.query(BankingCurrencyTotal).filter(and_(
BankingCurrencyTotal.bankingcurrency_id == banking_id,
)).one()
banking.serial = "%(serial1)s - %(serial2)s - %(serial3)s"%( kw)
banking.date2 = report.date
banking.report_id2 = report_id
banking.published = 1
banking.banking_uuid = kw.get('banking_uuid')
more_curr_tmp = config.get('cashup.more_currency', [])
if more_curr_tmp:
more_curr_tmp = more_curr_tmp.split(',')
more_curr = [team.team_currency]
for m in more_curr_tmp:
if m:
more_curr.append(m)
if banking.cashcurrency.alias in more_curr:
banking.validated = 1
banking.amount3 = banking.amount
#banking.acquired = 1
DBSession.add(banking)
return redirect('/cashup/report_show/'+team_alias+'/'+str(report_id)+'#banking')
@expose()
def add_banking3(self, team_alias=None, banking_id=None, **kw):
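        # Record the amount actually banked (amount3) and any deposit cost for a banking row,
        # then flag it as validated.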
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
banking = DBSession.query(BankingCurrencyTotal).filter(and_(
BankingCurrencyTotal.bankingcurrency_id == banking_id,
)).one()
banking.amount3 = kw.get('amount3')
banking.deposit_cost = kw.get('deposit_cost')
banking.validated = 1
DBSession.add(banking)
return redirect('/cashup/banking/'+team_alias)
def _get_pmnts(self, team, report):
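        # Build one pandas DataFrame with the day's payments for the report: cash currencies
        # (with their change rates), credit/debit cards, tickets and petty cash, adding a
        # derived amount2 = amount * changerate column and rounding where possible.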
pmnts = [(DebitCard,DebitCardTotal), (CreditCard, CreditCardTotal)]
df = QueryWTR.get_pmnt(team.location_code, report.date.year, date=report.date, pmntobj=CashCurrency, pmntobj_ttl=CashCurrencyTotal, dataframe=True, changerate=True)
#df = df.dropna()
for pmntobj, pmntobj_ttl in pmnts:
df_tmp = QueryWTR.get_pmnt(team.location_code, report.date.year, date=report.date, pmntobj=pmntobj, pmntobj_ttl=pmntobj_ttl, dataframe=True)
df_tmp['changerate'] = 1.00
df = df.append(df_tmp)
df_tmp = QueryWTR.get_pmnt(team.location_code, report.date.year, date=report.date, pmntobj=Ticket, pmntobj_ttl=TicketTotal, dataframe=True)
df_tmp['changerate'] = 1.00
df = df.append(df_tmp)
df_tmp = QueryWTR.get_pettycash(team.location_code, report.date.year, date=report.date, dataframe=True)
#df_tmp = df_tmp.groupby('date')['amount'].sum()
#df_tmp['amount'] = df_tmp.groupby('date')['amount'].transform(np.sum)
#print df_tmp.groupby('date')['amount'].transform(np.sum)
df_tmp['changerate'] = 1.00
df_tmp['code'] = '000'
df_tmp['alias'] = 'Petty Cash'
df = df.append(df_tmp)
df = df.dropna()
df['amount2'] = df['amount']*df['changerate']
try:
df['amount2'] = np.round(df['amount2'] , decimals=2)
df['amount'] = np.round(df['amount'], decimals=2)
except:
pass
df = df[['date','code','alias', 'changerate', 'amount', 'amount2']]
return df
#@paginate('reports', items_per_page=31)
@expose('json')
@expose('cashup.templates.report_show')
def report_show(self, team_alias, report_id, *args, **kw):
"""Handle the front-page."""
print "========================", [kw]
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
membership = 'user'
for m in team.members:
if m.user == user:
membership = m.role
report = DBSession.query(Report).filter(Report.report_id==report_id).one()
if membership == 'cashier':
return redirect('/cashup/cashier/%s/%s'%(team_alias,report_id))
membership = membership+str(report.published)
flash_active=config.get("module.flash")
flash_report=None
keys = []
if flash_active == 'true':
try:
flash_active=True
flash_report=QueryWTR.get(team.location_code, report.year, date=report.date)
day=flash_report['days'][0]
keys=flash_report['sales_keys']
except:
day = None
keys = None
flash_active = False
else:
flash_active=False
day = None
keys = None
checks_active=config.get("module.checks")
if checks_active == 'true':
checks_active=True
else:
checks_active=False
if config.get('module.cashup2'):
override_template(self.report_show, "genshi:cashup.templates.report_show_new")
try:
df_html = self._get_pmnts(team, report)
#df = pd.pivot_table(df, index=['code', 'alias'], values=['amount', 'amount2'])
#df_html = df.to_html(classes=['table', 'table-bordered', 'table-hover', 'table-striped'])
except:
df_html = ''
else:
df_html = ''
return dict(team=team, report=report, flash_active=flash_active, flash_report=flash_report, day=day, checks_active=checks_active, keys=keys, membership=membership, df_html=df_html)
@expose('json')
@expose('cashup.templates.report_add_banking')
def report_add_banking(self, team_alias, report_id, *args, **kw):
"""Handle the front-page."""
print "===========================", [kw]
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
membership = 'user'
for m in team.members:
if m.user == user:
membership = m.role
report = DBSession.query(Report).filter(Report.report_id==report_id).one()
membership = membership+str(report.published)
banking_uuid = str(uuid.uuid4())
return dict(team=team, report=report , membership=membership, banking_uuid=banking_uuid)
#@paginate('reports', items_per_page=31)
@expose('cashup.templates.report_show2')
def report_show2(self, team_alias, report_id, *args, **kw):
"""Handle the front-page."""
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
membership = 'user'
for m in team.members:
if m.user == user:
membership = m.role
report = DBSession.query(Report).filter(Report.report_id==report_id).one()
membership = membership+str(report.published)
flash_active=config.get("module.flash")
flash_report=None
keys = []
flash_active=True
flash_report=QueryWTR.get(team.location_code, report.year, date=report.date)
day=flash_report['days'][0]
keys=flash_report['sales_keys']
checks_active=config.get("module.checks")
if checks_active == 'true':
checks_active=True
else:
checks_active=False
return dict(team=team, report=report, flash_active=flash_active, flash_report=flash_report, day=day, checks_active=checks_active, keys=keys, membership=membership)
@expose()
def update_cash_report(self, **kw):
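        # Persist the posted cash-report form: per-VAT fee amounts (summed into the total),
        # coin counts, the service-charge split into retention and trunk using team.retention,
        # and any other matching CashReport attribute, then redirect back to the report view.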
team_alias = kw.get('team_alias')
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report_id = kw.get('cash_report_id')
cash_report = DBSession.query(CashReport).filter_by(cash_report_id=cash_report_id).one()
total=0
for k,v in kw.iteritems():
if k.startswith('vatfee_'):
vatfee_id = k.split('vatfee_')[1]
vatfee = DBSession.query(VatFee).filter_by(vatfee_id=vatfee_id).one()
vatfee.amount=v
total = total + float(v)
cash_report.vatfees.append(vatfee)
if k.startswith('coinpz_'):
coinpz_id = k.split('coinpz_')[1]
coinpz = DBSession.query(CoinPz).filter_by(coinpz_id=coinpz_id).one()
coinpz.amount=v
cash_report.coinspz.append(coinpz)
if k == 'charge':
cash_report.srv_retention = float(v)-(float(v)/team.retention)
cash_report.srv_trunk = float(v)/team.retention
cash_report.srv_perc = team.retention
if k == 'gross_total':
total = v
print "================", v
if hasattr(cash_report, k):
setattr(cash_report, k, v)
if total >= 0:
cash_report.total = total
DBSession.add(cash_report)
return redirect('/cashup/report_show/%s/%s#%s'%(team_alias,
cash_report.report.report_id,
cash_report.cash_report_id))
@expose()
def update_salesmix(self, **kw):
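        # Write an adjustment row into FactMicrosSALESMIX for one revenue centre / major group
        # so the stored gross and net totals (before and after discount) match the figures
        # entered in the form; the deltas against the current by_mi_rvc totals become the
        # adjustment's sales, discount and VAT amounts.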
print kw
team_alias = kw.get('team_alias')
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report_id = kw.get('report_id')
rvc = kw.get('rvc')
mj_code=kw.get('mj_code')
report = DBSession.query(Report).filter_by(report_id=report_id).one()
mj = DBSession.query(app_model.DimMajorgroup).filter(and_(
app_model.DimMajorgroup.default==1,
app_model.DimMajorgroup.majorgroup_ref_code==mj_code,
)).first()
location_code=team.location_code
cols, data = report.by_mi_rvc(date_from=report.date, date_to=report.date,rvcs=[rvc], location_codes=[team.location_code], mj_codes=[mj_code],adj=0)
dateobj = DBSession.query(app_model.DimDate).filter(app_model.DimDate.date==report.date).one()
data=data[0]
ret = dict(zip(cols,data))
gt_ad = kw.get('gross_total_ad')
gt_bd = kw.get('gross_total_bd')
nt_ad = kw.get('net_total_ad')
nt_bd = kw.get('net_total_bd')
            query_adj = DBSession.query(app_model.FactMicrosSALESMIX).filter(
and_(
app_model.FactMicrosSALESMIX.date==report.date,
app_model.FactMicrosSALESMIX.location_code==location_code,
app_model.FactMicrosSALESMIX.rvc_num==rvc,
app_model.FactMicrosSALESMIX.adj==True,
app_model.FactMicrosSALESMIX.mjr_grp_num==mj_code,
app_model.FactMicrosSALESMIX.mi_num==mj.mi_num))
try:
obj=query_adj.one()
except:
obj = app_model.FactMicrosSALESMIX()
obj.date=report.date
obj.business_date=report.date
obj.location_code=location_code
obj.rvc_num=rvc
obj.mjr_grp_num=mj.majorgroup_code
obj.adj=True
obj.mi_num = mj.mi_num
obj.fam_grp_num=mj.fam_grp_num
obj.fam_grp_name=mj.fam_grp_name
obj.mjr_grp_name=mj.majorgroup_ref
obj.mi_name=mj.mi_name
obj.sls_cnt=1
bd_ttl = float(gt_bd)-ret['gross_total_bd']
ad_ttl = float(gt_ad)-ret['gross_total_ad']
dsc_ttl = ad_ttl - bd_ttl
net_bd_ttl = float(nt_bd)-ret['net_total_bd']
net_ad_ttl = float(nt_ad)-ret['net_total_ad']
net_dsc_ttl = net_ad_ttl - net_bd_ttl
vat_ttl = bd_ttl - net_bd_ttl
dsc_vat_ttl = dsc_ttl - net_dsc_ttl
print "========", ret['gross_total_bd'], gt_bd, bd_ttl
obj.sls_ttl=bd_ttl
obj.vat_ttl=vat_ttl
obj.vat_ttl_ex=vat_ttl
obj.dsc_ttl=dsc_ttl
obj.date_id=dateobj.date_id
obj.dsc_vat_ttl=dsc_vat_ttl
obj.dsc_vat_ttl_ex=dsc_vat_ttl
DBSession.add(obj)
return redirect('/cashup/report_show/%s/%s#%s'%(team_alias,
report.report_id,
'flash'))
@expose()
def update_sales(self, **kw):
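        # Write an adjustment row into FactMicrosSALESOT (creating its parent FactMicrosSALES
        # row when missing) so that check and cover counts, service charge and net sales for
        # the revenue centre line up with the figures entered in the form.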
team_alias = kw.get('team_alias')
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report_id = kw.get('report_id')
rvc = kw.get('rvc')
report = DBSession.query(Report).filter_by(report_id=report_id).one()
dateobj = DBSession.query(app_model.DimDate).filter(app_model.DimDate.date==report.date).one()
cols, data = report.by_rvc(date_from=report.date, date_to=report.date,rvcs=[rvc], location_codes=[team.location_code], adj=0)
data=data[0]
location_code=team.location_code
srv=float(kw.get('srv'))
total=float(kw.get('total'))
checks=float(kw.get('checks'))
covers=kw.get('covers')
try:
otobj = DBSession.query(app_model.FactMicrosSALESOT).filter(
and_(
app_model.FactMicrosSALESOT.date==report.date,
app_model.FactMicrosSALESOT.location_code==location_code,
app_model.FactMicrosSALESOT.rvc_number==rvc,
app_model.FactMicrosSALESOT.adj==True,
)).one()
except:
obj = app_model.FactMicrosSALES()
obj.date=report.date
obj.business_date=report.date
obj.location_code=location_code
obj.rvc_number=rvc
DBSession.add(obj)
DBSession.flush()
otobj = app_model.FactMicrosSALESOT()
otobj.rvc_number=rvc
otobj.location_code=location_code
otobj.date=report.date
otobj.business_date=report.date
otobj.adj=True
obj.salesot.append(otobj)
otobj.check_count=float(checks)-float(data[6])
otobj.cover_count=float(covers)-float(data[5])
otobj.sales.service_chg_ttl=srv-float(data[1])
otobj.sales.net_sales_ttl=total-float(data[0])
otobj.date_id = dateobj.date_id
otobj.sales.date_id = dateobj.date_id
DBSession.add(otobj)
DBSession.flush()
return redirect('/cashup/report_show/%s/%s#%s'%(team_alias,
report.report_id,
'flash'))
@expose()
def update_changerate(self, **kw):
team_alias = kw.get('team_alias')
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report_id = kw.get('report_id')
report = DBSession.query(Report).filter_by(report_id=report_id).one()
for k,v in kw.iteritems():
if k.startswith('changerate_'):
changerate_id = k.split('changerate_')[1]
changerate = DBSession.query(ChangeRate).filter_by(changerate_id=changerate_id).one()
changerate.changerate=v
report.changerates.append(changerate)
DBSession.add(report)
return redirect('/cashup/report_show/%s/%s#changerate'%(team_alias,
report_id))
@expose()
def update_mgrcomments(self, key, **kw):
team_alias = kw.get('team_alias')
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
report_id = kw.get('report_id')
report = DBSession.query(Report).get(report_id)
try:
dhhobj = DBSession.query(app_model.FactDHH).filter(and_(
app_model.FactDHH.date==report.date,
app_model.FactDHH.location==team.team_alias)).one()
except:
#raise
dhhobj = app_model.FactDHH()
dhhobj.date = report.date
dhhobj.location = team.team_alias
dhhobj.report_id = report_id
setattr(dhhobj, "%s_note"%key, kw[key])
if kw.has_key('wrkdh'):
setattr(dhhobj, 'wrkdh', kw['wrkdh'])
DBSession.add(dhhobj)
return redirect('/cashup/report_show/%s/%s#mgrcomments'%(team_alias,
report_id))
@expose()
def cashier_insert_relation(self, rel_type, **kw):
team_alias = kw.get('team_alias')
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report_id = kw.get('cash_report_id')
cash_report = DBSession.query(CashReport).filter_by(cash_report_id=cash_report_id).one()
cashcurrencytotal = CashCurrencyTotal()
kw['user_id']=user.user_id
#kw['witness_id']=kw.get('witness[]')
kw['serial'] = "%s"%(kw['serial'])
cashcurrencytotal = set_value(cashcurrencytotal, kw)
cash_report.cashcurrencytotals.append(cashcurrencytotal)
DBSession.add(cashcurrencytotal)
return redirect('/cashup/cashier_show/%s/%s'%(team_alias,
cash_report.cash_report_id))
@expose()
def insert_relation(self, rel_type, **kw):
team_alias = kw.get('team_alias')
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report_id = kw.get('cash_report_id')
cash_report = DBSession.query(CashReport).filter_by(cash_report_id=cash_report_id).one()
if rel_type == 'tot_input_errors':
input_error = InputError()
input_error = set_value(input_error, kw)
cash_report.input_errors.append(input_error)
DBSession.add(cash_report)
elif rel_type == 'tot_suspended':
suspended = Suspended()
                suspended = set_value(suspended, kw)
cash_report.suspended.append(suspended)
DBSession.add(suspended)
elif rel_type == 'tot_invoices':
invoice = Invoice()
invoice = set_value(invoice, kw)
cash_report.invoices.append(invoice)
DBSession.add(invoice)
elif rel_type == 'tot_others':
other = Other()
other = set_value(other, kw)
other.fieldothers_id = kw.get('section')
cash_report.others.append(other)
DBSession.add(other)
elif rel_type.startswith('credit_card'):
creditcardtotal = CreditCardTotal()
creditcardtotal = set_value(creditcardtotal, kw)
cash_report.creditcardtotals.append(creditcardtotal)
DBSession.add(creditcardtotal)
elif rel_type == 'ticket':
tickettotal = TicketTotal()
tickettotal = set_value(tickettotal, kw)
cash_report.tickettotals.append(tickettotal)
DBSession.add(tickettotal)
elif rel_type == 'pos':
debitcardtotal = DebitCardTotal()
debitcardtotal = set_value(debitcardtotal, kw)
cash_report.debitcardtotals.append(debitcardtotal)
DBSession.add(debitcardtotal)
elif rel_type == 'levy':
cashcurrencytotal = CashCurrencyTotal()
cashcurrencytotal = set_value(cashcurrencytotal, kw)
cash_report.cashcurrencytotals.append(cashcurrencytotal)
DBSession.add(cashcurrencytotal)
return redirect('/cashup/report_show/%s/%s#%s'%(team_alias,
cash_report.report.report_id,
cash_report.cash_report_id))
@expose()
def insert_cogs(self, **kw):
print kw
team_alias = kw.get('team_alias')
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
year = kw.get("year")
week = kw.get("week")
mj_id = kw.get("majorgroup")
mj = DBSession.query(app_model.DimMajorgroup).get(mj_id)
cogs = app_model.FactCogs()
cogs.mjr_grp_num = mj.majorgroup_ref_code
cogs.mjr_grp_name = mj.majorgroup_ref
cogs.team_id = team.location_id
cogs.location_code=team.location_code
for k, v in kw.iteritems():
if hasattr(cogs, k):
setattr(cogs, k , v)
DBSession.add(cogs)
return redirect('/cashup/wtr_show/%s/%s/%s#cogs'%(team_alias,
year, week))
@expose()
def delete_cogs(self, team_alias, year, week, cogs_id, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cogs = DBSession.query(app_model.FactCogs).get(cogs_id)
DBSession.delete(cogs)
return redirect('/cashup/wtr_show/%s/%s/%s#cogs'%(team_alias,
year, week))
@expose()
def cashier_del_relation(self, team_alias, cash_report_id, rel_type, rel_id, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report = DBSession.query(CashReport).filter_by(cash_report_id=cash_report_id).one()
ret = DBSession.query(CashCurrencyTotal).filter_by(cashcurrencytotal_id=rel_id)
DBSession.delete(ret.one())
return redirect('/cashup/cashier_show/%s/%s'%(team_alias,
cash_report.cash_report_id))
@expose()
def del_relation(self, team_alias, cash_report_id, rel_type, rel_id, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cash_report = DBSession.query(CashReport).filter_by(cash_report_id=cash_report_id).one()
if rel_type == 'tot_input_errors':
ret = DBSession.query(InputError).filter_by(input_error_id=rel_id)
if rel_type == 'tot_invoices':
ret = DBSession.query(Invoice).filter_by( invoice_id=rel_id)
if rel_type == 'tot_suspended':
ret = DBSession.query(Suspended).filter_by( suspended_id=rel_id)
if rel_type == 'tot_others':
ret = DBSession.query(Other).filter_by( other_id=rel_id)
if rel_type == 'pos':
ret = DBSession.query(DebitCardTotal).filter_by( debitcardtotal_id=rel_id)
if rel_type == 'credit_card':
ret = DBSession.query(CreditCardTotal).filter_by( creditcardtotal_id=rel_id)
if rel_type == 'ticket':
ret = DBSession.query(TicketTotal).filter_by( tickettotal_id=rel_id)
elif rel_type == 'levy':
ret = DBSession.query(CashCurrencyTotal).filter_by(cashcurrencytotal_id=rel_id)
DBSession.delete(ret.one())
return redirect('/cashup/report_show/%s/%s#%s'%(team_alias,
cash_report.report.report_id,
cash_report.cash.number))
#@paginate('reports', items_per_page=31)
@expose('cashup.templates.wtr_index')
def wtr_index(self, team_alias):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
cols, ret = QueryWTR.get_all(team_alias)
return dict(page='/cashup/wtr_index', team=team, ret=ret, cols=cols, team_alias=team_alias)
#@paginate('reports', items_per_page=31)
@expose('cashup.templates._wtr_show')
def _wtr_show(self, team_alias, year, week):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
dsow ,rvcs,mjs,ret,\
cols,ret_ttl,cols_mi,\
ret_mi,cols_mi_ttl,\
ret_mi_cons,cols_mi_cons, \
ret_mi_ttl,\
cols_sales, sales,cols_salesmix, salesmix,\
cols_salesmix_cons, salesmix_cons,\
sales_ttl,\
salesmix_ttl,\
ret_tax = QueryWTR.get(team.location_code, year, week)
rvcs = [{'rvc_name': u'TOTAL', 'rvc_number': 0}]+rvcs
ret_mi[0] = salesmix
ret[0]=sales
ret_mi_cons[0] = salesmix_cons
ret_mi_ttl[0]=salesmix_ttl
ret_ttl[0]=sales_ttl
cogs = QueryWTR.get_cogs(team.location_code, year, week)
return dict(page='/cashup/wtr_show', ret=ret, dsow=dsow, cols=cols,
rvcs=rvcs, ret_ttl=ret_ttl, ret_mi=ret_mi, cols_mi=cols_mi,
mjs=mjs, team_alias=team_alias, week=week, year=year, team=team,
ret_mi_ttl=ret_mi_ttl, cols_mi_ttl=cols_mi_ttl,
ret_mi_cons=ret_mi_cons, cols_mi_cons=cols_mi_cons,
sales = sales,
cols_sales = cols_sales,
salesmix = salesmix,
cols_salesmix = cols_salesmix,
tax=ret_tax)
#@paginate('reports', items_per_page=31)
@expose('cashup.templates.wtr_show')
def wtr_show(self, team_alias, year, week):
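        # Weekly trading report page: load (or create) the Wtr row for the week, then gather
        # flash sales data, COGS, budget and payment pivots and hand everything to the template.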
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
try:
wtr = DBSession.query(Wtr).filter_by(location_code=team.location_code,
week=week, year=year).one()
except:
wtr = Wtr()
wtr.year=year
wtr.week=week
wtr.location_code = team.location_code
DBSession.add(wtr)
DBSession.flush()
ret = QueryWTR.get(team.location_code, year, week)
ret['team'] = team
ret['team_alias'] = team_alias
ret['week'] = week
ret['year'] = year
cogs_cols, cogs = QueryWTR.get_cogs(team.location_code, year, week)
cogs_cols_ttl, cogs_ttl = QueryWTR.get_cogs(team.location_code, year, week, mjs=False)
cogs_ttl = dict(zip(cogs_cols_ttl, cogs_ttl[0]))
for k in cogs_ttl.keys():
if not cogs_ttl[k]:
cogs_ttl[k] = 0
bdgly_cols, bdgly_lst = QueryWTR.get_bdgly(team.location_code, year, week)
bdgly_cols_ttl, bdgly_ttl = QueryWTR.get_bdgly(team.location_code, year, week, rvc=False)
bdgly_ttl = dict(zip(bdgly_cols_ttl, bdgly_ttl[0]))
pmnts = []
try:
pmnts.append(QueryWTR.get_pmnt(team.location_code, year, week, pmntobj=Ticket, pmntobj_ttl=TicketTotal) )
except:
pass
try:
pmnts.append(QueryWTR.get_pmnt(team.location_code, year, week, pmntobj=CreditCard, pmntobj_ttl=CreditCardTotal) )
except:
pass
try:
pmnts.append(QueryWTR.get_pmnt(team.location_code, year, week, pmntobj=DebitCard, pmntobj_ttl=DebitCardTotal) )
except:
pass
try:
pmnts.append(QueryWTR.get_pmnt(team.location_code, year, week, pmntobj=CashCurrency, pmntobj_ttl=CashCurrencyTotal) )
except:
pass
from pprint import pprint
#pprint(pmnt_cols)
#pprint(pmnts)
for k in cogs_ttl.keys():
if not cogs_ttl[k]:
cogs_ttl[k] = 0
bdgly=dict()
for row in bdgly_lst:
rdict = dict(zip(bdgly_cols, row))
bdgly[rdict['rvc_number']] = rdict
try:
pettycash = QueryWTR.get_pettycash(team.location_code, year, week)
print pettycash
except:
pettycash = None
return dict(page='/cashup/wtr_show', cogs_cols=cogs_cols,
cogs=cogs,cogs_ttl=cogs_ttl,
bdgly=bdgly, bdgly_ttl=bdgly_ttl,
pmnts=pmnts,
pettycash=pettycash,
wtr=wtr,
**ret )
@expose()
def publish_wtr(self, **kw):
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==kw.get('team_alias')).one()
users = [ i.user for i in team.members ]
if user in users:
location_code = team.location_code
week = kw.get('week')
year = kw.get('year')
try:
wtr = DBSession.query(Wtr).filter_by(location_code=location_code,
week=week, year=year).one()
except:
wtr = Wtr()
wtr.year=year
wtr.week=week
wtr.location_code = team.location_code
#notify=kw.get('notify')
wtr.published=True
DBSession.add(wtr)
DBSession.flush()
return redirect('/cashup/wtr_show/%s/%s/%s'%(kw.get('team_alias'), year, week))
def _checkout_accounts(self, location_code, year, week, date_from, date_to, **kw):
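        # Assemble the weekly accounts journal as a dict keyed by posting order: credit lines
        # from the sales mix (mapped onto DimAccounts rows via a hard-coded major-group
        # mapper), a zero-rated split subtracted from food, the service charge, and debit
        # lines for cash, vouchers, petty cash and card payments. Relies on data.getfacts_df
        # and an srv value presumably defined at module level elsewhere in this controller.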
ret = []
accounts_data = {}
# get salesmix amounts
attributes,aggregates, df =data.getfacts_df('salesmix', [[location_code]], [], [], srv=srv,date_from=date_from, date_to=date_to,drilldown=['item'])
records = df.to_dict('records')
mapper = {'1': '1', '2': '3', '3': '17', '4': '4', '6': '5'}
for rec in records:
obj_dict = dict(location_code=location_code, year=year, week=week)
from pprint import pprint
pprint(rec)
dim_value = DBSession.query(app_model.DimAccounts).filter_by(accounts_id=mapper.get(rec['item.mjr_id'])).one()
for key , val in dim_value.__dict__.iteritems():
if not key.startswith('_') and not key.endswith('_id'):
obj_dict[key] = val
obj_dict['credit_amount']=rec['gross_sales_sum']
accounts_data[obj_dict['order']] = obj_dict
# subtract 0 taxrate to food
attributes,aggregates, df =data.getfacts_df('taxrate', [[location_code]], [], [], srv=srv,date_from=date_from, date_to=date_to,drilldown=['tax'])
records = df.to_dict('records')
rec = None
for zerorate in records:
if zerorate['tax.tax_id'] == 'V00':
rec = zerorate
obj_dict = dict(location_code=location_code, year=year, week=week)
dim_value = DBSession.query(app_model.DimAccounts).filter_by(accounts_id=2).one()
for key , val in dim_value.__dict__.iteritems():
if not key.startswith('_') and not key.endswith('_id'):
obj_dict[key] = val
obj_dict['credit_amount']=rec['gsales_sum']
accounts_data[1]['credit_amount'] = accounts_data[1]['credit_amount'] - obj_dict['credit_amount']
accounts_data[obj_dict['order']] = obj_dict
# service
attributes,aggregates, df =data.getfacts_df('sales', [[location_code]], [], [], srv=srv,date_from=date_from, date_to=date_to,drilldown=['date'])
records = df.to_dict('records')
service = records[0].get('srv_sum', 0)
obj_dict = dict(location_code=location_code, year=year, week=week)
dim_value = DBSession.query(app_model.DimAccounts).filter_by(accounts_id=18).one()
for key , val in dim_value.__dict__.iteritems():
if not key.startswith('_') and not key.endswith('_id'):
obj_dict[key] = val
obj_dict['credit_amount']=service
accounts_data[obj_dict['order']] = obj_dict
#cash currency
codes, ret, ret_ttl = self.get_pmnt(location_code, year, week, session=DBSession, pmntobj=CashCurrency, pmntobj_ttl=CashCurrencyTotal)
obj_dict = dict(location_code=location_code, year=year, week=week)
dim_value = DBSession.query(app_model.DimAccounts).filter_by(accounts_id=6).one()
for key , val in dim_value.__dict__.iteritems():
if not key.startswith('_') and not key.endswith('_id'):
obj_dict[key] = val
obj_dict['debit_amount']=ret_ttl.get((u'GBP', u'GBP'),0)
accounts_data[obj_dict['order']] = obj_dict
# vouchers
obj_dict = dict(location_code=location_code, year=year, week=week)
dim_value = DBSession.query(app_model.DimAccounts).filter_by(accounts_id=8).one()
for key , val in dim_value.__dict__.iteritems():
if not key.startswith('_') and not key.endswith('_id'):
obj_dict[key] = val
obj_dict['debit_amount']=ret_ttl.get((u'100', u'OTHER'), 0)
accounts_data[obj_dict['order']] = obj_dict
# petty cash
ret_ttl = self.get_pettycash(location_code, year, week, session=DBSession)
obj_dict = dict(location_code=location_code, year=year, week=week)
dim_value = DBSession.query(app_model.DimAccounts).filter_by(accounts_id=7).one()
for key , val in dim_value.__dict__.iteritems():
if not key.startswith('_') and not key.endswith('_id'):
obj_dict[key] = val
obj_dict['debit_amount']=ret_ttl['All']
accounts_data[obj_dict['order']] = obj_dict
# add debit amount Credit Cards
codes, ret, ret_ttl = self.get_pmnt(location_code, year, week, session=DBSession, pmntobj=CreditCard, pmntobj_ttl=CreditCardTotal)
obj_dict = dict(location_code=location_code, year=year, week=week)
dim_value = DBSession.query(app_model.DimAccounts).filter_by(accounts_id=9).one()
for key , val in dim_value.__dict__.iteritems():
if not key.startswith('_') and not key.endswith('_id'):
obj_dict[key] = val
obj_dict['debit_amount']=ret_ttl[('All', '')]
accounts_data[obj_dict['order']] = obj_dict
# add debit amount Amex
codes, ret, ret_ttl = self.get_pmnt(location_code, year, week, session=DBSession, pmntobj=DebitCard, pmntobj_ttl=DebitCardTotal)
obj_dict = dict(location_code=location_code, year=year, week=week)
dim_value = DBSession.query(app_model.DimAccounts).filter_by(accounts_id=10).one()
for key , val in dim_value.__dict__.iteritems():
if not key.startswith('_') and not key.endswith('_id'):
obj_dict[key] = val
obj_dict['debit_amount']=ret_ttl[('All', '')]
accounts_data[obj_dict['order']] = obj_dict
#pprint(accounts_data)
return accounts_data
def _merge_accounts(self, checkout):
"""
Merge Cashup and Budget Branches
"""
keys = checkout.keys()
keys.sort()
ret = []
for key in keys:
ret.append(checkout[key])
index = [
u'order',
u'year',
u'week',
u'location_code',
u'code',
u'description',
u'debit_amount',
u'credit_amount',
u'posting_type',
u'bus_posting_group',
u'prod_posting_group',
u'vat_bus_posting_group',
u'vat_prod_posting_group',
]
df = pd.DataFrame(ret)
df = df[index]
return df
def get_pettycash(self, location_code, year, week=None, date=None, session=DBSession, pandas=False):
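        # Pivot the petty-cash entries (Other rows reached through CashReport/Report) for the
        # requested week or single date. With pandas=True the full date x category pivot is
        # returned; otherwise only the per-note grand totals (the 'All' margin) are returned.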
# QUERY DAYS OF WEEK
fltr = [
app_model.DimDate.year==year,
]
if week:
fltr=fltr+[
app_model.DimDate.week_year==week,
]
if date:
fltr=fltr+[
app_model.DimDate.date==date,
]
days = session.query(app_model.DimDate).filter(
and_(*fltr)).order_by(app_model.DimDate.date).all()
date_from=days[0]
date_to=days[len(days)-1]
location_codes = [location_code]
groupby = [
app_model.DimDate.date,
FieldOther.value,
Other.note,
]
fltr_locations = []
for loc_code in location_codes:
fltr_locations.append(
app_model.DimLocation.location_code == loc_code,
)
if fltr_locations:
fltr.append(or_(*fltr_locations))
query_lst = groupby + [
label('amount', Other.amount),
]
ret = session.query(*query_lst).group_by(*groupby)
ret = ret.outerjoin(Report,
app_model.DimDate.date == Report.date
)
ret = ret.outerjoin(app_model.DimLocation,
Report.team_id == app_model.DimLocation.location_id
)
ret = ret.outerjoin(CashReport,
CashReport.report_id == Report.report_id
)
ret = ret.outerjoin(Other,
Other.cash_report_id == CashReport.cash_report_id
)
ret = ret.outerjoin(FieldOther,
FieldOther.fieldothers_id == Other.fieldothers_id
)
ret = ret.filter(and_(*fltr)).all()
cols = _get_cols(query_lst)
ret_pmnt = []
for row in ret:
ret_pmnt.append(dict(zip(cols, row)))
import pandas as pd
import numpy as np
df = pd.DataFrame(ret, columns=cols)
df.fillna(value=np.nan)
#print pd.unique([df.code, df.alias])
if pandas:
table = pd.pivot_table(df, index=['date','note'], columns=['value'], values=['amount'],aggfunc=[np.sum], margins=True)
table = np.round(table, decimals=2)
return table
table = pd.pivot_table(df, index=['note'], columns=['value'], values=['amount'],aggfunc=[np.sum], margins=True)
table = np.round(table, decimals=2)
from pprint import pprint
tbl = table.to_dict()
#print table
ret = {}
ret_ttl = tbl[('sum', 'amount', 'All')]
return ret_ttl
def get_pmnt(self, location_code, year, week=None, date=None, session=DBSession, pmntobj=None, pmntobj_ttl=None, pandas=False):
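        # Generic payment summary for one location and week (or single date): sums the given
        # totals model (pmntobj_ttl) grouped by payment code/alias and date. Returns the raw
        # DataFrame when pandas=True, otherwise (codes, per-day totals, grand totals) taken
        # from a pivot table with an 'All' margin.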
# QUERY DAYS OF WEEK
pmntobj_id = "%s_id"%pmntobj.__name__.lower()
fltr = [
app_model.DimDate.year==year,
]
if week:
fltr=fltr+[
app_model.DimDate.week_year==week,
]
if date:
fltr=fltr+[
app_model.DimDate.date==date,
]
days = session.query(app_model.DimDate).filter(
and_(*fltr)).order_by(app_model.DimDate.date).all()
date_from=days[0]
date_to=days[len(days)-1]
location_codes = [location_code]
groupby = [
app_model.DimDate.date,
getattr(pmntobj,'code'),
getattr(pmntobj,'alias'),
]
fltr_locations = []
for loc_code in location_codes:
fltr_locations.append(
app_model.DimLocation.location_code == loc_code,
)
if fltr_locations:
fltr.append(or_(*fltr_locations))
query_lst = groupby + [
label('amount', func.sum(getattr(pmntobj_ttl, 'amount'))),
]
ret = session.query(*query_lst).group_by(*groupby)
ret = ret.outerjoin(Report,
app_model.DimDate.date == Report.date
)
ret = ret.outerjoin(app_model.DimLocation,
Report.team_id == app_model.DimLocation.location_id
)
ret = ret.outerjoin(CashReport,
CashReport.report_id == Report.report_id
)
ret = ret.outerjoin(pmntobj_ttl,
pmntobj_ttl.cash_report_id == CashReport.cash_report_id
)
ret = ret.outerjoin(pmntobj,
getattr(pmntobj, pmntobj_id) == getattr(pmntobj_ttl, pmntobj_id)
)
ret = ret.filter(and_(*fltr)).all()
cols = _get_cols(query_lst)
ret_pmnt = []
for row in ret:
ret_pmnt.append(dict(zip(cols, row)))
df = pd.DataFrame(ret, columns=cols)
#np.round(df, decimals=2)
if pandas:
return df
#print pd.unique([df.code, df.alias])
table = pd.pivot_table(df, index=['code','alias'], columns=['date'], values=['amount'],aggfunc=[np.sum], margins=True)
table = np.round(table, decimals=2)
from pprint import pprint
tbl = table.to_dict()
ret = {}
ret_ttl = tbl[('sum', 'amount', 'All')]
codes = session.query(getattr(pmntobj,'code'),getattr(pmntobj,'alias')).order_by(getattr(pmntobj,'code')).all()
#print "****",codes
for d in days:
try:
row=tbl[('sum', 'amount', d.date)]
except:
row={('All',''):0.0}
ret[d]=row
#print dir(table)
#print tbl
return codes, ret, ret_ttl
@expose()
def wtr_account(self, team_alias, year, week, **kw):
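        # Export the weekly accounts workbook: build the journal, petty-cash and payment
        # pivots with pandas, write them to an in-memory xlsx via xlsxwriter, and stream the
        # result back as an attachment download.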
user= DBSession.query(app_model.User).filter(app_model.User.user_name==request.identity['repoze.who.userid']).one()
team = DBSession.query(app_model.DimLocation).filter(app_model.DimLocation.team_alias==team_alias).one()
users = [ i.user for i in team.members ]
if user in users:
bio = BytesIO()
# By setting the 'engine' in the ExcelWriter constructor.
writer = pd.ExcelWriter(bio, engine='xlsxwriter')
location_code = team.location_code
dobjs = DBSession.query(app_model.DimDate).filter(and_(app_model.DimDate.year==year, app_model.DimDate.week_year==week)).order_by(app_model.DimDate.date).all()
d1 = dobjs[0].date
d2 = dobjs[len(dobjs)-1].date
date_from = [str(d1.year), str(d1.month), str(d1.day)]
date_to = [str(d2.year), str(d2.month), str(d2.day) ]
#writer = pd.ExcelWriter('%s-%s-%s.xlsx'%(year,week,location_code), engine='xlsxwriter')
checkout = []
#for d in dates:
# #checkout = checkout + self.checkout(options, d, d)
# pprint(self.checkout(options, d, d))
checkout = self._checkout_accounts(location_code, year, week, date_from, date_to)
df = self._merge_accounts(checkout)
#np.round(df, decimals=2)
df.to_excel(writer, sheet_name='Accounts')
table = self.get_pettycash(location_code, year, week, pandas=True)
table.to_excel(writer, sheet_name='Pettycash')
frames = []
try:
df1 = self.get_pmnt(location_code, year, week, pmntobj=Ticket, pmntobj_ttl=TicketTotal, pandas=True)
df1['TYPE'] = 'TICKET'
#df1.to_excel(writer, sheet_name='TICKET')
frames.append(df1)
except:
pass
try:
df2 = self.get_pmnt(location_code, year, week, pmntobj=CreditCard, pmntobj_ttl=CreditCardTotal,pandas=True)
df2['TYPE'] = 'CARD 01'
frames.append(df2)
#df2.to_excel(writer, sheet_name='CARD 01')
except:
pass
try:
df3 = self.get_pmnt(location_code, year, week, pmntobj=DebitCard, pmntobj_ttl=DebitCardTotal, pandas=True)
df3['TYPE'] = 'CARD 02'
frames.append(df3)
#df3.to_excel(writer, sheet_name='CARD 02')
except:
pass
try:
df4 = self.get_pmnt(location_code, year, week, pmntobj=CashCurrency, pmntobj_ttl=CashCurrencyTotal, pandas=True)
df4['TYPE'] = 'CASH'
frames.append(df4)
#df4.to_excel(writer, sheet_name='CASH')
except:
pass
result = pd.concat(frames)
table = pd.pivot_table(result, index=['TYPE'], columns=['date'], values=['amount'],aggfunc=[np.sum], margins=True)
#for tp in table.index.get_level_values(0).unique():
# ntb = table.xs(tp, level=0)#
# ntb.to_excel(writer, sheet_name='Payments')
            # Write the combined payments pivot table to its own sheet
table.to_excel(writer, sheet_name='Payments')
print frames
for df in frames:
df = df[pd.notnull(df['amount'])]
try:
tbl = pd.pivot_table(df, index=['alias'], columns=['date'], values=['amount'],aggfunc=[np.sum], margins=True)
print df.iloc[0]['TYPE']
tbl.to_excel(writer, sheet_name=df.iloc[0]['TYPE'])
except:
pass
# Save the workbook
writer.save()
# Seek to the beginning and read to copy the workbook to a variable in memory
bio.seek(0)
workbook = bio.read()
response.content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
response.headerlist.append(('Content-Disposition','attachment;filename=export.xlsx'))
return workbook
| mit |
samfu1994/cs838webpage | stage5/src/boxplot_demo.py | 1 | 4501 | """
Thanks Josh Hemann for the example
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
# Generate some data from five different probability distributions,
# each with different characteristics. We want to play with how an IID
# bootstrap resample of the data preserves the distributional
# properties of the original sample, and a boxplot is one visual tool
# to make this assessment
numDists = 5
randomDists = ['Normal(1,1)', 'Lognormal(1,1)', 'Exp(1)', 'Gumbel(6,4)',
'Triangular(2,9,11)']
N = 500
np.random.seed(0)
norm = np.random.normal(1, 1, N)
logn = np.random.lognormal(1, 1, N)
expo = np.random.exponential(1, N)
gumb = np.random.gumbel(6, 4, N)
tria = np.random.triangular(2, 9, 11, N)
# Generate some random indices that we'll use to resample the original data
# arrays. For code brevity, just use the same random indices for each array
bootstrapIndices = np.random.random_integers(0, N - 1, N)
normBoot = norm[bootstrapIndices]
expoBoot = expo[bootstrapIndices]
gumbBoot = gumb[bootstrapIndices]
lognBoot = logn[bootstrapIndices]
triaBoot = tria[bootstrapIndices]
data = [norm, normBoot, logn, lognBoot, expo, expoBoot, gumb, gumbBoot,
tria, triaBoot]
for item in data:
print(item)
fig, ax1 = plt.subplots(figsize=(10, 6))
fig.canvas.set_window_title('A Boxplot Example')
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = plt.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
ax1.set_title('Comparison of IID Bootstrap Resampling Across Five Distributions')
ax1.set_xlabel('Distribution')
ax1.set_ylabel('Value')
# Now fill the boxes with desired colors
boxColors = ['darkkhaki', 'royalblue']
numBoxes = numDists*2
medians = list(range(numBoxes))
for i in range(numBoxes):
box = bp['boxes'][i]
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = list(zip(boxX, boxY))
# Alternate between Dark Khaki and Royal Blue
k = i % 2
boxPolygon = Polygon(boxCoords, facecolor=boxColors[k])
ax1.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
plt.plot(medianX, medianY, 'k')
medians[i] = medianY[0]
# Finally, overplot the sample averages, with horizontal alignment
# in the center of each box
plt.plot([np.average(med.get_xdata())], [np.average(data[i])],
color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, numBoxes + 0.5)
top = 40
bottom = -5
ax1.set_ylim(bottom, top)
xtickNames = plt.setp(ax1, xticklabels=np.repeat(randomDists, 2))
plt.setp(xtickNames, rotation=45, fontsize=8)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(numBoxes) + 1
upperLabels = [str(np.round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick, label in zip(range(numBoxes), ax1.get_xticklabels()):
k = tick % 2
ax1.text(pos[tick], top - (top*0.05), upperLabels[tick],
horizontalalignment='center', size='x-small', weight=weights[k],
color=boxColors[k])
# Finally, add a basic legend
plt.figtext(0.80, 0.08, str(N) + ' Random Numbers',
backgroundcolor=boxColors[0], color='black', weight='roman',
size='x-small')
plt.figtext(0.80, 0.045, 'IID Bootstrap Resample',
backgroundcolor=boxColors[1],
color='white', weight='roman', size='x-small')
plt.figtext(0.80, 0.015, '*', color='white', backgroundcolor='silver',
weight='roman', size='medium')
plt.figtext(0.815, 0.013, ' Average Value', color='black', weight='roman',
size='x-small')
plt.show() | mit |
xyguo/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
nesterione/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
largelymfs/w2vtools | build/scipy/scipy/interpolate/fitpack2.py | 5 | 55068 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
class UnivariateSpline(object):
"""
One-dimensional smoothing spline fit to a given set of data points.
Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be <= 5.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied:
sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s
If None (default), s=len(w) which should be a good value if 1/w[i] is
an estimate of the standard deviation of y[i]. If 0, spline will
interpolate through all data points.
See Also
--------
InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
LSQUnivariateSpline : Subclass in which knots are user-selected instead of
being set by smoothing condition
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> from numpy import linspace,exp
>>> from numpy.random import randn
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> x = linspace(-3, 3, 100)
>>> y = exp(-x**2) + randn(100)/10
>>> s = UnivariateSpline(x, y, s=1)
>>> xs = linspace(-3, 3, 1000)
>>> ys = s(xs)
>>> plt.plot(x, y, '.-')
>>> plt.plot(xs, ys)
>>> plt.show()
xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y.
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
s - positive smoothing factor defined for
estimation condition:
sum((w[i]*(y[i]-s(x[i])))**2,axis=0) <= s
Default s=len(w) which should be a good value
if 1/w[i] is an estimate of the standard
deviation of y[i].
"""
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None,None,None,None,None,k,None,len(t),t,
c,None,None,None,None)
return self
def _reset_class(self):
data = self._data
n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
self._eval_args = t[:n],c[:n],k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k,m = data[5],len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[j], nest) for j in [8,9,11,12]]
args = data[:8] + (t,c,n,fpint,nrdata,data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
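        A short sketch, reusing the spline ``s`` from the class docstring
        example (the new factor is an arbitrary illustration):
        >>> s.set_smoothing_factor(0.5)
        >>> ys = s(xs)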
"""
data = self._data
if data[6] == -1:
warnings.warn('smoothing factor unchanged for'
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0):
""" Evaluate spline (or its nu-th derivative) at positions x.
Note: x can be unordered but the evaluation is more efficient
if x is (partially) ordered.
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
# if nu is None:
# return dfitpack.splev(*(self._eval_args+(x,)))
# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
return fitpack.splev(x, self._eval_args, der=nu)
def get_knots(self):
""" Return positions of (boundary and interior) knots of the spline.
"""
data = self._data
k,n = data[5],data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k,n = data[5],data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline
approximation: ``sum((w[i] * (y[i]-s(x[i])))**2, axis=0)``.
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
"""
return dfitpack.splint(*(self._eval_args+(a,b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x."""
d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
"""
k = self._data[5]
if k == 3:
z,m,ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
.. versionadded:: 0.13.0
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\pi/2 + n\pi` of `cos(x) = sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
return UnivariateSpline._from_tck(tck)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
.. versionadded:: 0.13.0
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck)
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
One-dimensional interpolating spline for a given set of data points.
Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. Spline
function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), bbox=[x[0],x[-1]].
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
See Also
--------
UnivariateSpline : Superclass -- allows knots to be selected by a
smoothing condition
LSQUnivariateSpline : spline for which knots are user-selected
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> from numpy import linspace,exp
>>> from numpy.random import randn
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = linspace(-3, 3, 100)
>>> y = exp(-x**2) + randn(100)/10
>>> s = InterpolatedUnivariateSpline(x, y)
>>> xs = linspace(-3, 3, 1000)
>>> ys = s(xs)
>>> plt.plot(x, y, '.-')
>>> plt.plot(xs, ys)
>>> plt.show()
xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
"""
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=0)
self._reset_class()
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y=s(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order
and bbox[0]<t[0]<...<t[-1]<bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), bbox=[x[0],x[-1]].
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> from numpy import linspace,exp
>>> from numpy.random import randn
>>> from scipy.interpolate import LSQUnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = linspace(-3,3,100)
>>> y = exp(-x**2) + randn(100)/10
>>> t = [-1,0,1]
>>> s = LSQUnivariateSpline(x,y,t)
>>> xs = linspace(-3,3,1000)
>>> ys = s(xs)
>>> plt.plot(x, y, '.-')
>>> plt.plot(xs, ys)
>>> plt.show()
xs,ys is now a smoothed, super-sampled version of the noisy gaussian x,y
with knots [-3,-1,0,1,3]
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3):
"""
Input:
x,y - 1-d sequences of data points (x must be
in strictly ascending order)
t - 1-d sequence of the positions of user-defined
interior knots of the spline (t must be in strictly
ascending order and bbox[0]<t[0]<...<t[-1]<bbox[-1])
Optional input:
w - positive 1-d sequence of weights
bbox - 2-sequence specifying the boundary of
the approximation interval.
By default, bbox=[x[0],x[-1]]
k=3 - degree of the univariate spline.
"""
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1),t,[xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0,axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
data = dfitpack.fpcurfm1(x,y,k,t,w=w,xb=xb,xe=xe)
self._data = data[:-3] + (None,None,data[-1])
self._reset_class()
################ Bivariate spline ####################
class _BivariateSplineBase(object):
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
BivariateSpline :
implementation of bivariate spline interpolation on a plane grid
SphereBivariateSpline :
implementation of bivariate spline interpolation on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, mth=None, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array-like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted to increasing order.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
mth : str
Deprecated argument. Has no effect.
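        Examples
        --------
        A short sketch of the two calling conventions, using an arbitrarily
        chosen concrete subclass and data (illustrative only):
        >>> from scipy.interpolate import RectBivariateSpline
        >>> x = np.linspace(0., 4., 25)
        >>> y = np.linspace(0., 4., 33)
        >>> spl = RectBivariateSpline(x, y, np.outer(np.sin(x), np.cos(y)))
        >>> spl([1., 2., 3.], [0.5, 1.5]).shape              # grid -> (3, 2)
        >>> spl([1., 2., 3.], [0.5, 1.5, 2.5], grid=False).shape  # points -> (3,)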
"""
x = np.asarray(x)
y = np.asarray(y)
if mth is not None:
warnings.warn("The `mth` argument is deprecated and will be removed",
FutureWarning)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.parder(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.pardeu(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z,ier = dfitpack.bispeu(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
SphereBivariateSpline :
        bivariate spline interpolation in spherical coordinates
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array-like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
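        Examples
        --------
        A small self-contained sketch (the constant test data is an
        arbitrary illustration):
        >>> from scipy.interpolate import RectBivariateSpline
        >>> x = y = np.linspace(0., 1., 20)
        >>> spl = RectBivariateSpline(x, y, np.ones((20, 20)))
        >>> spl.integral(0., 1., 0., 1.)   # ~1.0 for constant unit data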
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
    LSQBivariateSpline : to create a BivariateSpline using weighted least-squares fitting
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
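    Examples
    --------
    A minimal sketch on synthetic scattered data (the data, sizes and the
    evaluation point below are arbitrary illustrations, not part of the
    original documentation):
    >>> x = np.random.uniform(-1., 1., 200)
    >>> y = np.random.uniform(-1., 1., 200)
    >>> z = np.cos(x) * np.sin(y)
    >>> spl = SmoothBivariateSpline(x, y, z)
    >>> spl(0.3, 0.4)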
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier > 10: # lwrk2 was to small, re-run
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
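    Examples
    --------
    A minimal sketch (the data and the knot placement are arbitrary
    illustrations; knots must lie strictly inside the data range):
    >>> x = np.random.uniform(-1., 1., 400)
    >>> y = np.random.uniform(-1., 1., 400)
    >>> z = np.exp(-(x**2 + y**2))
    >>> tx = ty = [-0.5, 0., 0.5]
    >>> spl = LSQBivariateSpline(x, y, z, tx, ty)
    >>> spl(0.1, 0.2)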
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,),float)
ty1 = zeros((ny,),float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb,xe,yb,ye = bbox
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=1)
if ier > 10:
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1, ty1, c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
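    Examples
    --------
    A minimal sketch on gridded data (grid sizes and the test function are
    arbitrary illustrations):
    >>> x = np.linspace(0., 4., 25)
    >>> y = np.linspace(0., 4., 33)
    >>> z = np.outer(np.sin(x), np.cos(y))
    >>> interp = RectBivariateSpline(x, y, z)
    >>> interp(1.5, 2.5)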
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not all(diff(x) > 0.0):
raise TypeError('x must be strictly increasing')
if not all(diff(y) > 0.0):
raise TypeError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise TypeError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise TypeError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise TypeError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise TypeError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if not ier in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
    SmoothSphereBivariateSpline :
        to create a BivariateSpline through the given points
    LSQSphereBivariateSpline :
        to create a BivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array-like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted to increasing order.
dtheta : int
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array-like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(),s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
        if isinstance(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if not ier in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(),knotst,knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
        if isinstance(w, float):
w = ones(len(theta)) * w
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message)
elif not ier in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians, and must lie within (0, 2pi).
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
    Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
    >>> for ii in range(len(s)):
>>> lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
>>> ax = fig2.add_subplot(2, 2, ii+1)
>>> ax.imshow(data_interp, interpolation='nearest')
>>> ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=int)
ider = np.array([-1, 0, -1, 0], dtype=int)
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
if not np.all(np.diff(u) > 0.0):
raise TypeError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise TypeError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise TypeError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise TypeError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(), v.copy(), r.copy(), r0, r1, s)
if not ier in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
| mit |
operalib/operalib | operalib/metrics.py | 2 | 1391 | """
:mod:`operalib.metrics` defines supplementary metrics not
available in scikit-learn
"""
# Author: Romain Brault <[email protected]> with help from
# the scikit-learn community.
# License: MIT
from numpy import log, pi, sqrt, exp, cos
from sklearn.metrics.pairwise import check_pairwise_arrays, euclidean_distances
def first_periodic_kernel(X, Y=None, gamma=None, period=None):
# TODO: Add mathematical form of the kernel in the docstring
"""Compute the first periodic kernel between *X* and *Y*.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
        If None, defaults to 0.8 (the fallback value used by the
        implementation below).
period : float, default None
If None, default to 2 * pi.
        This parameter should not be left at its default, since a wrong
        estimate of the period leads to a poor learning score.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
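    Notes
    -----
    This is a reading of the implementation below rather than an official
    statement of the kernel: with ``a = -log(gamma) / period``,
    ``b = 2 * pi / period`` and
    ``c = sqrt(pi / a) * (exp(-b ** 2 / (4 * a)) + 1)``, the entry for a
    pair of points x, y is
    ``exp(-a * ||x - y||**2) * (1 + cos(b * ||x - y||)) / c``.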
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 0.8
if period is None:
period = 2. * pi
a = -log(gamma) / period
b = 2 * pi / period
c = sqrt(pi / a) * (exp(- b ** 2 / (4 * a)) + 1)
K = euclidean_distances(X, Y, squared=True)
# TODO: Optimize to avoid temporary?
return exp(-a * K) * (1 + cos(b * sqrt(K))) / c
| bsd-3-clause |
lzamparo/SdA_reduce | plot_scripts/validation_set_NOP10_ggplot_densities.py | 1 | 4334 | # coding: utf-8
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
from common_density_plot_utils import *
#################### The script part to generate the plots, and find the limits ####################
import pickle as pkl
from numpy.random import shuffle
# Grab the headers for the AreaShape features
as_header_file = open("/data/NOP10/Cells_headers.txt")
as_list = as_header_file.readlines()
as_header_file.close()
as_headers = [item.strip().split()[1] for item in as_list]
as_positions = [item.strip().split()[0] for item in as_list]
# Select data from samples using the as_headers as well as 'Label'
as_headers.append('Label')
# Grab the labeled data, randomly sub-sample one of each labeled files with stratification
labeled_examples_pd = pd.DataFrame.from_csv('/data/NOP10/Phenotypes_Nucleolus_Samples_TS2.csv',index_col=False)
label_groups = labeled_examples_pd.groupby('Label')['FileName']
# Shuffle each label's file indices in place, then keep the first index per label.
for k, v in label_groups.groups.iteritems():
    shuffle(v)
indices = [v[0] for k, v in label_groups.groups.iteritems()]
sample_labeled_files = labeled_examples_pd.iloc[indices,:]
labeled_files = pd.unique(sample_labeled_files['FileName'])
plates = pd.unique(sample_labeled_files['Plate'])
# Grab the data for what labeled FileNames we have, keep only those
data_reader = pd.read_csv('/data/NOP10/SQL_Image_Object_GeneNames_Merged_TS2_NoGhosts.csv',index_col=5,iterator=True,chunksize=50000)
labeled_data = None
for chunk in data_reader:
chunk['ImageNumber'] = chunk.index
#labeled_file_pts = chunk[chunk['FileName'].isin(labeled_files) & chunk['Plate'].isin()]
labeled_file_pts = pd.merge(chunk, sample_labeled_files, on=["Plate","FileName"])
# skip chunks with no data from the files we've selected
if len(labeled_file_pts) == 0:
continue
# merge the labeled_file_pts with the labels of their matching FileNames
#labeled_data_pts = pd.merge(labeled_file_pts, labeled_files, on='FileName')
if labeled_data is None:
labeled_data = labeled_file_pts.loc[:,as_headers]
else:
labeled_data = labeled_data.append(labeled_file_pts.loc[:,as_headers], ignore_index=True)
# Go through the features, calculate the thresholds
thresholds = {}
as_headers.remove("Label")
for feature in as_headers:
wt_mean = labeled_data[feature].where(labeled_data['Label'] == 'negative').mean()
wt_std = labeled_data[feature].where(labeled_data['Label'] == 'negative').std()
lower,upper = wt_mean - 2*wt_std, wt_mean + 2*wt_std
thresholds[feature] = (lower,upper)
# Pickle the thresholds, along with their column positions
filename = as_headers[0].split('_')[0] + "_" + "nop10"+ "_" + "thresholds.pkl"
pkl.dump((zip(as_positions,as_headers),thresholds), open(filename,'wb'))
# Pickle the labeled_data sample
filename = "NOP10_labeled_df.pkl"
pkl.dump((labeled_data),open(filename,'wb'))
#################### Plot the data and thresholds ####################
(ph, thresholds) = pkl.load(open("Cells_nop10_thresholds.pkl", mode='rb'))
labeled_data = pkl.load(open("NOP10_labeled_df.pkl", mode='rb'))
# We only care about these labels
labels_used = np.unique(labeled_data['Label']).tolist()
# Try a faceted density plot for each feature
fig = plt.figure(figsize=(24,11))
for n,key in enumerate(thresholds.keys()):
lower,upper = thresholds[key]
sp = fig.add_subplot(2,7,n+1)
x_vals = make_x_axis(labeled_data[labeled_data['Label'] == "negative"][key])
# plot all labels worth of densities, as well as the thresholds
for label in labels_used:
data = labeled_data[labeled_data['Label'] == label][key]
kde = make_kde(data)
rfill_between(sp, x_vals, kde(x_vals),label)
sp.set_title(key.split('_')[-1])
sp.axvline(lower,ls='--',color='k')
sp.axvline(upper,ls='--',color='k')
rstyle(sp)
# Put a legend below current axis
sp.legend(loc='upper center', bbox_to_anchor=(-3.35, -0.05),
fancybox=True, shadow=True, ncol=len(labels_used)/2)
# Put a title on the main figure
fig.suptitle("NOP10: Area and Shape Parameter Density Plots by Label (with 2 x std WT dashed)",fontsize=20)
fig.subplots_adjust(left=.03, right=.97, top=0.91,hspace=0.14,wspace=0.27)
plt.show()
| bsd-3-clause |
studywolf/blog | train_AHF/plot_error.py | 1 | 4956 | '''
Copyright (C) 2016 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import matplotlib.pyplot as plt
import glob
import sys
import seaborn
def gen_data_plot(folder="weights", index=None, show_plot=True,
save_plot=None, save_paths=False, verbose=True):
files = sorted(glob.glob('%s/rnn*' % folder))
files = files[:index] if index is not None else files
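# Each saved weights file encodes its training error in the filename
# (e.g. '...err<value>.npz'); parse those values out to plot the error curve.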
# plot the values over time
vals = []
for ii, name in enumerate(files):
if verbose:
print(name)
name = name.split('err')[1]
name = name.split('.npz')[0]
vals.append(float(name))
vals = np.array(vals)
plt.figure(figsize=(10, 3))
ax = plt.subplot2grid((1, 3), (0, 0), colspan=2)
ax.loglog(vals)
ax.loglog(range(len(vals)), np.ones(len(vals)) * min(vals), 'r--')
plt.xlim([0, len(files)])
plt.ylim([10**-5, 10])
plt.title('AHF training error')
plt.xlabel('Training iterations')
plt.ylabel('Error')
plt.yscale('log')
# load in the weights and see how well they control the arm
dt = 1e-2
sig_len = 40
# HACK: append system path to have access to the arm code
# NOTE: Change this path to wherever your plant model is kept!
sys.path.append("../../../studywolf_control/studywolf_control/")
# from arms.two_link.arm_python import Arm as Arm
from arms.three_link.arm import Arm as Arm
if verbose:
print('Plant is: %s' % str(Arm))
arm = Arm(dt=dt)
from hessianfree import RNNet
from hessianfree.nonlinearities import (Tanh, Linear)
from train_hf_3link import PlantArm, gen_targets
rec_coeff = [1, 1]
rec_type = "sparse"
eps = 1e-6
num_states = arm.DOF * 2
targets = gen_targets(arm, sig_len=sig_len)
init_state = np.zeros((len(targets), num_states), dtype=np.float32)
init_state[:, :arm.DOF] = arm.init_q # set up the initial joint angles
plant = PlantArm(arm, targets=targets,
init_state=init_state, eps=eps)
index = -1 if index is None else index
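# Load the trained weights from the requested checkpoint
# (defaults to the most recent one).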
W = np.load(files[index])['arr_0']
# make sure this network is the same as the one you trained!
net_size = 96
if '32' in folder:
net_size = 32
rnn = RNNet(shape=[num_states * 2,
net_size,
net_size,
num_states,
num_states],
layers=[Linear(), Tanh(), Tanh(), Linear(), plant],
debug=False,
rec_layers=[1, 2],
conns={0: [1, 2], 1: [2], 2: [3], 3: [4]},
W_rec_params={"coeff": rec_coeff, "init_type": rec_type},
load_weights=W,
use_GPU=False)
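# Run the network forward with the plant in the loop; the recorded states and
# targets are pulled back out of the plant below.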
rnn.forward(plant, rnn.W)
states = np.asarray(plant.get_vecs()[0][:, :, num_states:])
targets = np.asarray(plant.get_vecs()[1])
def kin(q):
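# Planar forward kinematics: the end-effector position is the sum of the link
# vectors, each rotated by the cumulative joint angles.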
x = np.sum([arm.L[ii] * np.cos(np.sum(q[:, :ii+1], axis=1))
for ii in range(arm.DOF)], axis=0)
y = np.sum([arm.L[ii] * np.sin(np.sum(q[:, :ii+1], axis=1))
for ii in range(arm.DOF)], axis=0)
return x,y
ax = plt.subplot2grid((1, 3), (0, 2))
# plot start point
initx, inity = kin(init_state)
ax.plot(initx, inity, 'x', mew=10)
for jj in range(0, len(targets)):
# plot target
targetx, targety = kin(targets[jj])
ax.plot(targetx, targety, 'rx', mew=1)
# plot path
pathx, pathy = kin(states[jj, :, :])
path = np.hstack([pathx[:, None], pathy[:, None]])
if save_paths is True:
np.savez_compressed('end-effector position%.3i.npz' % int(jj/8),
array1=path)
ax.plot(path[:, 0], path[:, 1])
plt.tight_layout()
# plt.xlim([-.1, .1])
# plt.ylim([.25, .45])
plt.title('Hand trajectory')
plt.xlabel('x')
plt.ylabel('y')
if save_plot is not None:
plt.savefig(save_plot)
if show_plot is True:
plt.show()
plt.close()
if __name__ == '__main__':
if len(sys.argv) < 2:
folder = "weights"
else:
folder = sys.argv[1]
if len(sys.argv) < 3:
index = None
else:
index = int(sys.argv[2])
gen_data_plot(folder=folder, index=index)
| gpl-3.0 |
alshedivat/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 30 | 70017 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
# so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
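# Three of the four identical training rows have label 0, so the predicted
# probability of class 1 should be close to 0.25 for every row.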
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
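# Three of the four training rows are positive, so the centered bias should
# converge to the prior log-odds of the positive class: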
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
# Expected is 100, but because of the global step increment bug, it is 50.
# Occasionally, step increments one more time due to a race condition,
# reaching 51 steps.
self.assertIn(step_counter.steps, [50, 51])
else:
# Occasionally, training stops when global_step == 102, due to a race
# condition. In addition, occasionally step increments one more time due
# to a race condition reaching 52 steps.
self.assertIn(step_counter.steps, [51, 52])
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
mehdidc/scikit-learn | sklearn/tests/test_calibration.py | 2 | 11711 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method,
random_state=42)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
        # As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
| bsd-3-clause |
enram/infrastructure | file_transfer/ad_hoc_management/bash_file_info.py | 2 | 1281 | #
# S. Van Hoey
#
# ENRAM meeting 2017-01
#
import sys
import argparse
import h5py
import pandas as pd
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filename', type=str,
help='filename of hdf5 file to check')
args = parser.parse_args()
filename = args.filename
quantities = []
elangles = []
shapes = []
hf = h5py.File(filename, 'r')
gen_info = dict(hf.get("what").attrs.items())
for key, val in hf.items():
if hf.get("/{}/data1/what".format(key)):
quantities.append(dict(hf.get("/{}/data1/what".format(key)).attrs.items())["quantity"].decode("utf-8"))
if hf.get("/{}/where".format(key)):
elangles.append(dict(hf.get("/{}/where".format(key)).attrs.items())["elangle"])
if hf.get("/{}/data1".format(key)):
temp = hf.get("/{}/data1/data".format(key)).__str__()
shapes.append(temp[28:38])
file_info = pd.DataFrame({"quantity":quantities, "elangle": elangles, "shape": shapes})
print(gen_info['date'].decode("utf-8"), gen_info["time"].decode("utf-8"), gen_info["source"].decode("utf-8").split(":")[-1][:5])
print(file_info.sort_values("elangle"))
if __name__ == "__main__":
sys.exit(main()) | mit |
pnedunuri/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
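# Each one-vs-all boundary satisfies coef[c, 0] * x0 + coef[c, 1] * x1 +
# intercept[c] = 0, so for a fixed x0 the boundary ordinate is
# x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1]; plot_hyperplane below
# simply draws that line between xmin and xmax.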
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
koroder/Chord-DHT-simulation | plots/examples/plengthvsnodes.py | 1 | 1507 | #import everything from matplotlib (numpy is accessible via 'np' alias)
from pylab import *
# Create a new figure of size 8x6 points, using 80 dots per inch
figure(figsize=(8,6), dpi=80)
# Create a new subplot from a grid of 1x1
ax = subplot(1,1,1)
#X = [10,20,40,80,160,320,640,1280,2560,5120,10240]
X = [100,1000,5000,10000]
#C = [2.6211,3.105,3.5564,4.1597,4.5139,4.9515,5.5412,6.0272,6.5374,7.0474,7.5223]
C = [4.35,6.987,8.787,9.965]
#LOGX = [3.32,4.32,5.32,6.32,7.32,8.32,9.32,10.32,11.32,12.32,13.32]
LOGX = [6.64,9.96,13.28]
# Plot cosine using blue color with a continuous line of width 1 (pixels)
plot(X, C, color="blue", linewidth=2.0, linestyle="--", marker='o')
#plot(LOGX, LOGX, color="blue", linewidth=1.0, linestyle="--")
# Plot sine using green color with a continuous line of width 1 (pixels)
# plot(X, S, color="green", linewidth=1.0, linestyle="-")
# Set x limits
xlim(0,10050)
# Set x ticks
#xticks([0,3.32,5.32,7.32,9.32,11.32,13.32],
# [r'$1$', r'$10$', r'$40$', r'$160$', r'$640$', r'$2560$', r'$10240$'])
xticks([1,100,1000,5000,10000],
[r'$1$', r'$100$', r'$1000$', r'$5000$', r'$10000$'])
#set x label
ax.set_xlabel('Number of nodes')
# Set y limits
ylim(0,50)
# Set y ticks
yticks([0,5,10,15,20,25,30,35,40,45,50],
       [r'$0$', r'$5$', r'$10$', r'$15$', r'$20$', r'$25$', r'$30$', r'$35$',r'$40$',r'$45$',r'$50$'])
#set y label
ax.set_ylabel('Occupancy')
# Save figure using 72 dots per inch
# savefig("exercice_2.png",dpi=72)
# Show result on screen
show()
| mit |
JohannesBuchner/PyMirage | pymirage/filter.py | 1 | 2678 | import numpy
from numpy import linspace, arange, log, zeros, cos, transpose, pi, logical_and, log10
import scipy.sparse
import tempfile
def gen_mfcc_filters(samplingrate, winsize, numfilters, nummfccs):
# Precompute the MFCC filterweights and DCT.
# Adopted from Malcolm Slaneys mfcc.m, August 1993
# Mirage uses filters computed by the following command in octave 3.0
# as of August 26th, 2008
# writefilters(22050, 1024, 36, 20, 'dct.filter', 'filterweights.filter');
#
# see http://www.ee.columbia.edu/~dpwe/muscontent/practical/mfcc.m
fft_freq = linspace(0, samplingrate/2, winsize/2 + 1)
f = arange(20, samplingrate/2 + 1)
mel = log(1 + f/700.) * 1127.01048
m_idx = linspace(1, mel.max(), numfilters+2)
f_idx = numpy.array([numpy.abs(mel - m_idx[i]).argmin() for i in range(numfilters+2)])
freqs = f[f_idx]
lo = freqs[:numfilters]
ce = freqs[1:numfilters+1]
up = freqs[2:numfilters+2]
# filters outside of spectrum
idx = numpy.arange(numfilters)[ce <= samplingrate/2][-1]
numfilters = min(idx + 1, numfilters)
mfcc_filterweights = zeros((numfilters, winsize/2 + 1))
triangleh = 2. / (up - lo)
for i in range(1, numfilters):
lovals = triangleh[i] * (fft_freq - lo[i]) / (ce[i] - lo[i])
mfcc_filterweights[i,:] += numpy.where(logical_and(fft_freq > lo[i], fft_freq <= ce[i]), lovals, 0)
upvals = triangleh[i] * (up[i] - fft_freq) / (up[i] - ce[i])
mfcc_filterweights[i,:] += numpy.where(logical_and(fft_freq > ce[i], fft_freq < up[i]), upvals, 0)
    dct = 1/(numfilters/2)**0.5 * cos(arange(nummfccs).reshape((-1, 1)) * (2*(arange(numfilters).reshape((1, -1))+1) * pi/2/numfilters))
dct[0,:] *= 2**0.5 / 2
return dct, mfcc_filterweights
class Filter(object):
def __init__(self, winsize, srate, filters, cc):
dct, filterWeights = gen_mfcc_filters(srate, winsize, filters, cc)
self.filterWeights = scipy.sparse.csr_matrix(filterWeights)
self.dct = dct
def __mul__(self, m):
#print 'multiplying:', m.shape, 'with', self.filterWeights.shape
a = self.filterWeights * m
#print 'multiplying:', a.shape
#mel = numpy.zeros_like(a)
#mel[a >= 1] = 10 * log10(a[a >= 1])
mel = numpy.where(a < 1, 0, 10 * log10(a))
#print 'multiplying:', mel.shape, self.dct.shape
return self.dct * mel
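# Shape note (derived from gen_mfcc_filters above, not from external docs):
# dct has shape (nummfccs, numfilters) and filterWeights has shape
# (numfilters, winsize/2 + 1), so a Filter is meant to map power-spectrum
# frames of length winsize/2 + 1 to nummfccs cepstral coefficients, roughly:
#
#   f = Filter(1024, 22050, 36, 20)
#   mfccs = f * frames   # frames: array of shape (513, n_frames)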
if __name__ == '__main__':
a, b = gen_mfcc_filters(22050, 1024, 36, 20)
print a, b
print b.shape, b.size, (b == 0).sum()
indices = [(i, j) for i in range(b.shape[0]) for j in range(b.shape[1]) if b[i,j] != 0]
print indices, len(indices), b.size
#import matplotlib.pyplot as plt
#plt.imshow(b, aspect='auto', interpolation='none')
#plt.show()
#print indices[b == 0]
print numpy.diag(b), (numpy.diag(b) == 0).sum()
| gpl-2.0 |
qifeigit/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
aabadie/scikit-learn | sklearn/tests/test_isotonic.py | 34 | 14159 | import warnings
import numpy as np
import pickle
import copy
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
y = np.array([10, 0, 2])
y_ = np.array([4, 4, 4])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [1, 1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_ymin_ymax():
# Test from @NelleV's issue:
# https://github.com/scikit-learn/scikit-learn/issues/6921
x = np.array([1.263, 1.318, -0.572, 0.307, -0.707, -0.176, -1.599, 1.059,
1.396, 1.906, 0.210, 0.028, -0.081, 0.444, 0.018, -0.377,
-0.896, -0.377, -1.327, 0.180])
y = isotonic_regression(x, y_min=0., y_max=0.1)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Also test decreasing case since the logic there is different
y = isotonic_regression(x, y_min=0., y_max=0.1, increasing=False)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Finally, test with only one bound
y = isotonic_regression(x, y_min=0., increasing=False)
assert(np.all(y >= 0))
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
def test_fast_predict():
# test that the faster prediction change doesn't
# affect out-of-sample predictions:
# https://github.com/scikit-learn/scikit-learn/pull/6206
rng = np.random.RandomState(123)
n_samples = 10 ** 3
# X values over the -10,10 range
X_train = 20.0 * rng.rand(n_samples) - 10
y_train = np.less(
rng.rand(n_samples),
1.0 / (1.0 + np.exp(-X_train))
).astype('int64')
weights = rng.rand(n_samples)
# we also want to test that everything still works when some weights are 0
weights[rng.rand(n_samples) < 0.1] = 0
slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
# Build interpolation function with ALL input data, not just the
# non-redundant subset. The following 2 lines are taken from the
# .fit() method, without removing unnecessary points
X_train_fit, y_train_fit = slow_model._build_y(X_train, y_train,
sample_weight=weights,
trim_duplicates=False)
slow_model._build_f(X_train_fit, y_train_fit)
# fit with just the necessary data
fast_model.fit(X_train, y_train, sample_weight=weights)
X_test = 20.0 * rng.rand(n_samples) - 10
y_pred_slow = slow_model.predict(X_test)
y_pred_fast = fast_model.predict(X_test)
assert_array_equal(y_pred_slow, y_pred_fast)
def test_isotonic_copy_before_fit():
# https://github.com/scikit-learn/scikit-learn/issues/6628
ir = IsotonicRegression()
copy.copy(ir)
| bsd-3-clause |
niamoto/niamoto-core | niamoto/data_providers/sql_provider/sql_plot_provider.py | 2 | 2033 | # coding: utf-8
import sqlalchemy as sa
import pandas as pd
from niamoto.data_providers.base_plot_provider import BasePlotProvider
from niamoto.exceptions import MalformedDataSourceError
class SQLPlotProvider(BasePlotProvider):
"""
    SQL plot provider. Instantiated with a SQL query whose result must
    contain AT LEAST the following columns:
id -> The provider's identifier for the plot.
name -> The name of the plot.
x -> The longitude of the plot (WGS84).
y -> The latitude of the plot (WGS84).
    All the remaining columns will be stored as properties.
"""
REQUIRED_COLUMNS = set(['id', 'name', 'x', 'y'])
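    # A minimal sketch of the kind of query this provider expects; the table
    # and column names below are hypothetical, not part of niamoto:
    #
    #   plot_sql = ("SELECT plot_id AS id, plot_name AS name, "
    #               "lon AS x, lat AS y, elevation FROM my_plots")
    #   provider = SQLPlotProvider(data_provider, plot_sql)
    #   df = provider.get_provider_plot_dataframe()
    #
    # Any extra column (here 'elevation') ends up JSON-encoded in 'properties'.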
def __init__(self, data_provider, plot_sql):
super(SQLPlotProvider, self).__init__(data_provider)
self.plot_sql = plot_sql
def get_provider_plot_dataframe(self):
connection = sa.create_engine(self.data_provider.db_url).connect()
df = pd.read_sql(self.plot_sql, connection, index_col='id')
cols = set(list(df.columns) + ['id', ])
inter = cols.intersection(self.REQUIRED_COLUMNS)
if not inter == self.REQUIRED_COLUMNS:
            m = "The queried data does not contain the required columns " \
                "('id', 'name', 'x', 'y'), " \
                "queried data has: {}".format(cols)
raise MalformedDataSourceError(m)
if len(df) == 0:
return df
property_cols = cols.difference(self.REQUIRED_COLUMNS)
if len(property_cols) > 0:
properties = df[list(property_cols)].apply(
lambda x: x.to_json(),
axis=1
)
else:
properties = '{}'
df.drop(property_cols, axis=1, inplace=True)
df['properties'] = properties
location = df[['x', 'y']].apply(
lambda x: "SRID=4326;POINT({} {})".format(x['x'], x['y']),
axis=1
)
df['location'] = location
df.drop(['x', 'y'], axis=1, inplace=True)
return df
| gpl-3.0 |
alexvmarch/atomic | exatomic/nwchem/inputs.py | 3 | 9184 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Input Generator and Parser
#############################
Every attempt is made to follow the Documentation on the
NWChem `website`_ with a general theme of the input Generator
accepting keyword arguments mirroring the keywords accepted by
NWChem and values corresponding to the parameters in a calculation.
.. _website: http://www.nwchem-sw.org/index.php/Release66:NWChem_Documentation
"""
# """
# Due to the complexity of the NWChem program and the innumerable
# permutations of input file formats, this is in no way meant to be
# an exhaustive wrapper of NWChem input files. Alternatively,
# valid key words are handled according to the types of the
# arguments being passed to it. If the argument is a string, it should
# be formatted how you want with new line breaks (see default argument
# for geomopts). Multiple options for the same keyword are handled as
# lists of tuples (example: basis=[('C', '3-21G'), ('H', '6-31G**')]).
# Similarly, convergence criteria may be specified with convergence =
# ['nolevelshifting', 'ncydp 30', 'damp 70']. The closer your string
# formatting is to what NWChem expects, the less likely it is that you
# will obtain syntax errors in the written input file.
# """
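# A rough illustration of the conventions described above (the argument
# values here are made up for the example, not drawn from a real NWChem job):
#
#   Input.from_universe(uni, task='dft', xc='b3lyp', mult=1, charge=0,
#                       basis=[('C', '3-21G'), ('H', '6-31G**')],
#                       convergence=['nolevelshifting', 'ncydp 30', 'damp 70'],
#                       fp='job.nw')
#
# Plain strings are written verbatim, lists of tuples expand to per-element
# "library" lines, and lists of strings expand to repeated keyword lines.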
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
#import pandas as pd
#import numpy as np
from .editor import Editor
#from exa.util.units import Length as L
#from exatomic import Universe
_template = """echo
start {{name}}
title {{title}}
charge {{charge}}
geometry {{geomopts}}
{{atom}}
end
basis {{basisopts}}
{{basis}}
end
{extras}
{calc}{{prop}}
task {{task}}"""
_calcscf = """scf
nopen {mult}
maxiter {iterations}
end"""
_calcdft = """dft
direct
mult {mult}
grid xfine
xc {xc}
iterations {iterations}
{convergence}
{restart}
end"""
class Input(Editor):
@classmethod
def from_universe(cls, uni, task='scf', fp=None, name=None, title=None,
charge=0, geomopts='units bohr\nsymmetry c1',
basisopts='spherical', basis='* library 6-31G',
mult=1, xc='b3lyp', iterations=100,
convergence='nolevelshifting', prop=' nbofile 2',
relativistic='', tddft='', ecp=''):
calc = _calcdft if task == 'dft' else _calcscf
extras = ''
extradict = {}
for arg, extra in [('ecp', ecp), ('property', prop),
('relativistic', relativistic), ('tddft', tddft)]:
if extra:
extras += '{' + arg + '}'
extradict[arg] = _handle_arg(arg, extra)
fl = cls(_template.format(calc=calc, extras=extras))
keys = [key.split('}')[0].split(':')[0] for key in _template.split('{')[1:]]
keys += [key.split('}')[0].split(':')[0] for key in _calcscf.split('{')[1:]]
keys += [key.split('}')[0].split(':')[0] for key in _calcdft.split('{')[1:]]
kwargs = {key: '' for key in keys}
kwargs['atom'] = uni.atom.to_xyz()[:-1]
if name is not None:
kwargs['name'] = name
else:
            kwargs['name'] = ''.join(uni.atom['symbol'])
kwargs['title'] = title if title is not None else kwargs['name']
kwargs['charge'] = charge
kwargs['geomopts'] = _handle_arg('geomopts', geomopts)
kwargs['basisopts'] = _handle_arg('basisopts', basisopts)
kwargs['basis'] = _handle_arg('basis', basis)
if task == 'dft':
kwargs['mult'] = mult
elif mult - 1 > 0:
kwargs['mult'] = str(mult - 1) + '\n uhf'
else:
kwargs['mult'] = mult - 1
kwargs['xc'] = xc
kwargs['iterations'] = iterations
kwargs['convergence'] = _handle_arg('convergence', convergence)
kwargs['task'] = task
if prop and 'property' not in task:
kwargs['task'] += ' property'
#extras = {'ecp': _handle_arg('ecp', ecp),
# 'tddft': _handle_arg('tddft', tddft),
# 'property': _handle_arg('property', prop),
# 'relativistic': _handle_arg('relativistic', relativistic)}
kwargs.update(extradict)
#### TASK AND EXTRAS
#kwargs['prop'] = '\n\nproperty\n nbofile 2\nend'
#kwargs['task'] = 'property'
#kwargs['calc'] = calc
#if options is not None:
# for opt, info in options.items():
# if opt in extras:
# _handle_info(opt, info, extras)
# elif kind == 'scf' and opt == 'mult':
# kwargs['mult'] = str(int(info) - 1) + '\n uhf' if int(info) > 1 else info
# else:
# _handle_info(opt, info, kwargs)
#extras = ['\n' + key + '\n' + val for key, val in extras.items() if val]
#kwargs['extras'] = '\n'.join([extra + '\nend' for extra in extras])
fl.format(inplace=True, **kwargs)
if fp is not None:
fl.write(fp)
else:
return fl
def __init__(self, *args, **kwargs):
super(Input, self).__init__(*args, **kwargs)
def _handle_arg(opt, info):
type1 = {'basis': 'library', 'ecp': 'library'}
type2 = ['convergence']
type3 = ['ecp', 'property', 'tddft', 'relativistic']
if isinstance(info, str):
if opt in type3:
return '\n{0}\n{1}\n{2}\n'.format(opt, info, 'end')
return info
if opt in type1:
ret = ''
for i, tup in enumerate(info):
if i == len(info) - 1:
ret = ' '.join([ret, tup[0], type1[opt], tup[1]])
else:
ret = ' '.join([ret, tup[0], type1[opt], tup[1], '\n'])
if opt in type3:
return '\n{0}\n{1}\n{2}\n'.format(opt, ret, 'end')
return ret
elif opt in type2:
ret = ''
if type(info) != list:
info = [info]
for i, arg in enumerate(info):
if i == len(info) - 1:
ret = ' '.join([ret, opt, arg])
else:
ret = ' '.join([ret, opt, arg, '\n'])
if opt in type3:
return '\n{0}\n{1}\n{2}\n'.format(opt, ret, 'end')
return ret
else:
if type(info) is list:
return ' '.join([item for item in info])
else:
print('{} keyword not handled correctly with value {}'.format(opt, info))
def tuning_inputs(uni, name, mult, charge, basis, gammas, alphas,
route=[('Pop', 'full')], link0=None,
field=None, writedir=None, deep=False):
"""
Provided a universe, generate input files for functional tuning.
Includes input keywords for orbital visualization within exatomic.
Assumes you will copy restart checkpoint files to have the same
names as the input files.
Args
uni (exatomic.container.Universe): molecular specification
name (str): prefix for job names
mult (int): spin multiplicity
charge (int): charge of the system
basis (list): tuples of atomic symbol, string of basis name
gammas (iter): values of range separation parameter (omega)
alphas (iter): fractions of Hartree-Fock in the short range
route (list): strings or tuples of keyword, value pairs
link0 (list): strings or tuples of keyword, value pairs
writedir (str): directory path to write input files
Returns
editors (list): input files as exa.Editors
"""
fnstr = 'xcampbe96 1.0 cpbe96 1.0 HFexch 1.0\n'\
' cam {gam:.4f} cam_alpha {alp:.4f} cam_beta {bet:.4f}'.format
jbnm = '{name}-{{gam:.2f}}-{{alp:.2f}}-{{chg}}'.format(name=name).format
chgnms = ['cat', 'neut', 'an']
chgs = [charge + 1, charge, charge - 1]
mults = [2, 1, 2] if mult == 1 else [mult - 1, mult, mult + 1]
fls = []
for gam in gammas:
for alp in alphas:
#bet = 1 - alp
for chgnm, chg, mult in zip(chgnms, chgs, mults):
fnc = fnstr(gam=gam, alp=alp, bet=1-alp)
jnm = jbnm(gam=gam, alp=alp, bet=1-alp, chg=chgnm)
opts = {'charge': chg, 'mult': mult, 'task': 'dft',
'title': jnm, 'name': jnm, 'xc': fnc,
'basis': basis, 'prop': ''} #, 'writedir': writedir}
fls.append(Input.from_universe(uni, **opts))
fls[-1].name = jnm + '.nw'
return fls
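# A hedged usage sketch for tuning_inputs; the universe, basis and parameter
# grids below are placeholders, not validated inputs:
#
#   gammas = [0.1, 0.2, 0.3]
#   alphas = [0.0, 0.25]
#   jobs = tuning_inputs(uni, name='h2o', mult=1, charge=0,
#                        basis=[('*', '6-31G*')],
#                        gammas=gammas, alphas=alphas)
#   # one Input editor per (gamma, alpha, charge state):
#   # len(jobs) == len(gammas) * len(alphas) * 3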
# def tuning_inputs(uni, name, mult, charge, basis, gammas, alphas,
# route=[('Pop', 'full')], link0=None, nproc=4, mem=4,
# field=None, writedir=None, deep=False):
# def from_universe(cls, uni, task='scf', fp=None, name=None, title=None,
# charge=0, geomopts='units bohr\nsymmetry c1',
# basisopts='spherical', basis='* library 6-31G',
# mult=1, xc='b3lyp', iterations=100,
# convergence='nolevelshifting', prop=' nbofile 2',
# relativistic='', tddft='', ecp=''):
| apache-2.0 |
ExaScience/smurff | data/synthetic/make.py | 1 | 8259 | #!/usr/bin/env python
import numpy as np
from scipy import sparse
import scipy.io as sio
import argparse
import os
import itertools
import smurff.matrix_io as mio
from sklearn import preprocessing
#parser = argparse.ArgumentParser(description='SMURFF tests')
#parser.add_argument('--envdir', metavar='DIR', dest='envdir', nargs=1, help='Env dir', default='conda_envs')
#parser.add_argument('--data', metavar='DIR', dest='datadir', nargs=1, help='Data dir', default='data')
#parser.add_argument('--outdir', metavar='DIR', dest='outdir', nargs=1, help='Output dir',
# default = 'work/' + datetime.datetime.today().strftime("%Y%m%d-%H%M%S"))
#
#args = parser.parse_args()
# product of two gaussian low-rank matrices + noise
def normal_dense(shape, K):
X = np.random.normal(size=(shape[0],K))
W = np.random.normal(size=(shape[1],K))
return np.dot(X, W.transpose()) + np.random.normal(size=shape)
# product of two low-rank 'ones' matrices
def ones_dense(shape, K):
X = np.ones((shape[0],K))
W = np.ones((shape[1],K))
return np.dot(X,W.transpose())
def col_rep(shape, K):
W = np.arange(shape[1]).reshape(1, shape[1])
return np.repeat(W, shape[0], 0)
# dense -> sparse
# or
# sparse -> even sparser
def sparsify(A, density):
if sparse.issparse(A):
(I, J, V) = sparse.find(A)
else:
V = A.reshape(A.size)
(I, J) = np.indices(A.shape)
I = I.reshape(A.size)
J = J.reshape(A.size)
size = V.size
num = int(size * density)
idx = np.random.choice(size, num, replace=False)
return sparse.coo_matrix((V[idx], (I[idx], J[idx])), shape = A.shape)
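# For example (numbers picked arbitrarily): sparsify(np.ones((4, 5)), 0.2)
# keeps int(20 * 0.2) == 4 randomly chosen entries and returns them as a
# coo_matrix that preserves the original (4, 5) shape.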
def gen_matrix(shape, K, func = "normal", density = 1.0 ):
func_dict = {
"normal": normal_dense,
"ones": ones_dense,
"col": col_rep,
}
m = func_dict[func] (shape,K)
if density < 1.0:
m = sparsify(m, density)
return m
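# e.g. gen_matrix((2000, 100), 4, "normal", 0.2) builds a rank-4 Gaussian
# product plus noise and keeps roughly 20% of the entries as a sparse matrix
# (these particular numbers are only for illustration).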
def write_matrix(filename, A):
if sparse.issparse(A):
mio.write_sparse_float64(filename + ".sdm", A)
else:
# TRANSPOSE BECAUSE OF INTERNAL REPRESENTATION OF DENSE
# mio.write_dense_float64(filename + ".ddm", A.transpose())
# mio.write_dense_float64(filename + ".ddm", A)
mio.write_dense_float64(filename + ".ddm", A)
sio.mmwrite(filename, A)
def write_tensor(filename, A):
if sparse.issparse(A):
mio.write_sparse_float64_matrix_as_tensor(filename + ".sdt", A)
else:
mio.write_dense_float64_matrix_as_tensor(filename + ".ddt", A)
def write_feat(base, features):
for (indx,F) in enumerate(features):
write_matrix("feat_%d_%d" % (base, indx), F)
def write_test_data(dirname, test):
os.chdir(dirname)
write_matrix("test", test)
write_tensor("test", test)
os.chdir("..")
def write_train_data(dirname, train, features = ([],[])):
os.makedirs(dirname)
os.chdir(dirname)
write_matrix("train", train)
write_tensor("train", train)
for (indx,feat) in enumerate(features):
write_feat(indx, feat)
os.chdir("..")
def gen_test_and_write(m, shape, K,func,density, row_split = 1, col_split = 1, center_list=["none"]):
# split rows and cols
rows_blocked = np.array_split(m, row_split, axis=0)
blocks = [ np.array_split(b, col_split, axis=1) for b in rows_blocked]
m = blocks[0][0]
col_feat = [b[0] for b in blocks[1:]]
row_feat = blocks[0][1:]
assert len(col_feat) == row_split - 1
assert len(row_feat) == col_split - 1
for r in row_feat: assert r.shape[0] == m.shape[0]
for r in col_feat: assert r.shape[1] == m.shape[1]
test = sparsify(m, 0.2)
for center in center_list:
if (func == "ones" and center != "none"):
continue
shape_str = "_".join(map(str,shape))
dirname = "%s_%s_%d_%d_%d_%d_%s" % (func, shape_str, K, int(density * 100), row_split, col_split, center)
print("%s..." % dirname)
write_test_data(dirname, test)
def gen_train_and_write(m, shape, K,func,density, row_split = 1, col_split = 1, center = "none"):
if (func == "ones" and center != "none"):
return
shape_str = "_".join(map(str,shape))
dirname = "%s_%s_%d_%d_%d_%d_%s" % (func, shape_str, K, int(density * 100), row_split, col_split, center)
if os.path.exists(dirname):
print("Already exists: %s. Skipping" % dirname)
return
print("%s..." % dirname)
# split rows and cols
rows_blocked = np.array_split(m, row_split, axis=0)
blocks = [ np.array_split(b, col_split, axis=1) for b in rows_blocked]
m = blocks[0][0]
col_feat = [b[0] for b in blocks[1:]]
row_feat = blocks[0][1:]
assert len(col_feat) == row_split - 1
assert len(row_feat) == col_split - 1
for r in row_feat: assert r.shape[0] == m.shape[0]
for r in col_feat: assert r.shape[1] == m.shape[1]
# PAY ATTENTION TO AXIS ORDER
if (center == "row"):
m = preprocessing.scale(m, axis = 0, with_std=False)
elif (center == "col"):
m = preprocessing.scale(m, axis = 1, with_std=False)
elif (center == "global"):
m = m - np.mean(m)
for i in range(len(row_feat)):
if (center == "row"):
row_feat[i] = preprocessing.scale(row_feat[i], axis = 0, with_std=False)
elif (center == "col"):
row_feat[i] = preprocessing.scale(row_feat[i], axis = 1, with_std=False)
elif (center == "global"):
row_feat[i] = row_feat[i] - np.mean(row_feat[i])
for i in range(len(col_feat)):
if (center == "row"):
col_feat[i] = preprocessing.scale(col_feat[i], axis = 0, with_std=False)
elif (center == "col"):
col_feat[i] = preprocessing.scale(col_feat[i], axis = 1, with_std=False)
elif (center == "global"):
col_feat[i] = col_feat[i] - np.mean(col_feat[i])
write_train_data(dirname, m, (row_feat, col_feat))
def gen_matrix_tests():
shape = [2000,100]
#shape = [40,30]
num_latent = 4
for density in (1, .2):
for func in ("normal", "ones"):
# CALL GEN MATRIX ONLY ONCE
m = gen_matrix(shape,num_latent,func)
for row_split in (1,2,3):
for col_split in (1,2,3,):
for center in ("none", "global", "row", "col"):
gen_train_and_write(m,shape,num_latent,func,density, row_split, col_split, center)
# SPARSIFY SHOULD BE CALLED ONLY ONCE
gen_test_and_write(m,shape,num_latent,func,density, row_split, col_split, ("none", "global", "row", "col"))
def gen_and_write_train_tensor(shape, dirname):
train_tensor = np.random.normal(size=shape)
os.makedirs(dirname)
os.chdir(dirname)
filename = "train.ddt"
with open(filename, 'wb') as f:
np.array(len(train_tensor.shape)).astype(np.int64).tofile(f)
np.array(train_tensor.shape).astype(np.int64).tofile(f)
f.write(train_tensor.astype(np.float64).tobytes(order='F'))
os.chdir("..")
return train_tensor
def gen_and_write_test_tensor(train_tensor, density, dirname):
val = train_tensor.reshape(train_tensor.size)
num = int(val.size * density)
idx = np.random.choice(val.size, num, replace=False)
os.chdir(dirname)
filename = "test.sdt"
with open(filename, 'wb') as f:
np.array(len(train_tensor.shape)).astype(np.uint64).tofile(f)
np.array(train_tensor.shape).astype(np.uint64).tofile(f)
np.array(idx.size).astype(np.uint64).tofile(f)
for i in range(len(train_tensor.shape)):
indices = np.indices(train_tensor.shape)[i].reshape(train_tensor.size)
(indices[idx] + 1).astype(np.uint32, copy=False).tofile(f)
val[idx].astype(np.float64, copy=False).tofile(f)
os.chdir("..")
def gen_tensor_tests():
shape = [200, 50, 10]
shape_str = "_".join(map(str, shape))
for density in (1, .2):
dirname = "normal_%s_%d" % (shape_str, int(density * 100))
print("%s..." % dirname)
train_tensor = gen_and_write_train_tensor(shape, dirname)
gen_and_write_test_tensor(train_tensor, density, dirname)
if __name__ == "__main__":
gen_matrix_tests()
#gen_tensor_tests() | mit |
PrashntS/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some measure of confidence in the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example
illustrates two different methods for this calibration and evaluates the
quality of the returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probabilities using a Gaussian naive Bayes
classifier without calibration, with a sigmoid calibration, and with a
non-parametric isotonic calibration. One can observe that only the
non-parametric model is able to provide a probability calibration that
returns probabilities close to the expected 0.5 for most of the samples
belonging to the middle cluster with heterogeneous labels. This results in a
significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
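# For reference, the Brier score reported below is simply the mean squared
# difference between the predicted probability and the binary outcome (lower
# is better).  A minimal illustrative implementation is sketched here; the
# example itself uses sklearn.metrics.brier_score_loss, which additionally
# supports sample weights.  The helper name is ours, not part of the example.
def _brier_score_sketch(y_true, y_prob):
    y_true = np.asarray(y_true, dtype=float)
    y_prob = np.asarray(y_prob, dtype=float)
    return np.mean((y_prob - y_true) ** 2)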
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
joshuagryphon/minihmm | docs/source/conf.py | 1 | 10964 | # -*- coding: utf-8 -*-
#
# minihmm documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 5 00:24:09 2015.
#
import sys
import os
import minihmm
import mock
# Manual mocking
class SetMock():
def __contains__(self, val):
return True
class DocMock(mock.MagicMock):
"""Proxy class to stand in for modules/packages that can't be built
    or installed on readthedocs.org.
Thanks to https://read-the-docs.readthedocs.org/en/latest/faq.html"""
g_code = SetMock()
@classmethod
def __getattr__(cls, name):
return mock.MagicMock()
mock_modules = [
'numpy',
'numpy.testing',
'scipy',
'scipy.sparse',
'scipy.stats',
'scipy.stats.distributions',
'jsonpickle',
'jsonpickle.ext.numpy',
]
# insert mock modules
for mod_name in mock_modules:
sys.modules[mod_name] = DocMock()
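# With the mocks registered above, "import numpy" (and the other entries of
# mock_modules) resolves to a DocMock instance inside minihmm, and any
# attribute access on it returns a MagicMock, so autodoc can import the
# package on readthedocs.org without the real compiled dependencies.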
# allows autodoc to find minihmm without installing it
sys.path.insert(0, os.path.join(os.getcwd(), "..", ".."))
# -- Extensions & options ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'numpydoc',
]
autodoc_default_flags = [
"show-inheritance",
"undoc-members",
"special-members",
"private-members",
"inherited-members",
]
autodoc_member_order = "groupwise"
autodoc_docstring_signature = True
numpydoc_show_class_members = False
intersphinx_mapping = {
"python" : ("http://docs.python.org", None),
"numpy" : ("http://docs.scipy.org/doc/numpy/", None),
"scipy" : ("http://docs.scipy.org/doc/scipy/reference/", None),
"pandas" : ("http://pandas-docs.github.io/pandas-docs-travis/", None),
"jsonpickle" : ("https://jsonpickle.github.io/", None),
}
# Enable substitutions
rst_prolog = """
.. include:: /links.txt
"""
templates_path = ['_templates']
source_suffix = '.rst'
project = u'minihmm'
copyright = u'2015, Joshua Griffin Dunn'
version = str(minihmm.__version__)
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
master_doc = "index"
modindex_common_prefix = ["minihmm."]
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'minihmmdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'minihmm.tex', u'minihmm Documentation',
u'Joshua Griffin Dunn', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'minihmm', u'minihmm Documentation',
[u'Joshua Griffin Dunn'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'minihmm', u'minihmm Documentation',
u'Joshua Griffin Dunn', 'minihmm', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'minihmm'
epub_author = u'Joshua Griffin Dunn'
epub_publisher = u'Joshua Griffin Dunn'
epub_copyright = u'2015, Joshua Griffin Dunn'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'minihmm'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| bsd-3-clause |
tmrowco/electricitymap | parsers/CA_AB.py | 1 | 6277 | #!/usr/bin/env python3
import arrow
from bs4 import BeautifulSoup
import datetime
import re
import requests
import pandas as pd
from pytz import timezone
ab_timezone = 'Canada/Mountain'
def convert_time_str(ts):
"""Takes a time string and converts into an aware datetime object."""
dt_naive = datetime.datetime.strptime(ts, ' %b %d, %Y %H:%M')
localtz = timezone('Canada/Mountain')
dt_aware = localtz.localize(dt_naive)
return dt_aware
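# Illustrative usage (hypothetical timestamp; the leading space matches the
# format string parsed above):
#     convert_time_str(' Jun 01, 2018 14:35')
#     -> an aware datetime for 2018-06-01 14:35 in Canada/Mountain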
def fetch_production(zone_key='CA-AB', session=None, target_datetime=None, logger=None):
"""Requests the last known production mix (in MW) of a given country
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple countries
session (optional) -- request session passed in order to re-use an existing session
Return:
A dictionary in the form:
{
'zoneKey': 'FR',
'datetime': '2017-01-01T00:00:00Z',
'production': {
'biomass': 0.0,
'coal': 0.0,
'gas': 0.0,
'hydro': 0.0,
'nuclear': null,
'oil': 0.0,
'solar': 0.0,
'wind': 0.0,
'geothermal': 0.0,
'unknown': 0.0
},
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
"""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
r = session or requests.session()
url = 'http://ets.aeso.ca/ets_web/ip/Market/Reports/CSDReportServlet'
response = r.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
findtime = soup.find('td', text=re.compile('Last Update')).get_text()
time_string = findtime.split(':', 1)[1]
dt = convert_time_str(time_string)
df_generations = pd.read_html(response.text, match='GENERATION', skiprows=1, index_col=0, header=0)
total_net_generation = df_generations[1]['TNG']
maximum_capability = df_generations[1]['MC']
return {
'datetime': dt,
'zoneKey': zone_key,
'production': {
'coal': float(total_net_generation['COAL']),
'gas': float(total_net_generation['GAS']),
'hydro': float(total_net_generation['HYDRO']),
'wind': float(total_net_generation['WIND']),
'unknown': float(total_net_generation['OTHER'])
},
'capacity': {
'coal': float(maximum_capability['COAL']),
'gas': float(maximum_capability['GAS']),
'hydro': float(maximum_capability['HYDRO']),
'wind': float(maximum_capability['WIND']),
'unknown': float(maximum_capability['OTHER'])
},
'source': 'ets.aeso.ca',
}
def fetch_price(zone_key='CA-AB', session=None, target_datetime=None, logger=None):
"""Requests the last known power price of a given country
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple countries
session (optional) -- request session passed in order to re-use an existing session
Return:
A dictionary in the form:
{
'zoneKey': 'FR',
'currency': EUR,
'datetime': '2017-01-01T00:00:00Z',
'price': 0.0,
'source': 'mysource.com'
}
"""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
r = session or requests.session()
url = 'http://ets.aeso.ca/ets_web/ip/Market/Reports/SMPriceReportServlet?contentType=html/'
response = r.get(url)
df_prices = pd.read_html(response.text, match='Price', index_col=0, header=0)
prices = df_prices[1]
data = {}
for rowIndex, row in prices.iterrows():
price = row['Price ($)']
if (isfloat(price)):
hours = int(rowIndex.split(' ')[1]) - 1
data[rowIndex] = {
'datetime': arrow.get(rowIndex, 'MM/DD/YYYY').replace(hours=hours, tzinfo=ab_timezone).datetime,
'zoneKey': zone_key,
'currency': 'CAD',
'source': 'ets.aeso.ca',
'price': float(price),
}
return [data[k] for k in sorted(data.keys())]
def fetch_exchange(zone_key1='CA-AB', zone_key2='CA-BC', session=None, target_datetime=None, logger=None):
"""Requests the last known power exchange (in MW) between two countries
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple countries
session (optional) -- request session passed in order to re-use an existing session
Return:
A dictionary in the form:
{
'sortedZoneKeys': 'DK->NO',
'datetime': '2017-01-01T00:00:00Z',
'netFlow': 0.0,
'source': 'mysource.com'
}
"""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
r = session or requests.session()
url = 'http://ets.aeso.ca/ets_web/ip/Market/Reports/CSDReportServlet'
response = r.get(url)
df_exchanges = pd.read_html(response.text, match='INTERCHANGE', skiprows=0, index_col=0)
flows = {
'CA-AB->CA-BC': df_exchanges[1][1]['British Columbia'],
'CA-AB->CA-SK': df_exchanges[1][1]['Saskatchewan'],
'CA-AB->US-MT': df_exchanges[1][1]['Montana']
}
sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))
if sortedZoneKeys not in flows:
raise NotImplementedError('This exchange pair is not implemented')
return {
'datetime': arrow.now(tz=ab_timezone).datetime,
'sortedZoneKeys': sortedZoneKeys,
'netFlow': float(flows[sortedZoneKeys]),
'source': 'ets.aeso.ca'
}
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
print('fetch_price() ->')
print(fetch_price())
print('fetch_exchange(CA-AB, CA-BC) ->')
print(fetch_exchange('CA-AB', 'CA-BC'))
print('fetch_exchange(CA-AB, CA-SK) ->')
print(fetch_exchange('CA-AB', 'CA-SK'))
print('fetch_exchange(CA-AB, US-MT) ->')
print(fetch_exchange('CA-AB', 'US-MT'))
| gpl-3.0 |
jh23453/privacyidea | privacyidea/lib/stats.py | 3 | 5545 | # -*- coding: utf-8 -*-
#
# 2015-07-16 Initial writeup
# (c) Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This module reads audit data and can create statistics from
audit data using pandas.
This module is tested in tests/test_lib_stats.py
"""
import logging
from privacyidea.lib.log import log_with
import datetime
import StringIO
log = logging.getLogger(__name__)
try:
import matplotlib
MATPLOT_READY = True
matplotlib.style.use('ggplot')
matplotlib.use('Agg')
except Exception as exx:
MATPLOT_READY = False
log.warning("If you want to see statistics you need to install python "
"matplotlib.")
customcmap = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
@log_with(log)
def get_statistics(auditobject, start_time=None, end_time=None):
    """
    Create audit statistics and return a JSON object
    The auditobject is passed from the upper level, usually from the REST API
    as g.auditobject.
    :param auditobject: The audit object
    :type auditobject: Audit Object as defined in auditmodules.base.Audit
    :param start_time: start of the evaluated period, defaults to seven days
        before end_time
    :param end_time: end of the evaluated period, defaults to now
    :return: JSON
    """
    # Compute the default window at call time; datetime defaults in the
    # signature would be evaluated only once, at import time.
    if end_time is None:
        end_time = datetime.datetime.now()
    if start_time is None:
        start_time = end_time - datetime.timedelta(days=7)
result = {}
df = auditobject.get_dataframe(start_time=start_time, end_time=end_time)
# authentication successful/fail per user or serial
for key in ["user", "serial"]:
result["validate_{0!s}_plot".format(key)] = _get_success_fail(df, key)
# get simple usage
for key in ["serial", "action"]:
result["{0!s}_plot".format(key)] = _get_number_of(df, key)
# failed authentication requests
for key in ["user", "serial"]:
result["validate_failed_{0!s}_plot".format(key)] = _get_fail(df, key)
result["admin_plot"] = _get_number_of(df, "action", nums=20)
return result
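# Illustrative call site (hypothetical; in practice this is driven by the
# REST layer with the request-global audit object, as noted in the docstring):
#     stats = get_statistics(g.auditobject)
#     stats["admin_plot"]  # -> "data:image/png;base64,..." on success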
def _get_success_fail(df, key):
try:
output = StringIO.StringIO()
series = df[df.action.isin(["POST /validate/check",
"GET /validate/check"])].groupby([key,
'success']).size().unstack()
fig = series.plot(kind="bar", stacked=True,
legend=True,
title="Authentications",
grid=True,
color=customcmap).get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,{0!s}'.format(image_data)
except Exception as exx:
log.info(exx)
image_uri = "{0!s}".format(exx)
return image_uri
def _get_fail(df, key):
try:
output = StringIO.StringIO()
series = df[(df.success==0)
& (df.action.isin(["POST /validate/check",
"GET /validate/check"]))][
key].value_counts()[:5]
plot_canvas = matplotlib.pyplot.figure()
ax = plot_canvas.add_subplot(1,1,1)
fig = series.plot(ax=ax, kind="bar",
colormap="Reds",
stacked=False,
legend=False,
grid=True,
title="Failed Authentications").get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,{0!s}'.format(image_data)
except Exception as exx:
log.info(exx)
image_uri = "{0!s}".format(exx)
return image_uri
def _get_number_of(df, key, nums=5):
"""
    Return a data URL image for a single keyed value.
    It plots the "nums" most frequent values of the "key" column in the dataframe.
    :param df: The DataFrame
    :type df: Pandas DataFrame
    :param key: The key (column) which should be plotted.
    :param nums: how many of the most frequent values should be plotted
    :return: A data URL
"""
output = StringIO.StringIO()
output.truncate(0)
try:
plot_canvas = matplotlib.pyplot.figure()
ax = plot_canvas.add_subplot(1, 1, 1)
series = df[key].value_counts()[:nums]
fig = series.plot(ax=ax, kind="bar", colormap="Blues",
legend=False,
stacked=False,
title="Numbers of {0!s}".format(key),
grid=True).get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,{0!s}'.format(image_data)
except Exception as exx:
log.info(exx)
image_uri = "No data"
return image_uri
| agpl-3.0 |
eramirem/astroML | book_figures/chapter3/fig_transform_distribution.py | 3 | 2529 | r"""
Transformation of Distribution
------------------------------
Figure 3.4.
An example of transforming a uniform distribution. In the left panel, x
is sampled from a uniform distribution of unit width centered on x = 0.5
(:math:`\mu` = 0 and W = 1; see Section 3.3.1). In the right panel,
the distribution is transformed via y = exp(x). The form of the resulting
pdf is computed from eq. 3.20.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up the data
np.random.seed(0)
# create a uniform distribution
uniform_dist = stats.uniform(0, 1)
x_sample = uniform_dist.rvs(1000)
x = np.linspace(-0.5, 1.5, 1000)
Px = uniform_dist.pdf(x)
# transform the data
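# Change of variables (eq. 3.20): with y = exp(x) we have x = ln(y) and
# |dx/dy| = 1/y, so p_y(y) = p_x(ln y) / y, which is what "Py = Px / y"
# computes below.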
y_sample = np.exp(x_sample)
y = np.exp(x)
Py = Px / y
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(left=0.11, right=0.95, wspace=0.3, bottom=0.17, top=0.9)
ax = fig.add_subplot(121)
ax.hist(x_sample, 20, histtype='stepfilled', fc='#CCCCCC', normed=True)
ax.plot(x, Px, '-k')
ax.set_xlim(-0.2, 1.2)
ax.set_ylim(0, 1.4001)
ax.xaxis.set_major_locator(plt.MaxNLocator(6))
ax.text(0.95, 0.95, r'$p_x(x) = {\rm Uniform}(x)$',
va='top', ha='right',
transform=ax.transAxes)
ax.set_xlabel('$x$')
ax.set_ylabel('$p_x(x)$')
ax = fig.add_subplot(122)
ax.hist(y_sample, 20, histtype='stepfilled', fc='#CCCCCC', normed=True)
ax.plot(y, Py, '-k')
ax.set_xlim(0.85, 2.9)
ax.xaxis.set_major_locator(plt.MaxNLocator(6))
ax.text(0.95, 0.95, '$y=\exp(x)$\n$p_y(y)=p_x(\ln y) / y$',
va='top', ha='right',
transform=ax.transAxes)
ax.set_xlabel('$y$')
ax.set_ylabel('$p_y(y)$')
plt.show()
| bsd-2-clause |