repo_name | path | copies | size | content | license
---|---|---|---|---|---|
alexjc/pylearn2 | pylearn2/train_extensions/plots.py | 34 | 9617 | """
Plot monitoring extensions while training.
"""
__authors__ = "Laurent Dinh"
__copyright__ = "Copyright 2014, Universite de Montreal"
__credits__ = ["Laurent Dinh"]
__license__ = "3-clause BSD"
__maintainer__ = "Laurent Dinh"
__email__ = "dinhlaur@iro"
import logging
import os
import os.path
import stat
import numpy
np = numpy
from pylearn2.train_extensions import TrainExtension
from theano.compat.six.moves import xrange
from pylearn2.utils import as_floatX, wraps
if os.getenv('DISPLAY') is None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import warnings
log = logging.getLogger(__name__)
def make_readable(fn):
"""
Make a file readable by all.
Practical when the plot is in your public_html.
Parameters
----------
fn : str
Filename you wish to make public readable.
"""
st = os.stat(fn)
# Create the desired permission
st_mode = st.st_mode
read_all = stat.S_IRUSR
read_all |= stat.S_IRGRP
read_all |= stat.S_IROTH
# Set the permission
os.chmod(fn, st_mode | read_all)
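# Illustrative note (not part of the original module): make_readable() only
# adds the read bits for user, group and others (i.e. mode | 0o444); any
# existing write or execute bits on the file are left untouched.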
def get_best_layout(n_plots):
"""
Find the best basic layout for a given number of plots.
Minimize the perimeter of an integer-sided rectangle whose area
is at least ``n_plots``.
Parameters
----------
n_plots : int
The number of plots to display
Returns
-------
n_rows : int
Number of rows in the layout
n_cols : int
Number of columns in the layout
"""
assert n_plots > 0
# Initialize the layout
n_rows = 1
n_cols = np.ceil(n_plots*1./n_rows)
n_cols = int(n_cols)
half_perimeter = n_cols + 1
# Limit the range of possible layouts
max_row = np.sqrt(n_plots)
max_row = np.round(max_row)
max_row = int(max_row)
for l in xrange(1, max_row + 1):
width = np.ceil(n_plots*1./l)
width = int(width)
if half_perimeter >= (width + l):
n_rows = l
n_cols = np.ceil(n_plots*1./n_rows)
n_cols = int(n_cols)
half_perimeter = n_rows + n_cols
return n_rows, n_cols
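# Illustrative usage (not part of the original module); the values follow
# from the search above, which keeps the layout with the smallest
# half-perimeter n_rows + n_cols whose area covers n_plots:
#     get_best_layout(4)  # -> (2, 2)
#     get_best_layout(5)  # -> (2, 3)
#     get_best_layout(7)  # -> (3, 3)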
def create_colors(n_colors):
"""
Create an array of n_colors
Parameters
----------
n_colors : int
The number of colors to create
Returns
-------
colors_rgb : np.array
An array of shape (n_colors, 3) in RGB format
"""
# Create the list of color hue
colors_hue = np.arange(n_colors)
colors_hue = as_floatX(colors_hue)
colors_hue *= 1./n_colors
# Set the color in HSV format
colors_hsv = np.ones((n_colors, 3))
colors_hsv[:, 2] *= .75
colors_hsv[:, 0] = colors_hue
# Put in a matplotlib-friendly format
colors_hsv = colors_hsv.reshape((1, )+colors_hsv.shape)
# Convert to RGB
colors_rgb = matplotlib.colors.hsv_to_rgb(colors_hsv)
colors_rgb = colors_rgb[0]
return colors_rgb
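# Illustrative usage (not part of the original module): the hues are spaced
# evenly on [0, 1) with full saturation and value 0.75, so three colors come
# out as (roughly) pure red, green and blue at 75% intensity:
#     create_colors(3)
#     # -> array([[0.75, 0., 0.], [0., 0.75, 0.], [0., 0., 0.75]])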
class Plotter(object):
"""
Base class for plotting.
Parameters
----------
freq : int, optional
The number of epochs before producing a plot.
Default is None (set by the PlotManager).
"""
def __init__(self, freq=None):
self.filenames = []
self.freq = freq
def setup(self, model, dataset, algorithm):
"""
Setup the plotters.
Parameters
----------
model : pylearn2.models.Model
The model trained
dataset : pylearn2.datasets.Dataset
The dataset on which the model is trained
algorithm : pylearn2.training_algorithms.TrainingAlgorithm
The algorithm the model is trained with
"""
raise NotImplementedError(str(type(self))+" does not implement setup.")
def plot(self):
"""
The method that draws and saves the desired figure, which depends
on the object and its attributes. This method is called by the
PlotManager object as often as the `freq` attribute specifies.
"""
raise NotImplementedError(str(type(self))+" does not implement plot.")
def set_permissions(self, public):
"""
Make the produced files readable by everyone.
Parameters
----------
public : bool
If public is True, then the associated files are
readable by everyone.
"""
if public:
for filename in self.filenames:
make_readable(filename)
class Plots(Plotter):
"""
Plot different monitor channels.
Parameters
----------
channel_names : list of str
List of monitor channels to plot
save_path : str
Filename of the plot file
share : float, optional
The fraction of epochs shown. Default is .8 (80%).
per_second : bool, optional
If True, the x-axis is in seconds; otherwise it is in epochs.
Default is False.
kwargs : dict
Passed on to the superclass.
"""
def __init__(self, channel_names,
save_path, share=.8,
per_second=False,
** kwargs):
super(Plots, self).__init__(** kwargs)
if not save_path.endswith('.png'):
save_path += '.png'
self.save_path = save_path
self.filenames = [self.save_path]
self.channel_names = channel_names
self.n_colors = len(self.channel_names)
self.colors_rgb = create_colors(self.n_colors)
self.share = share
self.per_second = per_second
@wraps(Plotter.setup)
def setup(self, model, dataset, algorithm):
self.model = model
@wraps(Plotter.plot)
def plot(self):
monitor = self.model.monitor
channels = monitor.channels
channel_names = self.channel_names
# Accumulate the plots
plots = np.array(channels[channel_names[0]].val_record)
plots = plots.reshape((1, plots.shape[0]))
plots = plots.repeat(self.n_colors, axis=0)
for i, channel_name in enumerate(channel_names[1:]):
plots[i+1] = np.array(channels[channel_name].val_record)
# Keep the relevant part
n_min = plots.shape[1]
n_min -= int(np.ceil(plots.shape[1] * self.share))
plots = plots[:, n_min:]
# Get the x axis
x = np.arange(plots.shape[1])
x += n_min
# Put in seconds if needed
if self.per_second:
seconds = channels['training_seconds_this_epoch'].val_record
seconds = np.array(seconds)
seconds = seconds.cumsum()
x = seconds[x]
# Plot the quantities
plt.figure()
for i in xrange(self.n_colors):
plt.plot(x, plots[i], color=self.colors_rgb[i],
alpha=.5)
plt.legend(self.channel_names)
plt.xlim(x[0], x[-1])
plt.ylim(plots.min(), plots.max())
plt.axis('on')
plt.savefig(self.save_path)
plt.close()
class PlotManager(TrainExtension):
"""
Class to manage the Plotter classes.
Parameters
----------
plots : list of pylearn2.train_extensions.Plotter
List of plots to make during training
freq : int
The default number of epochs before producing a plot.
public : bool
Whether the files are made public or not. Default is True.
html_path : str
The path where the HTML page is saved. The associated files should be
in the same folder. Default is None, in which case no HTML page is produced.
"""
def __init__(self, plots, freq, public=True, html_path=None):
self.plots = plots
self.freq = freq
# Set a default freq
for plot in self.plots:
if plot.freq is None:
plot.freq = self.freq
self.public = public
self.html_path = html_path
self.filenames = []
self.count = 0
@wraps(TrainExtension.setup)
def setup(self, model, dataset, algorithm):
for plot in self.plots:
plot.setup(model, dataset, algorithm)
for filename in plot.filenames:
warn = ("/home/www-etud/" in filename)
warn |= (os.environ['HOME'] in filename)
warn &= ('umontreal' in os.environ['HOSTNAME'])
if warn:
warnings.warn('YOU MIGHT RUIN THE NFS '
'BY SAVING IN THIS PATH!')
self.filenames.append(filename)
if self.html_path is not None:
header = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<html xmlns="http://www.w3.org/1999/xhtml"'
'xml:lang="en">\n'
'\t<body>\n')
footer = ('\t</body>\n'
'</html>')
body = ''
for filename in self.filenames:
basename = os.path.basename(filename)
body += '<img src = "' + basename + '"><br/>\n'
with open(self.html_path, 'w') as f:
f.write(header + body + footer)
if self.public:
make_readable(self.html_path)
@wraps(TrainExtension.on_monitor)
def on_monitor(self, model, dataset, algorithm):
self.count += 1
for plot in self.plots:
if self.count % plot.freq == 0:
try:
plot.plot()
plot.set_permissions(self.public)
except Exception as e:
warnings.warn(str(plot) + ' has failed.\n'
+ str(e))
| bsd-3-clause |
arianna-bis/glass-box-nmt | plots/languageplots.py | 1 | 5684 | """
========
Barchart
========
A bar plot with errorbars and height labels on individual bars
"""
import numpy as np
import matplotlib.pyplot as plt
base = {}
baseStd = {}
acc = {}
acc['embed'] = {}
acc['lstmo'] = {}
std = {}
std['embed'] = {}
std['lstmo'] = {}
# LANGS: FR-IT, FR-DE, FR-EN
base['gen'] = 0.5030
base['num'] = 0.6968
base['Per'] = 0.6141
base['Ten'] = 0.7629
base['Moo'] = 0.2450
base['genITM'] = base['gen']
base['avgAllFeats'] = np.mean([base['gen'],base['num'],base['Per'],base['Ten'],base['Moo']])
base['genNumTen'] = np.mean([base['gen'],base['num'],base['Ten']])
baseStd['gen'] = 0.0043
baseStd['num'] = 0.0073
baseStd['Per'] = 0.0392
baseStd['Ten'] = 0.0238
baseStd['Moo'] = 0.0504
baseStd['genITM'] = baseStd['gen']
#baseStd['avgAllFeats'] = np.mean([baseStd['gen'],baseStd['num'],baseStd['Per'],baseStd['Ten'],baseStd['Moo']])
baseStd['avgAllFeats'] = 0 ## HACK!
baseStd['genNumTen'] = 0 ## HACK!
#gender
acc['embed']['gen'] = (0.5804, 0.5304, 0.5085)
std['embed']['gen'] = (0.0272, 0.0321, 0.0357)
#gender with itNoMasc (2nd lang)
acc['embed']['genITM'] = (0.5804, 0.5196, 0.5304, 0.5085)
std['embed']['genITM'] = (0.0272, 0.0226, 0.0321, 0.0357)
# number
acc['embed']['num'] = (0.6804, 0.6623, 0.6563)
std['embed']['num'] = (0.0131, 0.0106, 0.0184)
# person
acc['embed']['Per'] = (0.5648, 0.5789, 0.6017)
std['embed']['Per'] = (0.0984, 0.0493, 0.0405)
# tense
acc['embed']['Ten'] = (0.7219, 0.7090, 0.7483)
std['embed']['Ten'] = (0.0051, 0.0466, 0.0073)
# mood
acc['embed']['Moo'] = (0.4752,0.4515, 0.4908)
std['embed']['Moo'] = (0.0370, 0.0640, 0.0250)
#
# all features averaged
layer = 'embed'
acc_array = []
for L in range(3):
acc_array.append(np.mean([acc[layer]['gen'][L],acc[layer]['num'][L],acc[layer]['Per'][L],acc[layer]['Ten'][L],acc[layer]['Moo'][L]]))
acc[layer]['avgAllFeats'] = acc_array
print(acc[layer]['avgAllFeats'])
acc_array = []
for L in range(3):
acc_array.append(np.mean([acc[layer]['gen'][L],acc[layer]['num'][L],acc[layer]['Ten'][L]]))
acc[layer]['genNumTen'] = acc_array
print(acc[layer]['genNumTen'])
# std_array = []
# for L in range(3):
# std_array.append(np.mean([std[layer]['gen'][L],std[layer]['num'][L],std[layer]['Per'][L],std[layer]['Ten'][L],std[layer]['Moo'][L]]))
# std[layer]['avgAllFeats'] = std_array
#print(std[layer]['avgAllFeats'])
std[layer]['avgAllFeats'] = (0,0,0) # HACK!
std[layer]['genNumTen'] = (0,0,0) # HACK!
#gender
acc['lstmo']['gen'] = (0.8045,0.6505,0.5949)
std['lstmo']['gen'] = (0.0094,0.0228,0.0106)
#gender with itNoMasc (2nd lang)
acc['lstmo']['genITM'] = (0.8045,0.6191,0.6505,0.5949)
std['lstmo']['genITM'] = (0.0094,0.0175,0.0228,0.0106)
#number
acc['lstmo']['num'] = (0.9413, 0.9463, 0.9278)
std['lstmo']['num'] = (0.0016,0.0036, 0.0050)
#person
acc['lstmo']['Per'] = (0.6777, 0.6727, 0.6888)
std['lstmo']['Per'] = (0.0329, 0.0297, 0.0220)
# tense
acc['lstmo']['Ten'] = (0.9019, 0.8880, 0.8897)
std['lstmo']['Ten'] = (0.0080, 0.0086, 0.0169)
#mood
acc['lstmo']['Moo'] = (0.8182, 0.8070, 0.8041)
std['lstmo']['Moo'] = (0.0067, 0.0126, 0.0240)
#
# all features averaged
layer = 'lstmo'
acc_array = []
for L in range(3):
acc_array.append(np.mean([acc[layer]['gen'][L],acc[layer]['num'][L],acc[layer]['Per'][L],acc[layer]['Ten'][L],acc[layer]['Moo'][L]]))
acc[layer]['avgAllFeats'] = acc_array
print(acc[layer]['avgAllFeats'])
acc_array = []
for L in range(3):
acc_array.append(np.mean([acc[layer]['gen'][L],acc[layer]['num'][L],acc[layer]['Ten'][L]]))
acc[layer]['genNumTen'] = acc_array
print(acc[layer]['genNumTen'])
# std_array = []
# for L in range(3):
# std_array.append(np.mean([std[layer]['gen'][L],std[layer]['num'][L],std[layer]['Per'][L],std[layer]['Ten'][L],std[layer]['Moo'][L]]))
# std[layer]['avgAllFeats'] = std_array
#print(std[layer]['avgAllFeats'])
std[layer]['avgAllFeats'] = (0,0,0) # HACK!
std[layer]['genNumTen'] = (0,0,0) # HACK!
#############
#############
feats = ['gen','num','Per','Ten','Moo','avgAllFeats','genITM','genNumTen']
featNames = ['Gender','Number','Person','Tense','Mood','All 5 Features','Gender', 'All Features']
#for i in range(6):
for i in range(7,8): # only genNumTen
feat = feats[i]
featName = featNames[i]
N = 3 # for: baseline, embedding, lstm-state
if feat == 'genITM':
N = 4
ind = np.arange(N) # the x locations for the groups
width = 0.3 # the width of the bars
fig, ax = plt.subplots()
colors1 = ('#9999FF')
colors2 = ('#0000FF')
#if feat == 'genITM': # use diff color for Fr-ITnoMasc
# colors1 = ('#9999FF','#85e085','#9999FF','#9999FF')
# colors2 = ('#0000FF','#248F24','#0000FF','#0000FF')
rects0 = ax.bar(0.5*width, base[feat], width, color='#FF9900', yerr=baseStd[feat])
rects1 = ax.bar(2.5*width+ind+0.5*width, acc['embed'][feat], width, color=colors1, yerr=std['embed'][feat])
rects2 = ax.bar(2.5*width+ind+0.5*width + width, acc['lstmo'][feat], width, color=colors2, yerr=std['lstmo'][feat])
# add some text for labels, title and axes ticks
ax.set_ylabel('Prediction Accuracy',size=12)
ax.set_title(featName + ' prediction',size=16)
xticks = (np.arange(N+1) + 0.05)
xticks[0] = width/2
#ax.set_xticks(width/2, np.arange(N) + width / 2)
ax.set_xticks(xticks)
ax.set_xticklabels(('majClass', 'FR-IT', 'FR-DE', 'FR-EN'))
if feat == 'genITM':
ax.set_xticklabels(('majClass', 'FR-IT', 'FR-IT*', 'FR-DE', 'FR-EN'))
ax.set_ylim(0.2,1)
ax.legend((rects1[0], rects2[0]), ('Word embedding', 'LSTM state'))
filename = feat + '_byLang.pdf'
plt.savefig(filename, bbox_inches='tight')
#plt.show()
| mit |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/datasets/tests/test_lfw.py | 55 | 7877 | """These tests for LFW require medium-size data downloading and processing.
If the data has not already been downloaded by running the examples,
the tests won't run (they are skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| mit |
ottermegazord/ottermegazord.github.io | onexi/data_processing/s07_analyzePred.py | 1 | 4074 | import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import pdb
import sys
plt.style.use("ggplot")
os.chdir("..")
ipath = "./Data/Final_Data/"
ifile1 = "Final_Data_Aug_Pred"
ifile2 = "Final_Data"
opath = "./Data/Final_Data/Neighborhoods/"
imgpath = "./Plots/Neighborhood_TS/"
ext = ".csv"
input_var = raw_input("Run mode (analysis/plot): ")
if input_var == "analysis":
df0 = pd.read_csv(ipath + ifile1 + ext, low_memory=False)
df1 = pd.read_csv(ipath + ifile2 + ext, low_memory=False)
df = pd.concat([df0, df1], axis=1)
df.columns = ['t','x','y','PRED_REG','PRED_NNET','TIME','X','Y','TRUE','NEIGHBORHOOD']
df = df[['TIME', 'X', 'Y', 'NEIGHBORHOOD', 'TRUE', 'PRED_REG', 'PRED_NNET']]
df_2017 = df[df["TIME"] == 2017.0].reset_index(drop=True)
known_sample = len(df[df["TIME"] <= 2017.0])
df_2018 = df.ix[[df.index[i] for i in range(known_sample, known_sample + len(df_2017))]]
df_2019 = df.ix[[df.index[i] for i in range(known_sample + len(df_2017), known_sample + 2*len(df_2017))]]
df_2020 = df.ix[[df.index[i] for i in range(known_sample + 2*len(df_2017), known_sample + 3*len(df_2017))]]
df_2018["TIME"], df_2019["TIME"], df_2020["TIME"] = 2018.0, 2019.0, 2020.0
df_2018 = df_2018[["TIME", "PRED_REG", "PRED_NNET"]].reset_index(drop=True)
df_2018 = pd.concat([df_2018, df_2017], axis=1)
df_2018.columns = ["TIME", "PRED_REG", "PRED_NNET", "t", "X", "Y", "NEIGHBORHOOD", "TRUE", "p1", "p2"]
df_2018 = df_2018[["TIME", "X", "Y", "NEIGHBORHOOD", "TRUE", "PRED_REG", "PRED_NNET"]]
df_2018["TRUE"] = 0
df_2019 = df_2019[["TIME", "PRED_REG", "PRED_NNET"]].reset_index(drop=True)
df_2019 = pd.concat([df_2019, df_2017], axis=1)
df_2019.columns = ["TIME", "PRED_REG", "PRED_NNET", "t", "X", "Y", "NEIGHBORHOOD", "TRUE", "p1", "p2"]
df_2019 = df_2019[["TIME", "X", "Y", "NEIGHBORHOOD", "TRUE", "PRED_REG", "PRED_NNET"]]
df_2019["TRUE"] = 0
df_2020 = df_2020[["TIME", "PRED_REG", "PRED_NNET"]].reset_index(drop=True)
df_2020 = pd.concat([df_2020, df_2017], axis=1)
df_2020.columns = ["TIME", "PRED_REG", "PRED_NNET", "t", "X", "Y", "NEIGHBORHOOD", "TRUE", "p1", "p2"]
df_2020 = df_2020[["TIME", "X", "Y", "NEIGHBORHOOD", "TRUE", "PRED_REG", "PRED_NNET"]]
df_2020["TRUE"] = 0
df_known = df[df["TIME"] <= 2017.0]
df = df_known.append([df_2018, df_2019, df_2020]).reset_index(drop=True)
df['TIME'] = df['TIME'].astype(np.int64)
df.to_csv(ipath + "MapData_Full.csv", index=False)
df2 = df.groupby(["TIME", "NEIGHBORHOOD"]).mean().unstack()
time = df["TIME"].unique().tolist()
nhood = df["NEIGHBORHOOD"].unique().tolist()
nhood = [x for x in nhood if str(x) != 'nan']
for n in nhood:
mean_true, mean_reg, mean_nnet = [], [], []
for t in time:
mean_true.append(df2.loc[t, ("TRUE", n)])
mean_reg.append(df2.loc[t, ("PRED_REG", n)])
mean_nnet.append(df2.loc[t, ("PRED_NNET", n)])
out_df = pd.DataFrame({'TIME': time, 'MEAN_TRUE': mean_true, 'MEAN_REG': mean_reg, 'MEAN_NNET': mean_nnet})
out_df.to_csv(opath + n + ext, index=False)
elif input_var == "plot":
def makePlot(x, true, reg, nnet, xlabel, ylabel, title, filename):
N = 17
ind = np.arange(N)
width = 0.25
plt.bar(ind - width, true, width=0.25, align="center", color='green', label="True Value")
plt.bar(ind, reg, width=0.25, align="center", color="blue", label="Predicted Value (Regression)")
plt.bar(ind + width, nnet, width=0.25, align="center", color="red", label="Predicted Value (Neural Net)")
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.title(title)
plt.xticks(ind + width, map(int, x), fontsize=6)
plt.legend(loc="best")
plt.savefig(filename, bbox_inches="tight", dpi=300)
plt.close()
nhood_files = os.listdir(opath)
for f in nhood_files:
nhood = f[:-4]
df = pd.read_csv(opath + f, low_memory=False)
df["YEAR"] = df["TIME"]
makePlot(x=df["YEAR"].tolist(), true=df["MEAN_TRUE"].tolist(), reg=df["MEAN_REG"].tolist(), nnet=df["MEAN_NNET"].tolist(), ylabel="AVG LAND VALUE ($/sqft)", xlabel="TIME (year)", title=nhood, filename=imgpath + nhood +".png")
| mit |
cjayb/mne-python | examples/inverse/plot_mixed_source_space_inverse.py | 2 | 6574 | """
===================================================================
Compute MNE inverse solution on evoked data in a mixed source space
===================================================================
Create a mixed source space and compute an MNE inverse solution on an
evoked dataset.
"""
# Author: Annalisa Pascarella <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import matplotlib.pyplot as plt
from nilearn import plotting
import mne
from mne.minimum_norm import make_inverse_operator, apply_inverse
# Set dir
data_path = mne.datasets.sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_mixed_src = op.join(bem_dir, '%s-oct-6-mixed-src.fif' % subject)
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_evoked = data_dir + '/sample_audvis-ave.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_fwd = data_dir + '/sample_audvis-meg-oct-6-mixed-fwd.fif'
fname_cov = data_dir + '/sample_audvis-shrunk-cov.fif'
###############################################################################
# Set up our source space
# -----------------------
# List the substructures we are interested in. We select only the
# substructures we want to include in the source space:
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
###############################################################################
# Get a surface-based source space, here with few source points for speed
# in this demonstration; in general you should use oct6 spacing!
src = mne.setup_source_space(subject, spacing='oct5',
add_dist=False, subjects_dir=subjects_dir)
###############################################################################
# Now we create a mixed src space by adding the volume regions specified in the
# list labels_vol. First, read the aseg file and the source space bounds
# using the inner skull surface (here using 10mm spacing to save time;
# we recommend something smaller, like 5.0, in actual analyses):
vol_src = mne.setup_volume_source_space(
subject, mri=fname_aseg, pos=10.0, bem=fname_model,
volume_label=labels_vol, subjects_dir=subjects_dir,
add_interpolator=False, # just for speed, usually this should be True
verbose=True)
# Generate the mixed source space
src += vol_src
# Visualize the source space.
src.plot(subjects_dir=subjects_dir)
n = sum(src[i]['nuse'] for i in range(len(src)))
print('the src space contains %d spaces and %d points' % (len(src), n))
###############################################################################
# Viewing the source space
# ------------------------
# We could write the mixed source space with::
#
# >>> write_source_spaces(fname_mixed_src, src, overwrite=True)
#
# We can also export source positions to nifti file and visualize it again:
nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True, overwrite=True)
plotting.plot_img(nii_fname, cmap='nipy_spectral')
###############################################################################
# Compute the fwd matrix
# ----------------------
fwd = mne.make_forward_solution(
fname_evoked, fname_trans, src, fname_bem,
mindist=5.0, # ignore sources<=5mm from innerskull
meg=True, eeg=False, n_jobs=1)
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
src_fwd = fwd['src']
n = sum(src_fwd[i]['nuse'] for i in range(len(src_fwd)))
print('the fwd src space contains %d spaces and %d points' % (len(src_fwd), n))
# Load data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname_evoked, condition=condition,
baseline=(None, 0))
noise_cov = mne.read_cov(fname_cov)
###############################################################################
# Compute inverse solution
# ------------------------
snr = 3.0 # use smaller SNR for raw data
inv_method = 'dSPM' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
loose = dict(surface=0.2, volume=1.)
lambda2 = 1.0 / snr ** 2
inverse_operator = make_inverse_operator(
evoked.info, fwd, noise_cov, depth=None, loose=loose, verbose=True)
stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori=None)
src = inverse_operator['src']
###############################################################################
# Plot the mixed source estimate
# ------------------------------
# sphinx_gallery_thumbnail_number = 3
initial_time = 0.1
stc_vec = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori='vector')
brain = stc_vec.plot(
hemi='both', src=inverse_operator['src'], views='coronal',
initial_time=initial_time, subjects_dir=subjects_dir)
###############################################################################
# Plot the surface
# ----------------
brain = stc.surface().plot(initial_time=initial_time,
subjects_dir=subjects_dir)
###############################################################################
# Plot the volume
# ----------------
fig = stc.volume().plot(initial_time=initial_time, src=src,
subjects_dir=subjects_dir)
###############################################################################
# Process labels
# --------------
# Average the source estimates within each label of the cortical parcellation
# and each sub structure contained in the src space
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(
subject, parc=parc, subjects_dir=subjects_dir)
label_ts = mne.extract_label_time_course(
[stc], labels_parc, src, mode='mean', allow_empty=True)
# plot the times series of 2 labels
fig, axes = plt.subplots(1)
axes.plot(1e3 * stc.times, label_ts[0][0, :], 'k', label='bankssts-lh')
axes.plot(1e3 * stc.times, label_ts[0][71, :].T, 'r', label='Brain-stem')
axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')
axes.legend()
mne.viz.tight_layout()
| bsd-3-clause |
blechta/dolfin-tape | dolfintape/demo_problems/pLaplaceAdaptiveSolver.py | 1 | 9447 | # Copyright (C) 2016 Jan Blechta
#
# This file is part of dolfin-tape.
#
# dolfin-tape is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dolfin-tape is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with dolfin-tape. If not, see <http://www.gnu.org/licenses/>.
from dolfin import *
import ufl
import matplotlib.pyplot as plt
import numpy as np
from itertools import chain
from dolfintape import FluxReconstructor
from dolfintape.utils import logn
from dolfintape.cell_diameter import CellDiameters
from dolfintape.poincare import poincare_const
from dolfintape.sobolev_norm import sobolev_norm
__all__ = ['solve_p_laplace_adaptive', 'pLaplaceAdaptiveSolver',
'geometric_progression']
def solve_p_laplace_adaptive(p, criterion, V, f, df, u_ex=None,
eps0=1.0, eps_decrease=0.1**0.5,
solver_parameters=None):
"""Approximate p-Laplace problem with rhs of the form
(f, v) - (df, grad(v)) with test function v
on initial space V. Compute adaptively in regularization parameter
and refine mesh adaptively until
criterion = lambda u_h, Est_h, Est_eps, Est_tot, Est_up: bool
returns True. Return u."""
if isinstance(p, Constant):
q = Constant(float(p)/(float(p)-1.0))
else:
q = p/(p-1)
eps = geometric_progression(eps0, eps_decrease)
solver = pLaplaceAdaptiveSolver(p, q, f, df, u_ex)
return solver.solve(V, criterion, eps, solver_parameters)
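# Illustrative call sketch (not part of the original module; the space, data
# and tolerance are assumptions): with V a Lagrange function space on some
# mesh, p = Constant(3.0) and f, df the right-hand side data, the call
#     u = solve_p_laplace_adaptive(
#             p, lambda u_h, Est_h, Est_eps, Est_tot, Est_up: Est_tot < 1e-2,
#             V, f, df)
# keeps refining until the total error estimate drops below the tolerance.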
def geometric_progression(a0, q):
"""Return function returning generator taking values
of geometric progression:
a0,
a0*q,
a0*q**2,
a0*q**3,
...
"""
def generator():
_a0 = a0
while True:
yield _a0
_a0 *= q
return generator
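# Illustrative usage (not part of the original module): the returned callable
# builds a fresh generator each time it is invoked, so the progression can be
# restarted from a0 for every mesh refinement:
#     eps = geometric_progression(1.0, 0.5)
#     g = eps()
#     next(g), next(g), next(g)  # -> (1.0, 0.5, 0.25)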
class pLaplaceAdaptiveSolver(object):
"""Adaptive solver for p-Laplace problem with spatial adaptivity and
adaptivity in regularization parameter.
"""
def __init__(self, p, q, f, df, exact_solution=None):
"""Adaptive solver for p-Laplace problem (q should be dual exponent
q = p/(p-1)) with rhs of the form
(f, v) - (df, grad(v)) with test function v.
"""
assert np.allclose(1.0/float(p) + 1.0/float(q), 1.0), \
"Expected conjugate Lebesgue exponents " \
"p, q (of type int, float, or Constatnt)!"
self.p = p
self.q = q
self.f = f
self.df = df
self.u_ex = exact_solution
self.boundary = CompiledSubDomain("on_boundary")
def solve(self, V, criterion, eps, solver_parameters=None):
"""Start on initial function space V, refine adaptively mesh and
regularization parameter provided by decreasing generator eps
until
criterion = lambda u_h, Est_h, Est_eps, Est_tot, Est_up: bool
returns True. Return found approximation."""
p = float(self.p)
u = Function(V)
while True:
logn(25, 'Adapting mesh (space dimension %s): ' % V.dim())
result = self._adapt_eps(criterion, u, eps, solver_parameters)
u, est_h, est_eps, est_tot, Est_h, Est_eps, Est_tot, Est_up = result
# Check convergence
log(25, 'Estimators h, eps, tot, up: %s' % (result[4:],))
log(25, r'||\nabla u_h||_p^{p-1} = %s' % sobolev_norm(u, p)**(p-1.0))
if criterion(u, Est_h, Est_eps, Est_tot, Est_up):
break
# Refine mesh
markers = self.estimator_to_markers(est_h, p/(p-1.0), fraction=0.5)
log(25, 'Marked %s of %s cells for refinement'
% (sum(markers), markers.mesh().num_cells()))
adapt(V.mesh(), markers)
mesh = V.mesh().child()
adapt(u, mesh)
u = u.child()
V = u.function_space()
return u
def _solve(self, eps, u, reconstructor, P0, solver_parameters):
"""Find approximate solution with fixed eps and mesh. Use
reconstructor to reconstruct the flux and estimate errors.
"""
p, q = self.p, self.q
f, df = self.f, self.df
boundary = self.boundary
exact_solution = self.u_ex
V = u.function_space()
mesh = V.mesh()
dx = Measure('dx', domain=mesh)
eps = Constant(eps)
# Problem formulation
S = inner(grad(u), grad(u))**(p/2-1) * grad(u) + df
S_eps = (eps + inner(grad(u), grad(u)))**(p/2-1) * grad(u) + df
v = TestFunction(V)
F_eps = ( inner(S_eps, grad(v)) - f*v ) * dx
bc = DirichletBC(V, exact_solution if exact_solution else 0.0, boundary)
# Solve
solve(F_eps == 0, u, bc, solver_parameters=solver_parameters or {})
# Reconstruct flux q in H^q(div) s.t.
# q ~ -S
# div q ~ f
Q = reconstructor.reconstruct(S, f).sub(0, deepcopy=False)
# Compute error estimate using equilibrated stress reconstruction
v = TestFunction(P0)
h = CellDiameters(mesh)
Cp = Constant(poincare_const(mesh.type(), p))
est0 = assemble(((Cp*h*(f-div(Q)))**2)**(0.5*q)*v*dx)
est1 = assemble(inner(S_eps+Q, S_eps+Q)**(0.5*q)*v*dx)
est2 = assemble(inner(S_eps-S, S_eps-S)**(0.5*q)*v*dx)
q = float(q)
est_h = est0.array()**(1.0/q) + est1.array()**(1.0/q)
est_eps = est2.array()**(1.0/q)
est_tot = est_h + est_eps
Est_h = MPI.sum( mesh.mpi_comm(), (est_h **q).sum() )**(1.0/q)
Est_eps = MPI.sum( mesh.mpi_comm(), (est_eps**q).sum() )**(1.0/q)
Est_tot = MPI.sum( mesh.mpi_comm(), (est_tot**q).sum() )**(1.0/q)
# Wrap arrays as cell functions
est_h = self.vecarray_to_cellfunction(est_h, P0)
est_eps = self.vecarray_to_cellfunction(est_eps, P0)
est_tot = self.vecarray_to_cellfunction(est_tot, P0)
# Upper estimate using exact solution
if exact_solution:
S_exact = ufl.replace(S, {u: exact_solution})
Est_up = sobolev_norm(S-S_exact, q, k=0)
else:
Est_up = None
log(18, 'Error estimates: overall %g, discretization %g, '
'regularization %g, estimate_up %s'
% (Est_tot, Est_h, Est_eps, Est_up))
return u, est_h, est_eps, est_tot, Est_h, Est_eps, Est_tot, Est_up
def _adapt_eps(self, criterion, u, epsilons, solver_parameters):
"""Solve adaptively in eps on fixed space (given by u) until
criterion = lambda None, Est_h, Est_eps, Est_tot, Est_up: bool
return True. Notice None supplied instead of u_h, thus not taking
discretization error criterion into account.
"""
# Prepare flux reconstructor and P0 space
log(25, 'Initializing flux reconstructor')
reconstructor = FluxReconstructor(u.function_space().mesh(),
u.function_space().ufl_element().degree())
P0 = FunctionSpace(u.function_space().mesh(),
'Discontinuous Lagrange', 0)
# Adapt regularization
logn(25, 'Adapting regularization')
for eps in epsilons():
debug('Regularizing using eps = %s' % eps)
logn(25, '.')
result = self._solve(eps, u, reconstructor, P0, solver_parameters)
u, est_h, est_eps, est_tot, Est_h, Est_eps, Est_tot, Est_up = result
if criterion(None, Est_h, Est_eps, Est_tot, Est_up):
break
log(25, '')
return result
@staticmethod
def estimator_to_markers(est, q, cf=None, fraction=0.5):
"""Take double CellFunction and convert it to bool cell function
using Dorfler marking strategy.
"""
assert isinstance(est, CellFunctionDouble)
if cf is None:
cf = CellFunction('bool', est.mesh())
else:
assert isinstance(cf, CellFunctionBool)
# Take appropriate powers (operating on a copy)
_est = MeshFunction('double', est)
np.abs(_est.array(), out=_est.array())
_est.array()[:] **= q
# Call Dorfler marking
not_working_in_parallel("Dorfler marking strategy")
dorfler_mark(cf, _est, fraction)
return cf
@staticmethod
def vecarray_to_cellfunction(arr, space, cf=None):
"""Convert numpy array coming from function.vector().array() for
P0 function to CellFunction, optionally existing cf.
"""
assert space.ufl_element().family() == "Discontinuous Lagrange"
assert space.ufl_element().degree() == 0
assert space.ufl_element().value_shape() == ()
if cf is None:
cf = CellFunction('double', space.mesh())
else:
assert isinstance(cf, CellFunctionDouble)
cell_dofs = space.dofmap().cell_dofs
for c in cells(space.mesh()):
cf[c] = arr[cell_dofs(c.index())[0]]
return cf
| gpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/dask/array/numpy_compat.py | 6 | 13707 | from __future__ import absolute_import, division, print_function
from ..compatibility import builtins
import numpy as np
import warnings
try:
isclose = np.isclose
except AttributeError:
def isclose(*args, **kwargs):
raise RuntimeError("You need numpy version 1.7 or greater to use "
"isclose.")
try:
full = np.full
except AttributeError:
def full(shape, fill_value, dtype=None, order=None):
"""Our implementation of numpy.full because your numpy is old."""
if order is not None:
raise NotImplementedError("`order` kwarg is not supported upgrade "
"to Numpy 1.8 or greater for support.")
return np.multiply(fill_value, np.ones(shape, dtype=dtype),
dtype=dtype)
# Taken from scikit-learn:
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py#L84
try:
with warnings.catch_warnings():
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float)) or
not np.allclose(np.divide(1, .5, dtype='i8'), 2) or
not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Divide with dtype doesn't work on Python 3
def divide(x1, x2, out=None, dtype=None):
"""Implementation of numpy.divide that works with dtype kwarg.
Temporary compatibility fix for a bug in numpy's version. See
https://github.com/numpy/numpy/issues/3484 for the relevant issue."""
x = np.divide(x1, x2, out)
if dtype is not None:
x = x.astype(dtype)
return x
# functions copied from numpy
try:
from numpy import broadcast_to, nanprod, nancumsum, nancumprod
except ImportError: # pragma: no cover
# these functions should arrive in numpy v1.10 to v1.12. Until then,
# they are duplicated here
# See https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
def _broadcast_to(array, shape, subok, readonly):
shape = tuple(shape) if np.iterable(shape) else (shape,)
array = np.array(array, copy=False, subok=subok)
if not shape and array.shape:
raise ValueError('cannot broadcast a non-scalar to a scalar array')
if builtins.any(size < 0 for size in shape):
raise ValueError('all elements of broadcast shape must be non-'
'negative')
broadcast = np.nditer(
(array,), flags=['multi_index', 'zerosize_ok', 'refs_ok'],
op_flags=['readonly'], itershape=shape, order='C').itviews[0]
result = _maybe_view_as_subclass(array, broadcast)
if not readonly and array.flags.writeable:
result.flags.writeable = True
return result
def broadcast_to(array, shape, subok=False):
"""Broadcast an array to a new shape.
Parameters
----------
array : array_like
The array to broadcast.
shape : tuple
The shape of the desired array.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
ValueError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3)) # doctest: +SKIP
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
return _broadcast_to(array, shape, subok=subok, readonly=True)
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
the `val` value, and return the copy together with a boolean mask
marking the locations where NaNs were present. If `a` is not of
inexact type, do nothing and return `a` together with a mask of None.
Note that scalars will end up as array scalars, which is important
for using the result as the value of the out argument in some
operations.
Parameters
----------
a : array-like
Input array.
val : float
NaN values are set to val before doing the operation.
Returns
-------
y : ndarray
If `a` is of inexact type, return a copy of `a` with the NaNs
replaced by the fill value, otherwise return `a`.
mask: {bool, None}
If `a` is of inexact type, return a boolean mask marking locations of
NaNs, otherwise return None.
"""
is_new = not isinstance(a, np.ndarray)
if is_new:
a = np.array(a)
if not issubclass(a.dtype.type, np.inexact):
return a, None
if not is_new:
# need copy
a = np.array(a, subok=True)
mask = np.isnan(a)
np.copyto(a, val, where=mask)
return a, mask
def nanprod(a, axis=None, dtype=None, out=None, keepdims=0):
"""
Return the product of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
One is returned for slices that are all-NaN or empty.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the product is computed. The default is to compute
the product of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
elements are summed. By default, the dtype of `a` is used. An
exception is when `a` has an integer type with less precision than
the platform (u)intp. In that case, the default will be either
(u)int32 or (u)int64 depending on whether the platform is 32 or 64
bits. For inexact inputs, dtype must be inexact.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``. If provided, it must have the same shape as the
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details. The casting of NaN to integer can yield
unexpected results.
keepdims : bool, optional
If True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will
broadcast correctly against the original `arr`.
Returns
-------
y : ndarray or numpy scalar
See Also
--------
numpy.prod : Product across array propagating NaNs.
isnan : Show which elements are NaN.
Notes
-----
Numpy integer arithmetic is modular. If the size of a product exceeds
the size of an integer accumulator, its value will wrap around and the
result will be incorrect. Specifying ``dtype=double`` can alleviate
that problem.
Examples
--------
>>> np.nanprod(1)
1
>>> np.nanprod([1])
1
>>> np.nanprod([1, np.nan])
1.0
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanprod(a)
6.0
>>> np.nanprod(a, axis=0)
array([ 3., 2.])
"""
a, mask = _replace_nan(a, 1)
return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nancumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are
encountered and leading NaNs are replaced by zeros.
Zeros are returned for slices that are all-NaN or empty.
.. versionadded:: 1.12.0
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
nancumsum : ndarray.
A new array holding the result is returned unless `out` is
specified, in which it is returned. The result has the same
size as `a`, and the same shape as `a` if `axis` is not None
or `a` is a 1-d array.
See Also
--------
numpy.cumsum : Cumulative sum across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nancumsum(1) #doctest: +SKIP
array([1])
>>> np.nancumsum([1]) #doctest: +SKIP
array([1])
>>> np.nancumsum([1, np.nan]) #doctest: +SKIP
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumsum(a) #doctest: +SKIP
array([ 1., 3., 6., 6.])
>>> np.nancumsum(a, axis=0) #doctest: +SKIP
array([[ 1., 2.],
[ 4., 2.]])
>>> np.nancumsum(a, axis=1) #doctest: +SKIP
array([[ 1., 3.],
[ 3., 3.]])
"""
a, mask = _replace_nan(a, 0)
return np.cumsum(a, axis=axis, dtype=dtype, out=out)
def nancumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of array elements over a given axis treating Not a
Numbers (NaNs) as one. The cumulative product does not change when NaNs are
encountered and leading NaNs are replaced by ones.
Ones are returned for slices that are all-NaN or empty.
.. versionadded:: 1.12.0
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
nancumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case it is returned.
See Also
--------
numpy.cumprod : Cumulative product across array propagating NaNs.
isnan : Show which elements are NaN.
Examples
--------
>>> np.nancumprod(1) #doctest: +SKIP
array([1])
>>> np.nancumprod([1]) #doctest: +SKIP
array([1])
>>> np.nancumprod([1, np.nan]) #doctest: +SKIP
array([ 1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumprod(a) #doctest: +SKIP
array([ 1., 2., 6., 6.])
>>> np.nancumprod(a, axis=0) #doctest: +SKIP
array([[ 1., 2.],
[ 3., 2.]])
>>> np.nancumprod(a, axis=1) #doctest: +SKIP
array([[ 1., 2.],
[ 3., 3.]])
"""
a, mask = _replace_nan(a, 1)
return np.cumprod(a, axis=axis, dtype=dtype, out=out)
| mit |
pnedunuri/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
marcusmueller/gnuradio | gr-fec/python/fec/polar/channel_construction_bec.py | 7 | 8225 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from . import helper_functions as hf
def bec_channel(eta):
'''
binary erasure channel (BEC)
for each y in Y
W(y|0) * W(y|1) = 0 or W(y|0) = W(y|1)
transitions are 1 -> 1 or 0 -> 0 or {0, 1} -> ? (erased symbol)
'''
# looks like BSC but should be interpreted differently.
w = np.array((1 - eta, eta, 1 - eta), dtype=float)
return w
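# Worked example: for an erasure probability eta = 0.1 the returned vector is
# simply (1 - eta, eta, 1 - eta).
#
#   >>> bec_channel(0.1)
#   array([ 0.9,  0.1,  0.9])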
def odd_rec(iwn):
return iwn ** 2
def even_rec(iwn):
return 2 * iwn - iwn ** 2
def calc_one_recursion(iw0):
iw1 = np.zeros(2 * len(iw0)) # double values
for i in range(len(iw0)):
        # careful: the indexing is easy to get wrong because the paper is 1-based
iw1[2 * i] = odd_rec(iw0[i])
iw1[2 * i + 1] = even_rec(iw0[i])
return iw1
def calculate_bec_channel_capacities_loop(initial_channel, block_power):
# compare [0, Arikan] eq. 6
iw = np.array([initial_channel, ], dtype=float)
for i in range(block_power):
iw = calc_one_recursion(iw)
return iw
def calc_vector_capacities_one_recursion(iw0):
degraded = odd_rec(iw0)
upgraded = even_rec(iw0)
iw1 = np.empty(2 * len(iw0), dtype=degraded.dtype)
iw1[0::2] = degraded
iw1[1::2] = upgraded
return iw1
def calculate_bec_channel_capacities_vector(initial_channel, block_power):
# compare [0, Arikan] eq. 6
# this version is ~ 180 times faster than the loop version with 2**22 synthetic channels
iw = np.array([initial_channel, ], dtype=float)
for i in range(block_power):
iw = calc_vector_capacities_one_recursion(iw)
return iw
def calculate_bec_channel_capacities(eta, block_size):
# compare [0, Arikan] eq. 6
iw = 1 - eta # holds for BEC as stated in paper
lw = hf.power_of_2_int(block_size)
return calculate_bec_channel_capacities_vector(iw, lw)
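# Worked example: starting from I(W) = 1 - eta = 0.5, two recursions
# (block_size = 4) give the synthetic channel capacities in natural order;
# each step maps I to (I**2, 2*I - I**2).
#
#   >>> calculate_bec_channel_capacities(0.5, 4)
#   array([ 0.0625,  0.4375,  0.5625,  0.9375])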
def calculate_z_parameters_one_recursion(z_params):
z_next = np.empty(2 * z_params.size, dtype=z_params.dtype)
z_sq = z_params ** 2
z_low = 2 * z_params - z_sq
z_next[0::2] = z_low
z_next[1::2] = z_sq
return z_next
def calculate_bec_channel_z_parameters(eta, block_size):
# compare [0, Arikan] eq. 38
block_power = hf.power_of_2_int(block_size)
z_params = np.array([eta, ], dtype=float)
for block_size in range(block_power):
z_params = calculate_z_parameters_one_recursion(z_params)
return z_params
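# Worked example: for eta = 0.5 and block_size = 4 the recursion maps Z to
# (2*Z - Z**2, Z**2) and yields
#
#   >>> calculate_bec_channel_z_parameters(0.5, 4)
#   array([ 0.9375,  0.5625,  0.4375,  0.0625])
#
# consistent with the BEC identity capacity(i) = 1 - Z(i) and the capacities
# computed above.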
def design_snr_to_bec_eta(design_snr):
# minimum design snr = -1.5917 corresponds to BER = 0.5
s = 10. ** (design_snr / 10.)
return np.exp(-s)
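# Worked example: for the minimum design SNR of -1.5917 dB,
# s = 10 ** (-0.15917) ~= 0.6931 and eta = exp(-0.6931) ~= 0.5, i.e. the
# BER = 0.5 case noted above.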
def bhattacharyya_bounds(design_snr, block_size):
'''
Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
In this paper it is called Bhattacharyya bounds channel construction and is abbreviated PCC-0
Best design SNR for block_size = 2048, R = 0.5, is 0dB.
    Compare with Arikan: 'Channel Polarization: A Method for Constructing Capacity-Achieving Codes for Symmetric Binary-Input Memoryless Channels'.
    Proposition 5: the inequalities turn into equalities for the BEC channel. Otherwise they represent an upper bound.
Also compare [0, Arikan] eq. 6 and 38
For BEC that translates to capacity(i) = 1 - bhattacharyya(i)
:return Z-parameters in natural bit-order. Choose according to desired rate.
'''
eta = design_snr_to_bec_eta(design_snr)
return calculate_bec_channel_z_parameters(eta, block_size)
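# A minimal sketch of how the Z-parameters returned above are commonly used:
# keep the k synthetic channels with the smallest Bhattacharyya parameter
# (the most reliable ones) as information positions and freeze the rest.
# The helper name is hypothetical and not part of this module's API.
def select_information_positions(z_params, k):
    # indices of the k most reliable (smallest Z) channels, in bit order
    return np.sort(np.argsort(z_params)[:k])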
def plot_channel_capacities(capacity, save_file=None):
block_size = len(capacity)
try:
import matplotlib.pyplot as plt
# FUN with matplotlib LaTeX fonts! http://matplotlib.org/users/usetex.html
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('figure', autolayout=True)
plt.plot(capacity)
plt.xlim([0, block_size])
plt.ylim([-0.01, 1.01])
plt.xlabel('synthetic channel number')
plt.ylabel('channel capacity')
# plt.title('BEC channel construction')
plt.grid()
plt.gcf().set_size_inches(plt.gcf().get_size_inches() * .5)
if save_file:
plt.savefig(save_file)
plt.show()
except ImportError:
pass # only plot in case matplotlib is installed
def plot_average_channel_distance(save_file=None):
eta = 0.5 # design_snr_to_bec_eta(-1.5917)
powers = np.arange(4, 26)
try:
import matplotlib.pyplot as plt
import matplotlib
# FUN with matplotlib LaTeX fonts! http://matplotlib.org/users/usetex.html
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('figure', autolayout=True)
dist = []
medians = []
initial_channel = 1 - eta
for p in powers:
bs = int(2 ** p)
capacities = calculate_bec_channel_capacities(eta, bs)
avg_capacity = np.repeat(initial_channel, len(capacities))
averages = np.abs(capacities - avg_capacity)
avg_distance = np.sum(averages) / float(len(capacities))
dist.append(avg_distance)
variance = np.std(averages)
medians.append(variance)
plt.errorbar(powers, dist, yerr=medians)
plt.grid()
plt.xlabel(r'block size $N$')
plt.ylabel(r'$\frac{1}{N} \sum_i |I(W_N^{(i)}) - 0.5|$')
axes = plt.axes()
tick_values = np.array(axes.get_xticks().tolist())
tick_labels = np.array(tick_values, dtype=int)
tick_labels = ['$2^{' + str(i) + '}$' for i in tick_labels]
plt.xticks(tick_values, tick_labels)
plt.xlim((powers[0], powers[-1]))
plt.ylim((0.2, 0.5001))
plt.gcf().set_size_inches(plt.gcf().get_size_inches() * .5)
if save_file:
plt.savefig(save_file)
plt.show()
except ImportError:
pass
def plot_capacity_histogram(design_snr, save_file=None):
eta = design_snr_to_bec_eta(design_snr)
# capacities = calculate_bec_channel_capacities(eta, block_size)
try:
import matplotlib.pyplot as plt
# FUN with matplotlib LaTeX fonts! http://matplotlib.org/users/usetex.html
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('figure', autolayout=True)
block_sizes = [32, 128, 512]
for b in block_sizes:
capacities = calculate_bec_channel_capacities(eta, b)
w = 1. / float(len(capacities))
weights = [w, ] * b
plt.hist(capacities, bins=b, weights=weights, range=(0.95, 1.0))
plt.grid()
plt.xlabel('synthetic channel capacity')
plt.ylabel('normalized item count')
print(plt.gcf().get_size_inches())
plt.gcf().set_size_inches(plt.gcf().get_size_inches() * .5)
if save_file:
plt.savefig(save_file)
plt.show()
except ImportError:
pass
def main():
print('channel construction main')
n = 11
block_size = int(2 ** n)
design_snr = -1.59
eta = design_snr_to_bec_eta(design_snr)
# print(calculate_bec_channel_z_parameters(eta, block_size))
# capacity = calculate_bec_channel_capacities(eta, block_size)
# plot_average_channel_distance()
calculate_bec_channel_z_parameters(eta, block_size)
if __name__ == '__main__':
main()
| gpl-3.0 |
kernc/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
marqh/iris | docs/iris/example_code/Meteorology/COP_maps.py | 3 | 5588 | """
Global average annual temperature maps
======================================
Produces maps of global temperature forecasts from the A1B and E1 scenarios.
The data used comes from the HadGEM2-AO model simulations for the A1B and E1
scenarios, both of which were derived using the IMAGE Integrated Assessment
Model (Johns et al. 2011; Lowe et al. 2009).
References
----------
Johns T.C., et al. (2011) Climate change under aggressive mitigation: the
ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10,
doi:10.1007/s00382-011-1005-5.
Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F.
Royer, and P. van der Linden, 2009. New Study For Climate Modeling,
Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21,
doi:10.1029/2009EO210001.
"""
from six.moves import zip
import os.path
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.coords as coords
import iris.plot as iplt
def cop_metadata_callback(cube, field, filename):
"""
A function which adds an "Experiment" coordinate which comes from the
filename.
"""
# Extract the experiment name (such as a1b or e1) from the filename (in
# this case it is just the parent folder's name)
containing_folder = os.path.dirname(filename)
experiment_label = os.path.basename(containing_folder)
# Create a coordinate with the experiment label in it
exp_coord = coords.AuxCoord(experiment_label, long_name='Experiment',
units='no_unit')
# and add it to the cube
cube.add_aux_coord(exp_coord)
def main():
# Load e1 and a1 using the callback to update the metadata
e1 = iris.load_cube(iris.sample_data_path('E1.2098.pp'),
callback=cop_metadata_callback)
a1b = iris.load_cube(iris.sample_data_path('A1B.2098.pp'),
callback=cop_metadata_callback)
    # Load the global average data and add an 'Experiment' coord to it
global_avg = iris.load_cube(iris.sample_data_path('pre-industrial.pp'))
# Define evenly spaced contour levels: -2.5, -1.5, ... 15.5, 16.5 with the
# specific colours
levels = np.arange(20) - 2.5
red = np.array([0, 0, 221, 239, 229, 217, 239, 234, 228, 222, 205, 196,
161, 137, 116, 89, 77, 60, 51]) / 256.
green = np.array([16, 217, 242, 243, 235, 225, 190, 160, 128, 87, 72, 59,
33, 21, 29, 30, 30, 29, 26]) / 256.
blue = np.array([255, 255, 243, 169, 99, 51, 63, 37, 39, 21, 27, 23, 22,
26, 29, 28, 27, 25, 22]) / 256.
# Put those colours into an array which can be passed to contourf as the
# specific colours for each level
colors = np.array([red, green, blue]).T
    # Subtract the global average from each scenario (done per-slice in the loop below)
# Iterate over each latitude longitude slice for both e1 and a1b scenarios
# simultaneously
for e1_slice, a1b_slice in zip(e1.slices(['latitude', 'longitude']),
a1b.slices(['latitude', 'longitude'])):
time_coord = a1b_slice.coord('time')
# Calculate the difference from the mean
delta_e1 = e1_slice - global_avg
delta_a1b = a1b_slice - global_avg
# Make a wider than normal figure to house two maps side-by-side
fig = plt.figure(figsize=(12, 5))
# Get the time datetime from the coordinate
time = time_coord.units.num2date(time_coord.points[0])
# Set a title for the entire figure, giving the time in a nice format
# of "MonthName Year". Also, set the y value for the title so that it
# is not tight to the top of the plot.
fig.suptitle(
'Annual Temperature Predictions for ' + time.strftime("%Y"),
y=0.9,
fontsize=18)
# Add the first subplot showing the E1 scenario
plt.subplot(121)
plt.title('HadGEM2 E1 Scenario', fontsize=10)
iplt.contourf(delta_e1, levels, colors=colors, linewidth=0,
extend='both')
plt.gca().coastlines()
# get the current axes' subplot for use later on
plt1_ax = plt.gca()
# Add the second subplot showing the A1B scenario
plt.subplot(122)
plt.title('HadGEM2 A1B-Image Scenario', fontsize=10)
contour_result = iplt.contourf(delta_a1b, levels, colors=colors,
linewidth=0, extend='both')
plt.gca().coastlines()
# get the current axes' subplot for use later on
plt2_ax = plt.gca()
        # Now add a colourbar whose leftmost point is the same as the leftmost
# point of the left hand plot and rightmost point is the rightmost
# point of the right hand plot
# Get the positions of the 2nd plot and the left position of the 1st
# plot
left, bottom, width, height = plt2_ax.get_position().bounds
first_plot_left = plt1_ax.get_position().bounds[0]
# the width of the colorbar should now be simple
width = left - first_plot_left + width
# Add axes to the figure, to place the colour bar
colorbar_axes = fig.add_axes([first_plot_left, bottom + 0.07,
width, 0.03])
# Add the colour bar
cbar = plt.colorbar(contour_result, colorbar_axes,
orientation='horizontal')
# Label the colour bar and add ticks
cbar.set_label(e1_slice.units)
cbar.ax.tick_params(length=0)
iplt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
dmitriz/zipline | zipline/assets/assets.py | 1 | 29230 | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from numbers import Integral
from operator import itemgetter
import warnings
from logbook import Logger
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import with_metaclass, string_types, viewkeys
import sqlalchemy as sa
from toolz import compose
from zipline.errors import (
MultipleSymbolsFound,
RootSymbolNotFound,
SidNotFound,
SymbolNotFound,
MapAssetIdentifierIndexError,
)
from zipline.assets import (
Asset, Equity, Future,
)
from zipline.assets.asset_writer import (
split_delimited_symbol,
check_version_info,
ASSET_DB_VERSION,
asset_db_table_names,
)
log = Logger('assets.py')
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
})
def _convert_asset_timestamp_fields(dict):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key in (_asset_timestamp_fields & viewkeys(dict)):
value = pd.Timestamp(dict[key], tz='UTC')
dict[key] = None if pd.isnull(value) else value
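# Illustrative example: date-like fields are normalized in place to UTC
# pandas Timestamps, and values that parse to NaT are replaced by None
# (guaranteed by the pd.isnull check above).
#
#   >>> d = {'sid': 1, 'start_date': '2014-01-02'}
#   >>> _convert_asset_timestamp_fields(d)
#   >>> d['start_date']
#   Timestamp('2014-01-02 00:00:00+0000', tz='UTC')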
class AssetFinder(object):
# Token used as a substitute for pickling objects that contain a
# reference to an AssetFinder
PERSISTENT_TOKEN = "<AssetFinder>"
def __init__(self, engine):
self.engine = engine
metadata = sa.MetaData(bind=engine)
metadata.reflect(only=asset_db_table_names)
for table_name in asset_db_table_names:
setattr(self, table_name, metadata.tables[table_name])
# Check the version info of the db for compatibility
check_version_info(self.version_info, ASSET_DB_VERSION)
# Cache for lookup of assets by sid, the objects in the asset lookup
# may be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset, _retrieve_equity etc. will populate the cache on
# first retrieval.
self._asset_cache = {}
self._equity_cache = {}
self._future_cache = {}
self._asset_type_cache = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = None
def asset_type_by_sid(self, sid):
"""
Retrieve the asset type of a given sid.
"""
try:
return self._asset_type_cache[sid]
except KeyError:
pass
asset_type = sa.select((self.asset_router.c.asset_type,)).where(
self.asset_router.c.sid == int(sid),
).scalar()
if asset_type is not None:
self._asset_type_cache[sid] = asset_type
return asset_type
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset object of a given sid.
"""
if isinstance(sid, Asset):
return sid
try:
asset = self._asset_cache[sid]
except KeyError:
asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
asset = self._retrieve_equity(sid)
elif asset_type == 'future':
asset = self._retrieve_futures_contract(sid)
else:
asset = None
# Cache the asset if it has been retrieved
if asset is not None:
self._asset_cache[sid] = asset
if asset is not None:
return asset
elif default_none:
return None
else:
raise SidNotFound(sid=sid)
def retrieve_all(self, sids, default_none=False):
return [self.retrieve_asset(sid, default_none) for sid in sids]
def _retrieve_equity(self, sid):
"""
Retrieve the Equity object of a given sid.
"""
return self._retrieve_asset(
sid, self._equity_cache, self.equities, Equity,
)
def _retrieve_futures_contract(self, sid):
"""
Retrieve the Future object of a given sid.
"""
return self._retrieve_asset(
sid, self._future_cache, self.futures_contracts, Future,
)
@staticmethod
def _select_asset_by_sid(asset_tbl, sid):
return sa.select([asset_tbl]).where(asset_tbl.c.sid == int(sid))
@staticmethod
def _select_asset_by_symbol(asset_tbl, symbol):
return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
def _retrieve_asset(self, sid, cache, asset_tbl, asset_type):
try:
return cache[sid]
except KeyError:
pass
data = self._select_asset_by_sid(asset_tbl, sid).execute().fetchone()
# Convert 'data' from a RowProxy object to a dict, to allow assignment
data = dict(data.items())
if data:
_convert_asset_timestamp_fields(data)
asset = asset_type(**data)
else:
asset = None
cache[sid] = asset
return asset
def _get_fuzzy_candidates(self, fuzzy_symbol):
candidates = sa.select(
(self.equities.c.sid,)
).where(self.equities.c.fuzzy_symbol == fuzzy_symbol).order_by(
self.equities.c.start_date.desc(),
self.equities.c.end_date.desc()
).execute().fetchall()
return candidates
def _get_fuzzy_candidates_in_range(self, fuzzy_symbol, ad_value):
candidates = sa.select(
(self.equities.c.sid,)
).where(
sa.and_(
self.equities.c.fuzzy_symbol == fuzzy_symbol,
self.equities.c.start_date <= ad_value,
self.equities.c.end_date >= ad_value
)
).order_by(
self.equities.c.start_date.desc(),
self.equities.c.end_date.desc(),
).execute().fetchall()
return candidates
def _get_split_candidates_in_range(self,
company_symbol,
share_class_symbol,
ad_value):
candidates = sa.select(
(self.equities.c.sid,)
).where(
sa.and_(
self.equities.c.company_symbol == company_symbol,
self.equities.c.share_class_symbol == share_class_symbol,
self.equities.c.start_date <= ad_value,
self.equities.c.end_date >= ad_value
)
).order_by(
self.equities.c.start_date.desc(),
self.equities.c.end_date.desc(),
).execute().fetchall()
return candidates
def _get_split_candidates(self, company_symbol, share_class_symbol):
candidates = sa.select(
(self.equities.c.sid,)
).where(
sa.and_(
self.equities.c.company_symbol == company_symbol,
self.equities.c.share_class_symbol == share_class_symbol
)
).order_by(
self.equities.c.start_date.desc(),
self.equities.c.end_date.desc(),
).execute().fetchall()
return candidates
def _resolve_no_matching_candidates(self,
company_symbol,
share_class_symbol,
ad_value):
candidates = sa.select((self.equities.c.sid,)).where(
sa.and_(
self.equities.c.company_symbol == company_symbol,
self.equities.c.share_class_symbol ==
share_class_symbol,
self.equities.c.start_date <= ad_value),
).order_by(
self.equities.c.end_date.desc(),
).execute().fetchall()
return candidates
def _get_best_candidate(self, candidates):
return self._retrieve_equity(candidates[0]['sid'])
def _get_equities_from_candidates(self, candidates):
return list(map(
compose(self._retrieve_equity, itemgetter('sid')),
candidates,
))
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""
Return matching Equity of name symbol in database.
If multiple Equities are found and as_of_date is not set,
raises MultipleSymbolsFound.
If no Equity was active at as_of_date raises SymbolNotFound.
"""
company_symbol, share_class_symbol, fuzzy_symbol = \
split_delimited_symbol(symbol)
if as_of_date:
# Format inputs
as_of_date = pd.Timestamp(normalize_date(as_of_date))
ad_value = as_of_date.value
if fuzzy:
# Search for a single exact match on the fuzzy column
candidates = self._get_fuzzy_candidates_in_range(fuzzy_symbol,
ad_value)
# If exactly one SID exists for fuzzy_symbol, return that sid
if len(candidates) == 1:
return self._get_best_candidate(candidates)
# Search for exact matches of the split-up company_symbol and
# share_class_symbol
candidates = self._get_split_candidates_in_range(
company_symbol,
share_class_symbol,
ad_value
)
# If exactly one SID exists for symbol, return that symbol
# If multiple SIDs exist for symbol, return latest start_date with
# end_date as a tie-breaker
if candidates:
return self._get_best_candidate(candidates)
# If no SID exists for symbol, return SID with the
# highest-but-not-over end_date
elif not candidates:
candidates = self._resolve_no_matching_candidates(
company_symbol,
share_class_symbol,
ad_value
)
if candidates:
return self._get_best_candidate(candidates)
raise SymbolNotFound(symbol=symbol)
else:
# If this is a fuzzy look-up, check if there is exactly one match
# for the fuzzy symbol
if fuzzy:
candidates = self._get_fuzzy_candidates(fuzzy_symbol)
if len(candidates) == 1:
return self._get_best_candidate(candidates)
candidates = self._get_split_candidates(company_symbol,
share_class_symbol)
if len(candidates) == 1:
return self._get_best_candidate(candidates)
elif not candidates:
raise SymbolNotFound(symbol=symbol)
else:
raise MultipleSymbolsFound(
symbol=symbol,
options=self._get_equities_from_candidates(candidates)
)
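    # A minimal usage sketch; `finder` is an AssetFinder instance and the
    # symbols shown are assumptions about what the asset db contains.
    #
    #   finder.lookup_symbol('AAPL', pd.Timestamp('2015-01-02', tz='UTC'))
    #   finder.lookup_symbol('BRK_A', None, fuzzy=True)
    #
    # With as_of_date=None and more than one candidate, MultipleSymbolsFound
    # is raised carrying the candidate Equities in its `options` attribute.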
def lookup_future_symbol(self, symbol):
""" Return the Future object for a given symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
Future
A Future object.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
# If we find a contract, check whether it's been cached
try:
return self._future_cache[data['sid']]
except KeyError:
pass
# Build the Future object from its parameters
data = dict(data.items())
_convert_asset_timestamp_fields(data)
future = Future(**data)
# Cache the Future object.
self._future_cache[data['sid']] = future
return future
def lookup_future_chain(self, root_symbol, as_of_date):
""" Return the futures chain for a given root symbol.
Parameters
----------
root_symbol : str
Root symbol of the desired future.
as_of_date : pd.Timestamp or pd.NaT
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date/expiration date is first
after this date is the primary contract, etc. If NaT is
given, the chain is unbounded, and all contracts for this
root symbol are returned.
Returns
-------
list
A list of Future objects, the chain for the given
parameters.
Raises
------
RootSymbolNotFound
Raised when a future chain could not be found for the given
root symbol.
"""
fc_cols = self.futures_contracts.c
if as_of_date is pd.NaT:
# If the as_of_date is NaT, get all contracts for this
# root symbol.
sids = list(map(
itemgetter('sid'),
sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol),
).order_by(
fc_cols.notice_date.asc(),
).execute().fetchall()))
else:
as_of_date = as_of_date.value
sids = list(map(
itemgetter('sid'),
sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
# Filter to contracts that are still valid. If both
# exist, use the one that comes first in time (i.e.
# the lower value). If either notice_date or
# expiration_date is NaT, use the other. If both are
# NaT, the contract cannot be included in any chain.
sa.case(
[
(
fc_cols.notice_date == pd.NaT.value,
fc_cols.expiration_date >= as_of_date
),
(
fc_cols.expiration_date == pd.NaT.value,
fc_cols.notice_date >= as_of_date
)
],
else_=(
sa.func.min(
fc_cols.notice_date,
fc_cols.expiration_date
) >= as_of_date
)
)
).order_by(
# Sort using expiration_date if valid. If it's NaT,
# use notice_date instead.
sa.case(
[
(
fc_cols.expiration_date == pd.NaT.value,
fc_cols.notice_date
)
],
else_=fc_cols.expiration_date
).asc()
).execute().fetchall()
))
if not sids:
# Check if root symbol exists.
count = sa.select((sa.func.count(fc_cols.sid),)).where(
fc_cols.root_symbol == root_symbol,
).scalar()
if count == 0:
raise RootSymbolNotFound(root_symbol=root_symbol)
return list(map(self._retrieve_futures_contract, sids))
@property
def sids(self):
return tuple(map(
itemgetter('sid'),
sa.select((self.asset_router.c.sid,)).execute().fetchall(),
))
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
try:
result = self.retrieve_asset(int(asset_convertible))
except SidNotFound:
missing.append(asset_convertible)
return None
matches.append(result)
elif isinstance(asset_convertible, string_types):
try:
matches.append(
self.lookup_symbol(asset_convertible, as_of_date)
)
except SymbolNotFound:
missing.append(asset_convertible)
return None
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
        Convert an AssetConvertible or iterable of AssetConvertibles into
a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidNotFound(sid=asset_convertible_or_iterable)
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
-------
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
# Handle missing assets
if len(missing) > 0:
warnings.warn("Missing assets for identifiers: %s" % missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
def _compute_asset_lifetimes(self):
"""
        Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).execute(),
), dtype='<f8', # use doubles so we get NaNs
)
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', '<f8'),
('start', '<f8'),
('end', '<f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', '<i8'),
('start', '<i8'),
('end', '<i8'),
])
def lifetimes(self, dates, include_start_date):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
# This is a less than ideal place to do this, because if someone adds
# assets to the finder after we've touched lifetimes we won't have
# those new assets available. Mutability is not my favorite
# programming feature.
if self._asset_lifetimes is None:
self._asset_lifetimes = self._compute_asset_lifetimes()
lifetimes = self._asset_lifetimes
raw_dates = dates.asi8[:, None]
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
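    # A minimal usage sketch; `dates` is a pd.DatetimeIndex of trading days
    # and 24 is a hypothetical sid.
    #
    #   alive = finder.lifetimes(dates, include_start_date=False)
    #   alive.loc[dates[0], 24]  # True iff sid 24 started strictly before
    #                            # dates[0] and had not yet ended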
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
pass
class AssetFinderCachedEquities(AssetFinder):
"""
An extension to AssetFinder that loads all equities from equities table
into memory and overrides the methods that lookup_symbol uses to look up
those equities.
"""
def __init__(self, engine):
super(AssetFinderCachedEquities, self).__init__(engine)
self.fuzzy_symbol_hashed_equities = {}
self.company_share_class_hashed_equities = {}
self.hashed_equities = sa.select(self.equities.c).execute().fetchall()
self._load_hashed_equities()
def _load_hashed_equities(self):
"""
Populates two maps - fuzzy symbol to list of equities having that
fuzzy symbol and company symbol/share class symbol to list of
equities having that combination of company symbol/share class symbol.
"""
for equity in self.hashed_equities:
company_symbol = equity['company_symbol']
share_class_symbol = equity['share_class_symbol']
fuzzy_symbol = equity['fuzzy_symbol']
asset = self._convert_row_to_equity(equity)
self.company_share_class_hashed_equities.setdefault(
(company_symbol, share_class_symbol),
[]
).append(asset)
self.fuzzy_symbol_hashed_equities.setdefault(
fuzzy_symbol, []
).append(asset)
def _convert_row_to_equity(self, equity):
"""
Converts a SQLAlchemy equity row to an Equity object.
"""
data = dict(equity.items())
_convert_asset_timestamp_fields(data)
asset = Equity(**data)
return asset
def _get_fuzzy_candidates(self, fuzzy_symbol):
if fuzzy_symbol in self.fuzzy_symbol_hashed_equities:
return self.fuzzy_symbol_hashed_equities[fuzzy_symbol]
return []
def _get_fuzzy_candidates_in_range(self, fuzzy_symbol, ad_value):
equities = self._get_fuzzy_candidates(fuzzy_symbol)
fuzzy_candidates = []
for equity in equities:
if (equity.start_date.value <=
ad_value <=
equity.end_date.value):
fuzzy_candidates.append(equity)
return fuzzy_candidates
def _get_split_candidates(self, company_symbol, share_class_symbol):
if (company_symbol, share_class_symbol) in \
self.company_share_class_hashed_equities:
return self.company_share_class_hashed_equities[(
company_symbol, share_class_symbol)]
return []
def _get_split_candidates_in_range(self,
company_symbol,
share_class_symbol,
ad_value):
equities = self._get_split_candidates(
company_symbol, share_class_symbol
)
best_candidates = []
for equity in equities:
if (equity.start_date.value <=
ad_value <=
equity.end_date.value):
best_candidates.append(equity)
if best_candidates:
best_candidates = sorted(
best_candidates,
key=lambda x: (x.start_date, x.end_date),
reverse=True
)
return best_candidates
def _resolve_no_matching_candidates(self,
company_symbol,
share_class_symbol,
ad_value):
equities = self._get_split_candidates(
company_symbol, share_class_symbol
)
partial_candidates = []
for equity in equities:
if equity.start_date.value <= ad_value:
partial_candidates.append(equity)
if partial_candidates:
partial_candidates = sorted(
partial_candidates,
key=lambda x: x.end_date,
reverse=True
)
return partial_candidates
def _get_best_candidate(self, candidates):
return candidates[0]
def _get_equities_from_candidates(self, candidates):
return candidates
| apache-2.0 |
Crobisaur/HyperSpec | Python/tifffile.py | 1 | 205137 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2015, Christoph Gohlke
# Copyright (c) 2008-2015, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read image and meta data from (bio)TIFF files. Save numpy arrays as TIFF.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented. Only primary info records are read for
STK, FluoView, MicroManager, and NIH Image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run `python tifffile.py --help`
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2015.08.17
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_ (64 bit recommended)
* `Numpy 1.9.2 <http://www.numpy.org>`_
* `Matplotlib 1.4.3 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2015.08.17 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Revisions
---------
2015.08.17
Pass 1906 tests.
Write ImageJ hyperstacks (optional).
Read and write LZMA compressed data.
Specify datetime when saving (optional).
Save tiled and color-mapped images (optional).
Ignore void byte_counts and offsets if possible.
Ignore bogus image_depth tag created by ISS Vista software.
Decode floating point horizontal differencing (not tiled).
Save image data contiguously if possible.
Only read first IFD from ImageJ files if possible.
Read ImageJ 'raw' format (files larger than 4 GB).
TiffPageSeries class for pages with compatible shape and data type.
Try to read incomplete tiles.
Open file dialog if no filename is passed on command line.
Ignore errors when decoding OME-XML.
Rename decoder functions (backwards incompatible)
2014.08.24
TiffWriter class for incremental writing images.
Simplified examples.
2014.08.19
Add memmap function to FileHandle.
Add function to determine if image data in TiffPage is memory-mappable.
Do not close files if multifile_close parameter is False.
2014.08.10
Pass 1730 tests.
Return all extrasamples by default (backwards incompatible).
Read data from series of pages into memory-mapped array (optional).
Squeeze OME dimensions (backwards incompatible).
Workaround missing EOI code in strips.
Support image and tile depth tags (SGI extension).
Better handling of STK/UIC tags (backwards incompatible).
Disable color mapping for STK.
Julian to datetime converter.
TIFF ASCII type may be NULL separated.
Unwrap strip offsets for LSM files greater than 4 GB.
Correct strip byte counts in compressed LSM files.
Skip missing files in OME series.
Read embedded TIFF files.
2014.02.05
Save rational numbers as type 5 (bug fix).
2013.12.20
Keep other files in OME multi-file series closed.
FileHandle class to abstract binary file handle.
Disable color mapping for bad OME-TIFF produced by bio-formats.
Read bad OME-XML produced by ImageJ when cropping.
2013.11.03
Allow zlib compress data in imsave function (optional).
Memory-map contiguous image data (optional).
2013.10.28
Read MicroManager metadata and little endian ImageJ tag.
Save extra tags in imsave function.
Save tags in ascending order by code (bug fix).
2012.10.18
Accept file like objects (read from OIB files).
2012.08.21
Rename TIFFfile to TiffFile and TIFFpage to TiffPage.
TiffSequence class for reading sequence of TIFF files.
Read UltraQuant tags.
Allow float numbers as resolution in imsave function.
2012.08.03
Read MD GEL tags and NIH Image header.
2012.07.25
Read ImageJ tags.
...
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
* Christian Kliche for help writing tiled and color-mapped files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
try:
import lzma
except ImportError:
try:
import backports.lzma as lzma
except ImportError:
lzma = None
try:
if __package__:
from . import _tifffile
else:
import _tifffile
except ImportError:
warnings.warn(
"failed to import the optional _tifffile C extension module.\n"
"Loading of some compressed images will be very slow.\n"
"Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2015.08.17'
__docformat__ = 'restructuredtext en'
__all__ = (
'imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter', 'TiffSequence',
# utility functions used in oiffile and czifile
'FileHandle', 'lazyattr', 'natural_sorted', 'decode_lzw', 'stripnull')
def imsave(filename, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', 'software', and 'imagej', are passed
to the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution', 'compress',
'colormap', 'tile', 'description', 'datetime', 'metadata', 'contiguous'
and 'extratags' are passed to the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> metadata = {'axes': 'TZCYX'}
    >>> imsave('temp.tif', data, compress=6, metadata=metadata)
"""
tifargs = {}
for key in ('byteorder', 'bigtiff', 'software', 'imagej'):
if key in kwargs:
tifargs[key] = kwargs[key]
del kwargs[key]
if 'bigtiff' not in tifargs and 'imagej' not in tifargs and (
data.size*data.dtype.itemsize > 2000*2**20):
tifargs['bigtiff'] = True
with TiffWriter(filename, **tifargs) as tif:
tif.save(data, **kwargs)
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the 'close' method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, filename, bigtiff=False, byteorder=None,
software='tifffile.py', imagej=False):
"""Create a new TIFF file for writing.
Use bigtiff=True when creating files greater than 2 GB.
Parameters
----------
filename : str
Name of file to write.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the file.
Saved with the first page in the file only.
imagej : bool
If True, write an ImageJ hyperstack compatible file.
This format can handle data types uint8, uint16, or float32 and
data shapes up to 6 dimensions in TZCYXS order.
RGB images (S=3 or S=4) must be uint8.
ImageJ's default byte order is big endian but this implementation
uses the system's native byte order by default.
ImageJ doesn't support BigTIFF format or LZMA compression.
The ImageJ file format is undocumented.
"""
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
if imagej and bigtiff:
warnings.warn("writing incompatible bigtiff ImageJ")
self._byteorder = byteorder
self._software = software
self._imagej = bool(imagej)
self._metadata = None
self._colormap = None
self._description_offset = 0
self._description_len_offset = 0
self._description_len = 0
self._tags = None
self._shape = None # normalized shape of data in consecutive pages
self._data_shape = None # shape of data in consecutive pages
self._data_dtype = None # data type
self._data_offset = None # offset to data
self._data_byte_counts = None # byte counts per plane
self._tag_offsets = None # strip or tile offset tag code
self._fh = open(filename, 'wb')
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._value_format = '8s'
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._value_format = '4s'
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
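        # Header layout written above: a classic little-endian file starts
        # with b'II' + struct.pack('<H', 42) followed by a 4-byte placeholder
        # for the first IFD offset; BigTIFF writes b'II' +
        # struct.pack('<HHH', 43, 8, 0) and an 8-byte placeholder instead.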
def save(self, data, photometric=None, planarconfig=None, resolution=None,
compress=0, colormap=None, tile=None, datetime=None,
description='', metadata=None, contiguous=True, extratags=()):
"""Write image data and tags to TIFF file.
Image data are written in one stripe per plane by default.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' tags are derived from
the data type.
Parameters
----------
data : numpy.ndarray
Input image. The last dimensions are assumed to be image depth,
height (length), width, and samples.
If a colormap is provided, the dtype must be uint8 or uint16 and
the data values are indices into the last dimension of the
colormap.
photometric : {'minisblack', 'miniswhite', 'rgb', 'palette'}
The color space of the image data.
By default this setting is inferred from the data shape and the
value of colormap.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
compress : int or 'lzma'
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
Compression cannot be used to write contiguous files.
If 'lzma', LZMA compression is used, which is not available on
all platforms.
colormap : numpy.ndarray
RGB color values for the corresponding data value.
Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16.
tile : tuple of int
The shape (depth, length, width) of image tiles to write.
            If None (default), image data are written in one strip per plane.
The tile length and width must be a multiple of 16.
If the tile depth is provided, the SGI image_depth and tile_depth
            tags are used to save volume data. Only a few programs, e.g.
            MeVisLab, can read the SGI format.
datetime : datetime
Date and time of image creation. Saved with the first page only.
If None (default), the current date and time is used.
description : str
The subject of the image. Saved with the first page only.
            Cannot be used with the ImageJ format. If empty (default),
            the data shape and metadata are saved in JSON or ImageJ format.
metadata : dict
Additional meta data passed to the image description functions.
contiguous : bool
If True (default) and the data and parameters are compatible with
previous ones, if any, the data are stored contiguously after
the previous one. Parameters 'photometric' and 'planarconfig' are
ignored.
extratags : sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
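        Examples
        --------
        Illustrative usage sketch, not a prescribed workflow ('temp.tif' and
        the private tag code 65000 are arbitrary example values):
        >>> data = numpy.random.rand(2, 5, 301, 219).astype('float32')
        >>> with TiffWriter('temp.tif') as tif:
        ...     tif.save(data, compress=6, resolution=(300.0, 300.0),
        ...              extratags=[(65000, 's', 0, 'illustrative tag', True)])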
"""
# TODO: refactor this function
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
value_format = self._value_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
# just append contiguous data if possible
if self._data_shape:
if (not contiguous or
self._data_shape[1:] != data.shape or
self._data_dtype != data.dtype or
(compress and self._tags) or
tile or
not numpy.array_equal(colormap, self._colormap)):
# incompatible shape, dtype, compression mode, or colormap
self._write_remaining_pages()
self._write_image_description()
self._description_offset = 0
self._description_len_offset = 0
self._data_shape = None
self._colormap = None
if self._imagej:
raise ValueError(
"ImageJ does not support non-contiguous data")
else:
# consecutive mode
self._data_shape = (self._data_shape[0] + 1,) + data.shape
if not compress:
# write contiguous data, write ifds/tags later
data.tofile(fh)
return
if photometric not in (None, 'minisblack', 'miniswhite',
'rgb', 'palette'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
# prepare compression
if not compress:
compress = False
compress_tag = 1
elif compress == 'lzma':
compress = lzma.compress
compress_tag = 34925
if self._imagej:
raise ValueError("ImageJ can't handle LZMA compression")
elif not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
elif compress:
def compress(data, level=compress):
return zlib.compress(data, level)
compress_tag = 32946
# prepare ImageJ format
if self._imagej:
if description:
warnings.warn("not writing description to ImageJ file")
description = None
volume = False
if data.dtype.char not in 'BHhf':
raise ValueError("ImageJ does not support data type '%s'"
% data.dtype.char)
ijrgb = photometric == 'rgb' if photometric else None
if data.dtype.char not in 'B':
ijrgb = False
ijshape = imagej_shape(data.shape, ijrgb)
if ijshape[-1] in (3, 4):
photometric = 'rgb'
if data.dtype.char not in 'B':
raise ValueError("ImageJ does not support data type '%s' "
"for RGB" % data.dtype.char)
elif photometric is None:
photometric = 'minisblack'
planarconfig = None
if planarconfig == 'planar':
raise ValueError("ImageJ does not support planar images")
else:
planarconfig = 'contig' if ijrgb else None
# verify colormap and indices
if colormap is not None:
if data.dtype.char not in 'BH':
raise ValueError("invalid data dtype for palette mode")
colormap = numpy.asarray(colormap, dtype=byteorder+'H')
if colormap.shape != (3, 2**(data.itemsize * 8)):
raise ValueError("invalid color map shape")
self._colormap = colormap
# verify tile shape
if tile:
tile = tuple(int(i) for i in tile[:3])
volume = len(tile) == 3
if (len(tile) < 2 or tile[-1] % 16 or tile[-2] % 16 or
any(i < 1 for i in tile)):
raise ValueError("invalid tile shape")
else:
tile = ()
volume = False
# normalize data shape to 5D or 6D, depending on volume:
# (pages, planar_samples, [depth,] height, width, contig_samples)
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if colormap is not None:
photometric = 'palette'
planarconfig = None
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif self._imagej:
photometric = 'minisblack'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
photometric != 'palette' and
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
# normalize shape to 6D
assert len(data.shape) in (5, 6)
if len(data.shape) == 5:
data = data.reshape(data.shape[:2] + (1,) + data.shape[2:])
shape = data.shape
if tile and not volume:
tile = (1, tile[-2], tile[-1])
if photometric == 'palette':
if (samplesperpixel != 1 or extrasamples or
shape[1] != 1 or shape[-1] != 1):
raise ValueError("invalid data shape for palette mode")
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
strip_or_tile = 'tile' if tile else 'strip'
tag_byte_counts = TiffWriter.TAGS[strip_or_tile + '_byte_counts']
tag_offsets = TiffWriter.TAGS[strip_or_tile + '_offsets']
self._tag_offsets = tag_offsets
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value
# Append (code, ifdentry, ifdvalue, writeonce) to tags list
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
rawcount = value.find(b'\0\0')
if rawcount < 0:
rawcount = count
else:
rawcount += 1 # length of string without buffer
value = (value,)
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list, numpy.ndarray)):
value = value[0]
ifdentry.append(pack(value_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(value_format,
pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
if isinstance(value, numpy.ndarray):
assert value.size == count
assert value.dtype.char == dtype
ifdvalue = value.tobytes()
else:
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
            # return numerator and denominator from float or two integers
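            # e.g. (illustrative): rational(72.0) -> (72, 1),
            # rational((300, 1)) -> (300, 1)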
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if description:
# user provided description
addtag('image_description', 's', 0, description, writeonce=True)
# always write shape and metadata to image_description
self._metadata = {} if metadata is None else metadata
if self._imagej:
description = imagej_description(
data_shape, shape[-1] in (3, 4), self._colormap is not None,
**self._metadata)
else:
description = image_description(
data_shape, self._colormap is not None, **self._metadata)
if description:
# add 32 bytes buffer
# the image description might be updated later with the final shape
description += b'\0'*32
self._description_len = len(description)
addtag('image_description', 's', 0, description, writeonce=True)
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page in file
if datetime is None:
datetime = self._now()
addtag('datetime', 's', 0, datetime.strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, compress_tag)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if tile:
addtag('tile_width', 'I', 1, tile[-1])
addtag('tile_length', 'I', 1, tile[-2])
if tile[0] > 1:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, tile[0])
addtag('new_subfile_type', 'I', 1, 0)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1, {'miniswhite': 0, 'minisblack': 1,
'rgb': 2, 'palette': 3}[photometric])
if colormap is not None:
addtag('color_map', 'H', colormap.size, colormap)
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8,) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, 1) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
if not tile:
addtag('rows_per_strip', 'I', 1, shape[-3]) # * shape[-4]
if tile:
# use one chunk per tile per plane
tiles = ((shape[2] + tile[0] - 1) // tile[0],
(shape[3] + tile[1] - 1) // tile[1],
(shape[4] + tile[2] - 1) // tile[2])
numtiles = product(tiles) * shape[1]
strip_byte_counts = [
product(tile) * shape[-1] * data.dtype.itemsize] * numtiles
addtag(tag_byte_counts, offset_format, numtiles, strip_byte_counts)
addtag(tag_offsets, offset_format, numtiles, [0] * numtiles)
# allocate tile buffer
chunk = numpy.empty(tile + (shape[-1],), dtype=data.dtype)
else:
# use one strip per plane
strip_byte_counts = [
data[0, 0].size * data.dtype.itemsize] * shape[1]
addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
addtag(tag_offsets, offset_format, shape[1], [0] * shape[1])
# add extra tags from user
for t in extratags:
addtag(*t)
# TODO: check TIFFReadDirectoryCheckOrder warning in files containing
# multiple tags of same code
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not (self._bigtiff or self._imagej) and (
fh.tell() + data.size*data.dtype.itemsize > 2**31-1):
raise ValueError("data too large for standard TIFF file")
# if not compressed or tiled, write the first ifd and then all data
# contiguously; else, write all ifds and data interleaved
for pageindex in range(shape[0] if (compress or tile) else 1):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
elif tag[0] == 270 and tag[2].endswith(b'\0\0\0\0'):
# image description buffer
self._description_offset = pos
self._description_len_offset = (
tag_offset + tagindex * tag_size + 4)
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if compress:
strip_byte_counts = []
if tile:
for plane in data[pageindex]:
for tz in range(tiles[0]):
for ty in range(tiles[1]):
for tx in range(tiles[2]):
c0 = min(tile[0], shape[2] - tz*tile[0])
c1 = min(tile[1], shape[3] - ty*tile[1])
c2 = min(tile[2], shape[4] - tx*tile[2])
chunk[c0:, c1:, c2:] = 0
chunk[:c0, :c1, :c2] = plane[
tz*tile[0]:tz*tile[0]+c0,
ty*tile[1]:ty*tile[1]+c1,
tx*tile[2]:tx*tile[2]+c2]
if compress:
t = compress(chunk)
strip_byte_counts.append(len(t))
fh.write(t)
else:
chunk.tofile(fh)
fh.flush()
elif compress:
for plane in data[pageindex]:
plane = compress(plane)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
                data.tofile(fh)  # if this fails, try updating Python and numpy
# update strip/tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip/tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip/tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [tag for tag in tags if not tag[-1]]
# if uncompressed, write remaining ifds/tags later
if not (compress or tile):
self._tags = tags
self._shape = shape
self._data_shape = (1,) + data_shape
self._data_dtype = data.dtype
self._data_offset = data_offset
self._data_byte_counts = strip_byte_counts
def _write_remaining_pages(self):
"""Write outstanding IFDs and tags to file."""
if not self._tags:
return
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data_offset = self._data_offset
page_data_size = sum(self._data_byte_counts)
tag_bytes = b''.join(t[1] for t in self._tags)
numpages = self._shape[0] * self._data_shape[0] - 1
pos = fh.tell()
if not self._bigtiff and pos + len(tag_bytes) * numpages > 2**32 - 256:
if self._imagej:
warnings.warn("truncating ImageJ file")
return
raise ValueError("data too large for non-bigtiff file")
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
for _ in range(numpages):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifd entries
fh.write(pack(numtag_format, len(self._tags)))
tag_offset = fh.tell()
fh.write(tag_bytes)
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# offset to image data
data_offset += page_data_size
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(self._tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == self._tag_offsets:
strip_offsets_offset = pos
fh.write(tag[2])
# update strip/tile offsets if necessary
pos = fh.tell()
for tagindex, tag in enumerate(self._tags):
if tag[0] == self._tag_offsets: # strip/tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in self._data_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
break
fh.seek(pos)
self._tags = None
self._data_dtype = None
self._data_offset = None
self._data_byte_counts = None
# do not reset _shape or _data_shape
def _write_image_description(self):
"""Write meta data to image_description tag."""
if (not self._data_shape or self._data_shape[0] == 1 or
self._description_offset <= 0):
return
colormapped = self._colormap is not None
if self._imagej:
isrgb = self._shape[-1] in (3, 4)
description = imagej_description(
self._data_shape, isrgb, colormapped, **self._metadata)
else:
description = image_description(
self._data_shape, colormapped, **self._metadata)
# rewrite description and its length to file
description = description[:self._description_len-1]
pos = self._fh.tell()
self._fh.seek(self._description_offset)
self._fh.write(description)
self._fh.seek(self._description_len_offset)
self._fh.write(struct.pack(self._byteorder+self._offset_format,
len(description)+1))
self._fh.seek(pos)
self._description_offset = 0
self._description_len_offset = 0
self._description_len = 0
def _now(self):
"""Return current date and time."""
return datetime.datetime.now()
def close(self, truncate=False):
"""Write remaining pages (if not truncate) and close file handle."""
if not truncate:
self._write_remaining_pages()
self._write_image_description()
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
kwargs : dict
Additional parameters passed to the TiffFile or TiffSequence asarray
function.
Examples
--------
>>> imsave('temp.tif', numpy.random.rand(3, 4, 301, 219))
>>> im = imread('temp.tif', key=0)
>>> im.shape
(4, 301, 219)
>>> ims = imread(['temp.tif', 'temp.tif'])
>>> ims.shape
(2, 3, 4, 301, 219)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func',)
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
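# Illustrative sketch (hypothetical class, not part of this module): a method
# decorated with lazyattr runs once on first access and its result is then
# cached as an ordinary instance attribute.
#
#     class Cached(object):
#         @lazyattr
#         def expensive(self):
#             return sum(range(10**6))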
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the 'close' method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list of TiffPage
All TIFF pages in file.
series : list of TiffPageSeries
TIFF pages with compatible shapes and types.
    micromanager_metadata : dict
        Extra MicroManager non-TIFF metadata in the file, if present.
All attributes are read-only.
Examples
--------
>>> with TiffFile('temp.tif') as tif:
... data = tif.asarray()
... data.shape
(5, 301, 219)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True, maxpages=None,
fastij=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
maxpages : int
Number of pages to read (default: no limit).
fastij : bool
If True (default), try to use only the metadata from the first page
of ImageJ files. Significantly speeds up loading movies with
thousands of pages.
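        Examples
        --------
        Illustrative sketch of the non-default parameters ('temp.tif' is
        assumed to exist):
        >>> tif = TiffFile('temp.tif', multifile=False, fastij=False)
        >>> pages = tif.pages
        >>> tif.close()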
"""
self._fh = FileHandle(arg, name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile(maxpages, fastij)
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self, maxpages=None, fastij=True):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
self._is_native = self.byteorder == {'big': '>',
'little': '<'}[sys.byteorder]
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43:
# BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if maxpages and len(self.pages) > maxpages:
break
if fastij and page.is_imagej:
if page._patch_imagej():
break # only read the first page of ImageJ files
fastij = False
if not self.pages:
raise ValueError("empty TIFF file")
# TODO? sort pages by page_number value
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
for series in self.series:
wrap = 0
previous_offset = 0
for page in series.pages:
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
def asarray(self, key=None, series=None, memmap=False):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int or TiffPageSeries
Defines which series of pages to return as array.
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
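        Examples
        --------
        Illustrative sketch (assumes 'temp.tif' exists, e.g. written by the
        imsave example above):
        >>> with TiffFile('temp.tif') as tif:
        ...     image = tif.asarray(key=0)
        ...     stack = tif.asarray(series=0)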
"""
if key is None and series is None:
series = 0
if series is not None:
try:
series = self.series[series]
except (KeyError, TypeError):
pass
pages = series.pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_palette:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
result = stack_pages(pages, memmap=memmap,
colormapped=False, squeeze=False)
elif len(pages) == 1:
result = pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_palette, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, series.dtype, shape=series.shape)
result = result.reshape(-1)
else:
result = numpy.empty(series.shape, series.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
else:
result = stack_pages(pages, memmap=memmap)
if key is None:
try:
result.shape = series.shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, series.shape))
# try series of expected shapes
result.shape = (-1,) + series.shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
elif len(pages) == 1:
result.shape = pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
if not self.pages:
return []
series = []
if self.is_ome:
series = self._ome_series()
elif self.is_fluoview:
series = self._fluoview_series()
elif self.is_lsm:
series = self._lsm_series()
elif self.is_imagej:
series = self._imagej_series()
elif self.is_nih:
series = self._nih_series()
if not series:
# generic detection of series
shapes = []
pages = {}
index = 0
for page in self.pages:
if not page.shape:
continue
if page.is_shaped:
index += 1 # shape starts a new series
shape = page.shape + (index, page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape in pages:
pages[shape].append(page)
else:
shapes.append(shape)
pages[shape] = [page]
series = []
for s in shapes:
shape = ((len(pages[s]),) + s[:-3] if len(pages[s]) > 1
else s[:-3])
axes = (('I' + s[-2]) if len(pages[s]) > 1 else s[-2])
page0 = pages[s][0]
if page0.is_shaped:
description = page0.is_shaped
metadata = image_description_dict(description)
if product(metadata.get('shape', shape)) == product(shape):
shape = metadata.get('shape', shape)
else:
warnings.warn(
"metadata shape doesn't match data shape")
if 'axes' in metadata:
axes = metadata['axes']
if len(axes) != len(shape):
warnings.warn("axes don't match shape")
axes = 'Q'*(len(shape)-len(axes)) + axes[-len(shape):]
series.append(
TiffPageSeries(pages[s], shape, page0.dtype, axes))
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def _fluoview_series(self):
"""Return image series in FluoView file."""
page0 = self.pages[0]
dims = {
b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
axes = ''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1)
shape = tuple(int(i[1]) for i in mmhd if i[1] > 1)
return [TiffPageSeries(self.pages, shape, page0.dtype, axes)]
def _lsm_series(self):
"""Return image series in LSM file."""
page0 = self.pages[0]
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
dtype = pages[0].dtype
series = [TiffPageSeries(pages, shape, dtype, axes)]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
dtype = pages[0].dtype
series.append(TiffPageSeries(pages, shape, dtype, axes))
return series
def _imagej_series(self):
"""Return image series in ImageJ file."""
# ImageJ's dimension order is always TZCYXS
# TODO: fix loading of color, composite or palette images
shape = []
axes = []
page0 = self.pages[0]
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not (self.is_rgb and not
ij.get('hyperstack', False)):
shape.append(ij['channels'])
axes.append('C')
remain = ij.get('images', len(self.pages)) // (product(shape)
if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
if page0.axes[0] == 'I':
# contiguous multiple images
shape.extend(page0.shape[1:])
axes.extend(page0.axes[1:])
elif page0.axes[:2] == 'SI':
# color-mapped contiguous multiple images
shape = page0.shape[0:1] + tuple(shape) + page0.shape[2:]
axes = list(page0.axes[0]) + axes + list(page0.axes[2:])
else:
shape.extend(page0.shape)
axes.extend(page0.axes)
return [TiffPageSeries(self.pages, shape, page0.dtype, axes)]
def _nih_series(self):
"""Return image series in NIH file."""
page0 = self.pages[0]
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
return [TiffPageSeries(self.pages, shape, page0.dtype, axes)]
def _ome_series(self):
"""Return image series in OME-TIFF file(s)."""
omexml = self.pages[0].tags['image_description'].value
omexml = omexml.decode('UTF-8', 'ignore')
root = etree.fromstring(omexml)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
series = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
except (IOError, ValueError):
tif.close()
warnings.warn(
"ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
series.append(TiffPageSeries(ifds, shape, dtype, axes, self))
for serie in series:
shape = list(serie.shape)
for axis, (newaxis, labels) in modulo.items():
i = serie.axes.index(axis)
size = len(labels)
if shape[i] == size:
serie.axes = serie.axes.replace(axis, newaxis, 1)
else:
shape[i] //= size
shape.insert(i+1, size)
serie.axes = serie.axes.replace(axis, axis+newaxis, 1)
serie.shape = tuple(shape)
# squeeze dimensions
for serie in series:
serie.shape, serie.axes = squeeze_axes(serie.shape, serie.axes)
return series
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
"""File has BigTIFF format."""
return self.offset_size != 4
@lazyattr
def is_rgb(self):
"""File contains only RGB images."""
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
"""File contains only color-mapped images."""
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
"""File has MD Gel format."""
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
"""File was created by Media Cybernetics software."""
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
"""File has MetaMorph STK format."""
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
"""File was created by Carl Zeiss software."""
return len(self.pages) and self.pages[0].is_lsm
@lazyattr
def is_vista(self):
"""File was created by ISS Vista."""
return len(self.pages) and self.pages[0].is_vista
@lazyattr
def is_imagej(self):
"""File has ImageJ format."""
return len(self.pages) and self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
"""File was created by MicroManager."""
return len(self.pages) and self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
"""File has NIH Image format."""
return len(self.pages) and self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
"""File was created by Olympus FluoView."""
return len(self.pages) and self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
"""File has OME-TIFF format."""
return len(self.pages) and self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, color-mapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
color-mapped and with extra samples if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
    color_map : numpy.ndarray
        Color look-up table, if present.
    cz_lsm_scan_info : Record(dict)
        LSM scan info attributes, if present.
    imagej_tags : Record(dict)
        Consolidated ImageJ description and metadata tags, if present.
    uic_tags : Record(dict)
        Consolidated MetaMorph STK/UIC tags, if present.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
    0. number of planes/images (stk, ij).
1. planar samples_per_pixel.
2. image_depth Z (sgi).
3. image_length Y.
4. image_width X.
5. contig samples_per_pixel.
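    Examples
    --------
    Illustrative sketch (assumes 'temp.tif' exists):
    >>> with TiffFile('temp.tif') as tif:
    ...     page = tif.pages[0]
    ...     tags = page.tags
    ...     image = page.asarray()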
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._offset = 0
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0
or a corrupted page list is encountered.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
# read offset to this IFD
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
if offset >= fh.size:
warnings.warn("invalid page offset > file size")
raise StopIteration()
self._offset = offset
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
if numtags > 4096:
raise ValueError("suspicious number of tags")
except Exception:
warnings.warn("corrupted page list at offset %i" % offset)
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
# some files contain multiple IFD with same code
# e.g. MicroManager files contain two image_description
i = 1
while True:
name = "%s_%i" % (tag.name, i)
if name not in tags:
tags[name] = tag
break
pos = fh.tell() # where offset to next IFD can be found
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._fix_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_depth' not in tags:
self.image_depth = 1
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_vista or self.parent.is_vista:
# ISS Vista writes wrong image_depth tag
self.image_depth = 1
if self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_palette:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, planes, image_length, image_width)
else:
self.shape = (3, planes, image_depth, image_length,
image_width)
self.axes = 'S' + self.axes
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
elif self.is_palette:
samples = 1
if 'extra_samples' in self.tags:
samples += len(self.extra_samples)
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (3, image_depth, image_length, image_width)
self.axes = 'SZYX'
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples,)
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8),)
assert len(self.shape) == len(self.axes)
def _patch_imagej(self):
"""Return if ImageJ data are contiguous and adjust page attributes.
Patch 'strip_offsets' and 'strip_byte_counts' tags to span the
complete contiguous data.
ImageJ stores all image metadata in the first page and image data is
stored contiguously before the second page, if any. No need to
read other pages.
"""
if not self.is_imagej or not self.is_contiguous:
return
images = self.imagej_tags.get('images', 0)
if images <= 1:
return
pre = 'tile' if self.is_tiled else 'strip'
self.tags[pre+'_offsets'].value = (self.is_contiguous[0],)
self.tags[pre+'_byte_counts'].value = (self.is_contiguous[1] * images,)
self.shape = (images,) + self.shape
self._shape = (images,) + self._shape[1:]
self.axes = 'I' + self.axes
if self.is_palette:
# swap first two dimensions
self.axes = self.axes[1::-1] + self.axes[2:]
self.shape = self.shape[1::-1] + self.shape[2:]
return True
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True,
maxsize=64*1024*1024*1024):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
the shape of the returned array might be different from the page shape.
Parameters
----------
squeeze : bool
If True, all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
memmap : bool
If True, use numpy.memmap to read arrays from file if possible.
            For use on 64-bit systems and files with a small number of large
            contiguous blocks of data.
reopen : bool
If True and the parent file handle is closed, the file is
temporarily re-opened (and closed if no exception occurs).
scale_mdgel : bool
If True, MD Gel data will be scaled according to the private
metadata in the second TIFF page. The dtype will be float32.
        maxsize : int or None
            Maximum size of data before a ValueError is raised.
            Can be used to catch denial of service. Default: 64 GB.
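        Examples
        --------
        Illustrative sketch (assumes 'temp.tif' exists and is readable):
        >>> with TiffFile('temp.tif') as tif:
        ...     data = tif.pages[0].asarray(memmap=False)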
"""
if not self._shape:
return
if maxsize and product(self._shape) > maxsize:
raise ValueError("data is too large %s" % str(self._shape))
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
byte_counts, offsets = self._byte_counts_offsets
if self.is_tiled:
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
runlen = image_width
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x, typecode=typecode):
if self.predictor == 'float':
# the floating point horizontal differencing decoder
# needs the raw byte order
typecode = dtype
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8)) *
(bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpack_rgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpack_ints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
def decompress(x):
return decode_jpeg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
try:
tile.shape = tile_shape
except ValueError:
# incomplete tiles; see gdal issue #1179
warnings.warn("invalid tile data")
t = numpy.zeros(tile_shape, dtype).reshape(-1)
s = min(tile.size, t.size)
t[:s] = tile[:s]
tile = t.reshape(tile_shape)
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
elif self.predictor == 'float':
raise NotImplementedError()
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor and not (self.is_tiled and not self.is_contiguous):
if self.parent.is_lsm and not self.compression:
pass # work around bug in LSM510 software
elif self.predictor == 'horizontal':
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
elif self.predictor == 'float':
result = decode_floats(result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map,
result[:, 0:1, :, :, :, 0:1], axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples,)
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
                    result **= 2  # square root data format
result *= scale
if closed:
# TODO: file should remain open if an exception occurred above
fh.close()
return result
@lazyattr
def _byte_counts_offsets(self):
"""Return simplified byte_counts and offsets."""
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
j = 0
for i, (b, o) in enumerate(zip(byte_counts, offsets)):
if b > 0 and o > 0:
if i > j:
byte_counts[j] = b
offsets[j] = o
j += 1
elif b > 0 and o <= 0:
raise ValueError("invalid offset")
else:
warnings.warn("empty byte count")
if j == 0:
j = 1
return byte_counts[:j], offsets[:j]
def _is_memmappable(self, rgbonly, colormapped):
"""Return if page's image data in file can be memory-mapped."""
return (self.parent.filehandle.is_file and
self.is_contiguous and
(self.bits_per_sample == 8 or
self.parent._is_native) and
not self.predictor and
not (rgbonly and 'extra_samples' in self.tags) and
not (colormapped and self.is_palette))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1] or
byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
result = imagej_description_dict(self.is_imagej)
if 'imagej_metadata' in self.tags:
try:
result.update(imagej_metadata(
self.tags['imagej_metadata'].value,
self.tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""Page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""Page contains contiguous image."""
return ('planar_configuration' in self.tags and
self.tags['planar_configuration'].value == 1)
@lazyattr
def is_palette(self):
"""Page contains palette-colored image and is not OME or STK."""
# turn off color mapping for OME-TIFF and STK
if self.is_stk or self.is_ome or self.parent.is_ome:
return False
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""Page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""Page is reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""Page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""Page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""Page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""Page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""Page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""Page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""Page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_vista(self):
"""Software tag is 'ISS Vista'."""
return ('software' in self.tags and
self.tags['software'].value == b'ISS Vista')
@lazyattr
def is_ome(self):
"""Page contains OME-XML in image_description tag."""
if 'image_description' not in self.tags:
return False
d = self.tags['image_description'].value.strip()
return d.startswith(b'<?xml version=') and d.endswith(b'</OME>')
@lazyattr
def is_shaped(self):
"""Return description containing shape if exists, else None."""
if 'image_description' in self.tags:
description = self.tags['image_description'].value
if b'"shape":' in description or b'shape=(' in description:
return description
if 'image_description_1' in self.tags:
description = self.tags['image_description_1'].value
if b'"shape":' in description or b'shape=(' in description:
return description
@lazyattr
def is_imagej(self):
"""Return ImageJ description if exists, else None."""
if 'image_description' in self.tags:
description = self.tags['image_description'].value
if description.startswith(b'ImageJ='):
return description
if 'image_description_1' in self.tags:
# Micromanager
description = self.tags['image_description_1'].value
if description.startswith(b'ImageJ='):
return description
@lazyattr
def is_micromanager(self):
"""Page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
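Examples
--------
>>> # minimal example constructing a tag from explicit values
>>> tag = TiffTag(256, dtype=3, count=1, value=256, name='image_width')
>>> tag.name, tag.value
('image_width', 256)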
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
# scalar value if not strip/tile offsets/byte_counts
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes) and
self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _fix_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
although they fit into the tag value element of the tag.
"""
if self.code == 258 and self.count == 2:
# TODO: test this case; need example file
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffPageSeries(object):
"""Series of TIFF pages with compatible shape and data type.
Attributes
----------
pages : list of TiffPage
Sequence of TiffPages in series.
dtype : numpy.dtype or str
Data type of the image array in series.
shape : tuple
Dimensions of the image array in series.
axes : str
Labels of axes in shape. See TiffPage.axes.
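Examples
--------
>>> # illustrative sketch; 'hypothetical.tif' is a placeholder file name and
>>> # the page series are assumed to be exposed as TiffFile.series
>>> with TiffFile('hypothetical.tif') as tif:  # doctest: +SKIP
...     series = tif.series[0]
...     data = series.asarray()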
"""
__slots__ = 'pages', 'shape', 'dtype', 'axes', 'parent'
def __init__(self, pages, shape, dtype, axes, parent=None):
self.pages = pages
self.shape = tuple(shape)
self.axes = ''.join(axes)
self.dtype = numpy.dtype(dtype)
if parent:
self.parent = parent
elif len(pages):
self.parent = pages[0].parent
else:
self.parent = None
def asarray(self, memmap=False):
"""Return image data from series of TIFF pages as numpy array.
Parameters
----------
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
"""
if self.parent:
return self.parent.asarray(series=self, memmap=memmap)
def __len__(self):
"""Return number of TiffPages in series."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified TiffPage."""
return self.pages[key]
def __iter__(self):
"""Return iterator over TiffPages in series."""
return iter(self.pages)
def __str__(self):
"""Return string with information about series."""
return "* pages: %i\n* dtype: %s\n* shape: %s\n* axes: %s" % (
len(self.pages), str(self.dtype), str(self.shape), self.axes)
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
Attributes
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> tifs = TiffSequence("test.oif.files/*.tif")
>>> tifs.shape, tifs.axes
((2, 100), 'CT')
>>> data = tifs.asarray()
>>> data.shape
(2, 100, 256, 256)
"""
_patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes don't match.
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
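Example of dictionary-style construction with attribute access:
>>> r = Record(width=256, height=128)
>>> r.width
256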
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
if k.startswith('_'): # does not work with byte
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
* Handle embedded files (for CZI within CZI files).
* Allow re-opening closed files (for multi-file formats such as OME-TIFF).
* Read numpy arrays and records from file-like objects.
Only binary read, seek, tell, and close are supported on embedded files.
When initialized from another file handle, do not use the other handle
until this FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
If True, file has a fileno and can be memory-mapped.
All attributes are read-only.
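Examples
--------
>>> # illustrative sketch; 'hypothetical.bin' is a placeholder file name
>>> with FileHandle('hypothetical.bin') as fh:  # doctest: +SKIP
...     header = fh.read(16)
...     fh.seek(0)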
"""
__slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
arg : str, File, or FileHandle
File name or open file handle.
mode : str
File open mode in case 'arg' is a file name.
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._arg = arg
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._arg, basestring):
# file name
self._arg = os.path.abspath(self._arg)
self._dir, self._name = os.path.split(self._arg)
self._fh = open(self._arg, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._arg, FileHandle):
# FileHandle
self._fh = self._arg._fh
if self._offset is None:
self._offset = 0
self._offset += self._arg._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._arg._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._arg._name
self._dir = self._arg._dir
else:
# open file object
self._fh = self._arg
if self._offset is None:
self._offset = self._arg.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed stream"
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory-map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
self.is_file = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON '%s'" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.ndarray."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for _ in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for _ in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
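Example (illustrative sketch; 'hypothetical_mm.ome.tif' is a placeholder name):
>>> with FileHandle('hypothetical_mm.ome.tif') as fh:  # doctest: +SKIP
...     settings = read_micromanager_metadata(fh)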
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
except KeyError:
raise ValueError("not a MicroManager TIFF file")
results = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
results['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
results['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
results['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
results['comments'] = read_json(fh, byteorder, None, count)
return results
def imagej_metadata(data, bytecounts, byteorder):
"""Return dictionary from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
if data[:4] not in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description_dict(description):
"""Return dictionary from ImageJ image description byte string.
Raise ValueError if not a valid ImageJ description.
>>> description = b'ImageJ=1.11a\\nimages=510\\nhyperstack=true\\n'
>>> imagej_description_dict(description) # doctest: +SKIP
{'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}
"""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
if 'ImageJ' not in result:
raise ValueError("not a ImageJ image description")
return result
def imagej_description(shape, rgb=None, colormaped=False, version='1.11a',
hyperstack=None, mode=None, loop=None, kwargs={}):
"""Return ImageJ image decription from data shape as byte string.
ImageJ can handle up to 6 dimensions in order TZCYXS.
>>> imagej_description((51, 5, 2, 196, 171)) # doctest: +SKIP
ImageJ=1.11a
images=510
channels=2
slices=5
frames=51
hyperstack=true
mode=grayscale
loop=false
"""
if colormaped:
raise NotImplementedError("ImageJ colormapping not supported")
shape = imagej_shape(shape, rgb=rgb)
rgb = shape[-1] in (3, 4)
result = ['ImageJ=%s' % version]
append = []
result.append('images=%i' % product(shape[:-3]))
if hyperstack is None:
#if product(shape[:-3]) > 1:
hyperstack = True
append.append('hyperstack=true')
else:
append.append('hyperstack=%s' % bool(hyperstack))
if shape[2] > 1:
result.append('channels=%i' % shape[2])
if mode is None and not rgb:
mode = 'grayscale'
if hyperstack and mode:
append.append('mode=%s' % mode)
if shape[1] > 1:
result.append('slices=%i' % shape[1])
if shape[0] > 1:
result.append("frames=%i" % shape[0])
if loop is None:
append.append('loop=false')
if loop is not None:
append.append('loop=%s' % bool(loop))
for key, value in kwargs.items():
append.append('%s=%s' % (key.lower(), value))
return str2bytes('\n'.join(result + append + ['']))
def imagej_shape(shape, rgb=None):
"""Return shape normalized to 6D ImageJ hyperstack TZCYXS.
Raise ValueError if not a valid ImageJ hyperstack shape.
>>> imagej_shape((2, 3, 4, 5, 3), False)
(2, 3, 4, 5, 3, 1)
"""
shape = tuple(int(i) for i in shape)
ndim = len(shape)
if ndim < 2 or ndim > 6:
raise ValueError("invalid ImageJ hyperstack: not 2 to 6 dimensional")
if rgb is None:
rgb = shape[-1] in (3, 4) and ndim > 2
if rgb and shape[-1] not in (3, 4):
raise ValueError("invalid ImageJ hyperstack: not a RGB image")
if not rgb and ndim == 6 and shape[-1] != 1:
raise ValueError("invalid ImageJ hyperstack: not a non-RGB image")
if rgb or shape[-1] == 1:
return (1, ) * (6 - ndim) + shape
else:
return (1, ) * (5 - ndim) + shape + (1,)
def image_description_dict(description):
"""Return dictionary from image description byte string.
Raise ValueError if description is of unknown format.
>>> image_description_dict(b'shape=(256, 256, 3)')
{'shape': (256, 256, 3)}
>>> description = b'{"shape": [256, 256, 3], "axes": "YXS"}'
>>> image_description_dict(description) # doctest: +SKIP
{'shape': [256, 256, 3], 'axes': 'YXS'}
"""
if description.startswith(b'shape='):
# old style 'shaped' description
shape = tuple(int(i) for i in description[7:-1].split(b','))
return dict(shape=shape)
if description.startswith(b'{') and description.endswith(b'}'):
# JSON description
return json.loads(description.decode('utf-8'))
raise ValueError("unknown image description")
def image_description(shape, colormaped=False, **metadata):
"""Return image description from data shape and meta data.
Return UTF-8 encoded JSON.
>>> image_description((256, 256, 3), axes='YXS') # doctest: +SKIP
b'{"shape": [256, 256, 3], "axes": "YXS"}'
"""
if colormaped:
shape = (3,) + shape
metadata.update({'shape': shape})
return json.dumps(metadata).encode('utf-8')
def _replace_by(module_function, package=__package__, warn=False):
"""Try replace decorated function by module.function."""
try:
from importlib import import_module
except ImportError:
warnings.warn('could not import module importlib')
return lambda func: func
def decorate(func, module_function=module_function, warn=warn):
try:
module, function = module_function.split('.')
if package:
module = import_module('.' + module, package=package)
else:
module = import_module(module)
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decode_floats(data):
"""Decode floating point horizontal differencing.
The TIFF predictor type 3 reorders the bytes of the image values and
applies horizontal byte differencing to improve compression of floating
point images. The ordering of interleaved color channels is preserved.
Parameters
----------
data : numpy.ndarray
The image to be decoded. The dtype must be a floating point.
The shape must include the number of contiguous samples per pixel
even if 1.
"""
shape = data.shape
dtype = data.dtype
if len(shape) < 3:
raise ValueError('invalid data shape')
if dtype.char not in 'dfe':
raise ValueError('not a floating point image')
littleendian = data.dtype.byteorder == '<' or (
sys.byteorder == 'little' and data.dtype.byteorder == '=')
# undo horizontal byte differencing
data = data.view('uint8')
data.shape = shape[:-2] + (-1,) + shape[-1:]
numpy.cumsum(data, axis=-2, dtype='uint8', out=data)
# reorder bytes
if littleendian:
data.shape = shape[:-2] + (-1,) + shape[-2:]
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
data = data[..., ::-1]
# back to float
data = numpy.ascontiguousarray(data)
data = data.view(dtype)
data.shape = shape
return data
def decode_jpeg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
from czifile import _czifile
image = _czifile.decode_jpeg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decode_packbits')
def decode_packbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
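Examples (byte-string results shown as Python 3 literals):
>>> decode_packbits(b'\\x02abc')
b'abc'
>>> decode_packbits(b'\\xfd\\x61')
b'aaaa'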
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
@_replace_by('_tifffile.decode_lzw')
def decode_lzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of 'bitw' bits at 'bitcount' position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpack_ints')
def unpack_ints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
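Examples
--------
>>> # unpack four 4-bit values from two bytes
>>> unpack_ints(b'\\xff\\xf0', 'B', 4).tolist()
[15, 15, 15, 0]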
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l,), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
def unpack_rgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpack_rgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpack_rgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpack_rgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy.ndarray
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
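Examples
--------
>>> image = numpy.zeros((3, 4, 1))
>>> reorient(image, 'left_top').shape
(4, 3, 1)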
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape don't match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return tuple(shape), ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
"""Return data with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to data
shape = data.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
data = data.reshape(shape)
# transpose axes
data = data.transpose([axes.index(ax) for ax in asaxes])
return data
def stack_pages(pages, memmap=False, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
Additional parameters are passed to the page asarray function.
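>>> # illustrative sketch; 'hypothetical.tif' is a placeholder file name
>>> with TiffFile('hypothetical.tif') as tif:  # doctest: +SKIP
...     volume = stack_pages(tif.pages)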
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
result = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + result.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
else:
result = numpy.empty(shape, dtype=result.dtype)
for i, page in enumerate(pages):
result[i] = page.asarray(*args, **kwargs)
return result
def stripnull(string):
"""Return string truncated at first null character.
Clean NULL terminated C strings.
>>> stripnull(b'string\\x00')
b'string'
"""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def stripascii(string):
"""Return string truncated at last byte that is 7bit ASCII.
Clean NULL separated and terminated TIFF strings.
>>> stripascii(b'string\\x00string\\n\\x01\\x00')
b'string\\x00string\\n'
>>> stripascii(b'\\x00')
b''
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
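# Examples (illustrative): format_size(532) returns '532 B';
# format_size(1234567) returns '1206 KB'.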
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value,)
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, milisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
minute, milisecond = divmod(milisecond, 1000 * 60)
second, milisecond = divmod(milisecond, 1000)
return datetime.datetime(year, month, day,
hour, minute, second, milisecond)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory.
Print error message on failure.
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
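"""Map new_subfile_type tag bit mask to tuple of subfile type names.
>>> TIFF_SUBFILE_TYPES()[3]
('reduced_image', 'page')
"""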
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
34925: 'lzma',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decode_packbits,
'lzw': decode_lzw,
# 'jpeg': decode_jpeg
}
if lzma:
TIFF_DECOMPESSORS['lzma'] = lzma.decompress
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
#'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
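# Illustrative note (assumption): the (name, format[, shape]) tuples above
# form a numpy structured dtype, e.g.
#
#     import numpy
#     nih_dtype = numpy.dtype(NIH_IMAGE_HEADER)
#
# which the header reader presumably uses to parse the binary block in one call.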
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
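# Each key above matches an 'offset_<name>' field of CZ_LSM_INFO; presumably
# the corresponding reader is called at that file offset (the dispatch code
# lives elsewhere in this module).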
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal', 3: 'float'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
    32995: ('sgi_matteing', None, None, 1, None), # use extra_samples
32996: ('sgi_datatype', None, None, 1, None), # use sample_format
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
51023: ('fibics_xml', None, 2, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported `from matplotlib import pyplot`.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
    figure : matplotlib.figure.Figure (optional)
        Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
        Maximum image width and length.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and
data.shape[-1] < data.shape[-3] // 8 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if isrgb:
data = data[..., :maxdim, :maxdim, :maxdim]
else:
data = data[..., :maxdim, :maxdim]
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
# TODO: handle complex types
raise NotImplementedError("complex type")
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
if data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0,) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0,) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
    if sys.version_info < (2, 6):
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
try:
import tkFileDialog as filedialog
except ImportError:
from tkinter import filedialog
path = filedialog.askopenfilename(filetypes=[
("TIF files", "*.tif"), ("LSM files", "*.lsm"),
("STK files", "*.stk"), ("allfiles", "*")])
#parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if page.is_micromanager:
print('MICROMANAGER_FILE_METADATA')
print(Record(tif.micromanager_metadata))
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
def str2bytes(s, encoding="latin-1"):
return s.encode(encoding)
else:
def str2bytes(s):
return s
if __name__ == "__main__":
sys.exit(main()) | gpl-3.0 |
Curly-Mo/sample-recognition | fingerprint/spectral_peaks.py | 1 | 5789 | import sys
import itertools
from collections import Counter
import logging
logger = logging.getLogger('spectral_peaks')
if not logger.handlers:
stream = logging.StreamHandler(sys.stdout)
stream.setLevel(logging.INFO)
logger.addHandler(stream)
import librosa
import numpy as np
import scipy
import matplotlib.pyplot as plt
from matplotlib.patches import ConnectionPatch
import seaborn
seaborn.set(style='ticks')
import ann
def spectral_peaks(audio_path, hop_length, octave_bins=24, n_octaves=7, wn=0.5, num_peaks=5, plot=False):
logger.info('{}: Loading signal into memory...'.format(audio_path))
y, sr = librosa.load(audio_path)
logger.info('{}: Generating Spectrogram...'.format(audio_path))
S = librosa.core.constantq.cqt(y, sr=22050, hop_length=hop_length, bins_per_octave=octave_bins, n_bins=octave_bins*n_octaves, fmin=20)
S = librosa.logamplitude(S, ref_power=np.max)
# Find peaks
logger.info('{}: Finding Peaks...'.format(audio_path))
peakx, peaky = scipy.signal.argrelextrema(lowpass(S.T, wn=wn), np.greater, axis=1)
# Convert peaks to 'matrix'
logger.info('{}: Creating peak matrix...'.format(audio_path))
peaks = np.full([num_peaks, S.shape[1]], -1, dtype=int)
for x, points in itertools.groupby(zip(peakx, peaky), lambda x: x[0]):
points = list(points)
values = [S[point[1], x] for point in points]
ys = [point[1] for (value, point) in sorted(zip(values, points), reverse=True)]
if len(ys) >= num_peaks:
peaks[:, x] = sorted(ys[:num_peaks])
peaks = np.clip(peaks, -1, 168)
# # Convert peaks to distances between peaks
# logger.info('{}: Finding peak distances...'.format(audio_path))
# peak_dists = np.empty([len(list(itertools.combinations(range(num_peaks), 2))), peaks.shape[1]], dtype=int)
# for x, frame in enumerate(peaks.T):
# peak_dists[:, x] = [b - a for (a, b) in itertools.combinations(frame, 2)]
# Plot
if plot:
# Plot spectrogram
librosa.display.specshow(
S,
sr=sr,
hop_length=hop_length,
bins_per_octave=octave_bins,
fmin=20,
x_axis='time',
y_axis='cqt_hz',
n_xticks=10,
n_yticks=20,
)
#plt.imshow(S, aspect='auto', origin='lower')
plt.title(audio_path)
plt.colorbar(format='%+2.0f dB')
# Plot Peaks
peakx = [np.repeat(x, len(frame)) for (x, frame) in enumerate(peaks.T)]
        peakx = [i for frame in peakx for i in frame]
peaky = [i for frame in peaks.T for i in frame]
plt.scatter(peakx, peaky, c='y')
plt.tight_layout()
return S, peaks
def find_peaks(matrix, axis=1, width=10):
peaks = [[], []]
if axis == 1:
matrix = matrix.T
for x, frame in enumerate(matrix):
y_vals = scipy.signal.find_peaks_cwt(frame, np.array([width]))
x_vals = [x] * len(y_vals)
peaks[0].extend(x_vals)
peaks[1].extend(y_vals)
return peaks
def lowpass(x, order=8, wn=0.4, axis=1):
b, a = scipy.signal.butter(order, wn, btype='low')
y = scipy.signal.filtfilt(b, a, x, axis=axis)
return y
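# Illustrative usage (not part of the original module): smooth a spectrogram
# along frequency before peak picking, as done in spectral_peaks() above:
#
#     S_smooth = lowpass(S.T, wn=0.5, axis=1)
#
# filtfilt runs the Butterworth filter forward and backward, so the smoothing
# introduces no phase shift and peaks stay aligned with the original frames.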
def match(path1, path2, hop_length, wn=0.4, num_peaks=5, thresh=0.001, plot=True):
target_peaks = num_peaks + 2
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
S1, peaks1 = spectral_peaks(path1, hop_length, wn=wn, num_peaks=target_peaks, plot=plot)
ax2 = fig.add_subplot(2, 1, 2)
S2, peaks2 = spectral_peaks(path2, hop_length, wn=wn, num_peaks=num_peaks, plot=plot)
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
logger.info('Finding peak distances...')
peak_dists2 = np.diff(peaks2, axis=0)
num_combos = len(list(itertools.combinations(range(target_peaks), num_peaks)))
peak_dists1 = np.empty([num_peaks-1, peaks1.shape[1]*(num_combos+1)], dtype=int)
for x, frame in enumerate(peaks1.T):
start = x*(num_combos+1)
end = (x+1)*(num_combos+1) - 1
peak_dists1[:, start:end] = np.array([np.diff(combo) for combo in itertools.combinations(frame, num_peaks)]).T
logger.info('Finding nearest neighbors')
distances, nearest_neighbors = ann.nearest_neighbors(peak_dists2.T, peak_dists1.T, k=1)
# nearest_neighbors = []
# for frame in peak_dists1:
# score = 0
# nn = None
# for i, yframe in enumerate(peak_dists2):
# count = count_matches(frame, yframe)
# if count > score:
# score = count
# nn = i
# nearest_neighbors.append({'score': score, 'index': nn})
logger.info('Drawing lines between matches')
for x, nn in enumerate(nearest_neighbors[:-1]):
if distances[x] < thresh:
if any(n in nearest_neighbors[x+1:x+(num_combos*3)] for n in [nn+1, nn+2, nn+3]):
con = ConnectionPatch(
xyA=(x/num_combos, 0), xyB=(nn, S2.shape[0]),
coordsA='data', coordsB='data',
axesA=ax1, axesB=ax2,
arrowstyle='<-', linewidth=1,
zorder=999
)
ax1.add_artist(con)
ax2.set_zorder(-1)
plt.show(block=False)
return nearest_neighbors, distances
def nearest_neighbors(peaks1, peaks2):
# Convert peaks to distances between peaks
peak_dists = [[] for i in range(len(peaks1))]
for x, frame in enumerate(peaks1):
for a, b in itertools.combinations(frame, 2):
            peak_dists[x].append(b - a)
    # peaks2 is currently unused here; return the per-frame peak distances so
    # callers can still use the computed structure.
    return peak_dists
def count_matches(lista, listb):
if len(lista) > len(listb):
lista, listb = listb, lista
a_count = Counter(lista)
b_count = Counter(listb)
return sum(min(b_count[ak], av) for ak,av in a_count.iteritems())
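# Worked example (illustrative only):
#     count_matches([1, 2, 2, 3], [2, 2, 2, 3, 4]) == 3
# (two shared 2s plus one shared 3).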
| apache-2.0 |
elijah513/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
Myasuka/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
aabadie/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 65 | 5529 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formula
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
selective-inference/selective-inference | selectinf/randomized/tests/test_group_lasso.py | 2 | 10569 | from __future__ import division, print_function
import numpy as np
import nose.tools as nt
import regreg.api as rr
from ..group_lasso import (group_lasso,
selected_targets,
full_targets,
debiased_targets)
from ...tests.instance import gaussian_instance
from ...tests.flags import SET_SEED
from ...tests.decorators import set_sampling_params_iftrue, set_seed_iftrue
from ...algorithms.sqrt_lasso import choose_lambda, solve_sqrt_lasso
from ..randomization import randomization
from ...tests.decorators import rpy_test_safe
@set_seed_iftrue(SET_SEED)
def test_group_lasso(n=400,
p=100,
signal_fac=3,
s=5,
sigma=3,
target='full',
rho=0.4,
randomizer_scale=.75,
ndraw=100000):
"""
Test group lasso
"""
inst, const = gaussian_instance, group_lasso.gaussian
signal = np.sqrt(signal_fac * np.log(p))
X, Y, beta = inst(n=n,
p=p,
signal=signal,
s=s,
equicorrelated=False,
rho=rho,
sigma=sigma,
random_signs=True)[:3]
orthogonal = True
if orthogonal:
X = np.linalg.svd(X, full_matrices=False)[0]
Y = X.dot(beta) + sigma * np.random.standard_normal(n)
n, p = X.shape
sigma_ = np.std(Y)
groups = np.floor(np.arange(p)/2).astype(np.int)
weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)])
conv = const(X,
Y,
groups,
weights,
randomizer_scale=randomizer_scale * sigma_)
signs = conv.fit()
nonzero = conv.selection_variable['directions'].keys()
if target == 'full':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = full_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'selected':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = selected_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'debiased':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = debiased_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
_, pval, intervals = conv.summary(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives,
ndraw=ndraw,
compute_intervals=False)
which = np.zeros(p, np.bool)
for group in conv.selection_variable['directions'].keys():
which_group = conv.penalty.groups == group
which += which_group
return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_lasso(n=400,
p=200,
signal_fac=1.5,
s=5,
sigma=3,
target='full',
rho=0.4,
ndraw=10000):
"""
Test group lasso with groups of size 1, ie lasso
"""
inst, const = gaussian_instance, group_lasso.gaussian
signal = np.sqrt(signal_fac * np.log(p))
X, Y, beta = inst(n=n,
p=p,
signal=signal,
s=s,
equicorrelated=False,
rho=rho,
sigma=sigma,
random_signs=True)[:3]
n, p = X.shape
sigma_ = np.std(Y)
groups = np.arange(p)
weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)])
conv = const(X,
Y,
groups,
weights)
signs = conv.fit()
nonzero = conv.selection_variable['directions'].keys()
if target == 'full':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = full_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'selected':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = selected_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'debiased':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = debiased_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
_, pval, intervals = conv.summary(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives,
ndraw=ndraw,
compute_intervals=False)
which = np.zeros(p, np.bool)
for group in conv.selection_variable['directions'].keys():
which_group = conv.penalty.groups == group
which += which_group
return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_mixed(n=400,
p=200,
signal_fac=1.5,
s=5,
sigma=3,
target='full',
rho=0.4,
ndraw=10000):
"""
Test group lasso with a mix of groups of size 1, and larger
"""
inst, const = gaussian_instance, group_lasso.gaussian
signal = np.sqrt(signal_fac * np.log(p))
X, Y, beta = inst(n=n,
p=p,
signal=signal,
s=s,
equicorrelated=False,
rho=rho,
sigma=sigma,
random_signs=True)[:3]
n, p = X.shape
sigma_ = np.std(Y)
groups = np.arange(p)
groups[-5:] = -1
groups[-8:-5] = -2
Y += X[:,-8:].dot(np.ones(8)) * 5 # so we select the last two groups
weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)])
conv = const(X,
Y,
groups,
weights)
signs = conv.fit()
nonzero = conv.selection_variable['directions'].keys()
if target == 'full':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = full_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'selected':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = selected_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'debiased':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = debiased_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
_, pval, intervals = conv.summary(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives,
ndraw=ndraw,
compute_intervals=False)
    which = np.zeros(p, dtype=bool)
for group in conv.selection_variable['directions'].keys():
which_group = conv.penalty.groups == group
which += which_group
return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_all_targets(n=100, p=20, signal_fac=1.5, s=5, sigma=3, rho=0.4):
for target in ['full', 'selected', 'debiased']:
test_group_lasso(n=n,
p=p,
signal_fac=signal_fac,
s=s,
sigma=sigma,
rho=rho,
target=target)
def main(nsim=500, n=200, p=50, target='full', sigma=3):
import matplotlib.pyplot as plt
P0, PA = [], []
from statsmodels.distributions import ECDF
for i in range(nsim):
try:
p0, pA = test_group_lasso(n=n, p=p, target=target, sigma=sigma)
        except:
            # skip simulations where selection fails; otherwise p0, pA would be undefined below
            continue
print(len(p0), len(pA))
P0.extend(p0)
PA.extend(pA)
P0_clean = np.array(P0)
        P0_clean = P0_clean[P0_clean > 1.e-5]  # drop numerically-zero p-values before summarising
        print(np.mean(P0_clean), np.std(P0_clean),
              np.mean(np.array(PA) < 0.05),
              np.sum(np.array(PA) < 0.05) / (i+1),
              np.mean(np.array(P0) < 0.05),
              np.mean(P0_clean < 0.05),
              np.mean(np.array(P0) < 1e-5),
              'null pvalue + power + failure')
if i % 3 == 0 and i > 0:
U = np.linspace(0, 1, 101)
plt.clf()
if len(P0_clean) > 0:
plt.plot(U, ECDF(P0_clean)(U))
if len(PA) > 0:
plt.plot(U, ECDF(PA)(U), 'r')
plt.plot([0, 1], [0, 1], 'k--')
plt.savefig("plot.pdf")
plt.show()
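# A minimal sketch (not part of the original tests) of how the null p-values collected
# by main() could be checked for uniformity with a Kolmogorov-Smirnov test instead of
# eyeballing the ECDF plot; the 1e-5 cutoff mirrors the filtering above and is an
# assumption, not a prescribed value.
def check_null_uniformity(null_pvalues, cutoff=1e-5):
    from scipy.stats import kstest
    pvals = np.asarray(null_pvalues)
    pvals = pvals[pvals > cutoff]  # drop numerically-zero p-values, as in main()
    return kstest(pvals, 'uniform')  # (statistic, pvalue) against the U(0, 1) null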
| bsd-3-clause |
valgur/metsaregister | metsaregister/cli.py | 1 | 2792 | # -*- coding: utf-8 -*-
"""Console script for metsaregister."""
from __future__ import print_function
import click
import geopandas as gpd
from shapely.ops import cascaded_union
import metsaregister
def _read_aoi(aoi_path):
gdf = gpd.read_file(aoi_path)
return cascaded_union(list(gdf.geometry)).wkt
def _add_crs(json):
return json.replace(
'{',
'{\n"crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:EPSG::3301" } }, ',
1
)
@click.group()
def cli():
return
@cli.command(name="list", help="List available layers and their IDs")
def list_layers():
layers = metsaregister.get_layers()
for name, id in layers.items():
print(id, name, sep='\t')
@cli.command(help="""Get any layer's features intersecting with a given AOI.
Takes a vector file containing the area of interest as input. Must be in L-EST97 CRS.
For a list of available layers and their IDs see the 'list' command.
The result is saved as a GeoJSON file.""")
@click.argument('aoi', type=str)
@click.argument('layer_id', type=int)
@click.argument('out_path', type=str)
def get_layer(aoi, layer_id, out_path):
aoi = _read_aoi(aoi)
gdf = metsaregister.query_layer(aoi, layer_id)
with open(out_path, 'w', encoding='utf8') as f:
f.write(_add_crs(gdf.to_json()))
@cli.command(help="""Fetch and save forest stands' information for a given AOI.
Takes a vector file containing the area of interest as input. Must be in L-EST97 CRS.
The result is saved as a GeoJSON file.""")
@click.argument('aoi', type=str)
@click.argument('out_path', type=str)
@click.option('--wait', default=0.5, type=float,
help="Time to wait in seconds between querying each stand's information "
"to not overload the server. Defaults to 0.5 s.")
def forest_stands(aoi, out_path, wait):
aoi = _read_aoi(aoi)
gdf = metsaregister.query_forest_stands(aoi, wait)
with open(out_path, 'w', encoding='utf8') as f:
f.write(_add_crs(gdf.to_json()))
@cli.command(help="""Fetch and save forest notifications' information for a given AOI.
Takes a vector file containing the area of interest as input. Must be in L-EST97 CRS.
The result is saved as a GeoJSON file.""")
@click.argument('aoi', type=str)
@click.argument('out_path', type=str)
@click.option('--wait', default=0.5, type=float,
help="Time to wait in seconds between querying each stand's information "
"to not overload the server. Defaults to 0.5 s.")
def forest_notifications(aoi, out_path, wait):
aoi = _read_aoi(aoi)
gdf = metsaregister.query_forest_notifications(aoi, wait)
with open(out_path, 'w', encoding='utf8') as f:
f.write(_add_crs(gdf.to_json()))
if __name__ == "__main__":
cli()
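# A minimal usage sketch (not part of the package) driving the CLI in-process with
# click's test runner. "aoi.geojson" and layer id 10 are placeholder assumptions, and
# depending on the click version the command may be exposed as 'get_layer' or 'get-layer'.
def _example_invocation():
    from click.testing import CliRunner
    runner = CliRunner()
    print(runner.invoke(cli, ['list']).output)  # list layer names and ids
    result = runner.invoke(cli, ['get_layer', 'aoi.geojson', '10', 'layer_10.geojson'])
    print(result.exit_code, result.output)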
| mit |
tarzan0820/addons-yelizariev | sugarcrm_migration/wizard/upload.py | 16 | 3753 | from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp import tools
import logging
_logger = logging.getLogger(__name__)
import base64
import tempfile
try:
import MySQLdb
import MySQLdb.cursors
from pandas import DataFrame
except ImportError:
pass
from ..import_sugarcrm import import_sugarcrm
from ..import_kashflow import import_kashflow
import tarfile
import shutil
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import os
import glob
class sugarcrm_migration_upload(osv.TransientModel):
_name = "sugarcrm_migration.upload"
_description = "Upload dumps"
_columns = {
'sugarcrm_file': fields.char('Sugarcrm file (*.tar.gz)', help='Path on server'),
'kashflow_file': fields.char('Kashflow file (*.tar.gz)', help='Path on server'),
'db_host': fields.char('MySQL Host'),
'db_port': fields.char('MySQL Port'),
'db_name': fields.char('MySQL Database'),
'db_user': fields.char('MySQL User'),
'db_passwd': fields.char('MySQL Password'),
}
_defaults = {
'db_host': 'localhost',
'db_port': '3306',
'db_name': 'test',
'db_user': 'test',
'db_passwd': 'test',
}
def upload_button(self, cr, uid, ids, context=None):
record = self.browse(cr, uid, ids[0])
self.kashflow(record, cr, uid)
#self.sugarcrm(record, cr, uid)
return True
def sugarcrm(self, record, cr, uid):
#if not record.sugarcrm_file:
# return
#unzip files
files = []
tmp_dir = None
if record.sugarcrm_file:
tmp_dir,files = self.unzip_file(record.sugarcrm_file.strip())
instance = import_sugarcrm(self.pool, cr, uid,
'sugarcrm', #instance_name
'sugarcrm_migration', # module_name
context={'db_host': record.db_host,
'db_port': record.db_port,
'db_user': record.db_user,
'db_passwd': record.db_passwd,
'db_name': record.db_name,
'db_dump_fies': files
}
)
try:
shutil.rmtree(tmp_dir)
except:
pass
instance.run()
return instance
def kashflow(self, record, cr, uid):
if not record.kashflow_file:
return
# unzip files
tmp,files = self.unzip_file(record.kashflow_file.strip(), pattern='*.csv')
_logger.info('kashflow files: %s'%files)
# map data and save to base_import.import
instance = import_kashflow(self.pool, cr, uid,
'kashflow', #instance_name
'sugarcrm_migration', #module_name
context = {'csv_files': files,
'sugarcrm_instance_name':'sugarcrm'
}
)
instance.run()
return instance
def unzip_file(self, filename, pattern='*'):
'''
        Extract a *.tar.gz archive into a temporary directory.
        Returns a tuple (tmp_dir, list of extracted file names matching pattern).
'''
tar = tarfile.open(name=filename)
dir = tempfile.mkdtemp(prefix='tmp_sugarcrm_migration')
tar.extractall(path=dir)
return dir, glob.glob('%s/%s' % (dir, pattern))+glob.glob('%s/*/%s' % (dir, pattern))
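# Note: tarfile.extractall() trusts member paths from the archive. A hedged sketch of
# a stricter helper that skips entries escaping the destination directory; it is an
# illustration only and is not wired into unzip_file above.
def _safe_extractall(tar, dest):
    import os
    root = os.path.realpath(dest)
    for member in tar.getmembers():
        target = os.path.realpath(os.path.join(dest, member.name))
        if target != root and not target.startswith(root + os.sep):
            continue  # skip entries such as '../../etc/passwd'
        tar.extract(member, path=dest)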
| lgpl-3.0 |
fzalkow/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
neuroidss/nupic.research | projects/union_pooling/experiments/union_sdr_continuous/union_pooling_tm_learning.py | 8 | 9165 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import sys
import time
import os
import yaml
from optparse import OptionParser
import numpy
from pylab import rcParams
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from htmresearch.frameworks.union_temporal_pooling.union_temporal_pooler_experiment import (
UnionTemporalPoolerExperiment)
"""
Experiment 2
Runs UnionTemporalPooler on input from a Temporal Memory while TM learns the sequence
"""
def experiment2():
paramDir = 'params/1024_baseline/5_trainingPasses.yaml'
outputDir = 'results/'
params = yaml.safe_load(open(paramDir, 'r'))
options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
plotVerbosity = 2
consoleVerbosity = 1
print "Running SDR overlap experiment...\n"
print "Params dir: {0}".format(paramDir)
print "Output dir: {0}\n".format(outputDir)
# Dimensionality of sequence patterns
patternDimensionality = params["patternDimensionality"]
# Cardinality (ON / true bits) of sequence patterns
patternCardinality = params["patternCardinality"]
# TODO If this parameter is to be supported, the sequence generation code
# below must change
# Number of unique patterns from which sequences are built
# patternAlphabetSize = params["patternAlphabetSize"]
# Length of sequences shown to network
sequenceLength = params["sequenceLength"]
# Number of sequences used. Sequences may share common elements.
numberOfSequences = params["numberOfSequences"]
# Number of sequence passes for training the TM. Zero => no training.
trainingPasses = params["trainingPasses"]
tmParamOverrides = params["temporalMemoryParams"]
upParamOverrides = params["unionPoolerParams"]
# Generate a sequence list and an associated labeled list (both containing a
# set of sequences separated by None)
start = time.time()
print "\nGenerating sequences..."
patternAlphabetSize = sequenceLength * numberOfSequences
patternMachine = PatternMachine(patternDimensionality, patternCardinality,
patternAlphabetSize)
sequenceMachine = SequenceMachine(patternMachine)
numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
generatedSequences = sequenceMachine.generateFromNumbers(numbers)
sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
for i in xrange(numberOfSequences)]
labeledSequences = []
for label in sequenceLabels:
for _ in xrange(sequenceLength):
labeledSequences.append(label)
labeledSequences.append(None)
# Set up the Temporal Memory and Union Pooler network
print "\nCreating network..."
experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides)
# Train only the Temporal Memory on the generated sequences
# if trainingPasses > 0:
#
# print "\nTraining Temporal Memory..."
# if consoleVerbosity > 0:
# print "\nPass\tBursting Columns Mean\tStdDev\tMax"
#
# for i in xrange(trainingPasses):
# experiment.runNetworkOnSequences(generatedSequences,
# labeledSequences,
# tmLearn=True,
# upLearn=None,
# verbosity=consoleVerbosity,
# progressInterval=_SHOW_PROGRESS_INTERVAL)
#
# if consoleVerbosity > 0:
# stats = experiment.getBurstingColumnsStats()
# print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
#
# # Reset the TM monitor mixin's records accrued during this training pass
# # experiment.tm.mmClearHistory()
#
# print
# print MonitorMixinBase.mmPrettyPrintMetrics(
# experiment.tm.mmGetDefaultMetrics())
# print
#
# if plotVerbosity >= 2:
# plotNetworkState(experiment, plotVerbosity, trainingPasses, phase="Training")
#
# experiment.tm.mmClearHistory()
# experiment.up.mmClearHistory()
print "\nRunning test phase..."
inputSequences = generatedSequences
inputCategories = labeledSequences
tmLearn = True
upLearn = False
classifierLearn = False
currentTime = time.time()
experiment.tm.reset()
experiment.up.reset()
poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 1))
activeCellsTrace = numpy.zeros((experiment.up._numColumns, 1))
activeSPTrace = numpy.zeros((experiment.up._numColumns, 1))
for _ in xrange(trainingPasses):
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
if sensorPattern is None:
pass
else:
experiment.tm.compute(sensorPattern,
learn=tmLearn,
sequenceLabel=inputCategory)
if upLearn is not None:
activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput()
experiment.up.compute(activeCells,
predActiveCells,
learn=upLearn,
sequenceLabel=inputCategory)
currentPoolingActivation = experiment.up._poolingActivation
currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)
currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
currentUnionSDR[experiment.up._unionSDR] = 1
activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)
currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
currentSPSDR[experiment.up._activeCells] = 1
activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
print
print MonitorMixinBase.mmPrettyPrintMetrics(\
experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
print
experiment.tm.mmClearHistory()
# estimate fraction of shared bits across adjacent time point
unionSDRshared = experiment.up._mmComputeUnionSDRdiff()
bitLifeList = experiment.up._mmComputeBitLifeStats()
bitLife = numpy.array(bitLifeList)
# Plot SP outputs, UP persistence and UP outputs in testing phase
def showSequenceStartLine(ax, trainingPasses, sequenceLength):
for i in xrange(trainingPasses):
ax.vlines(i*sequenceLength, 0, 100, linestyles='--')
plt.figure()
ncolShow = 100
f, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3)
ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
showSequenceStartLine(ax1, trainingPasses, sequenceLength)
ax1.set_title('SP SDR')
ax1.set_ylabel('Columns')
ax2.imshow(poolingActivationTrace[1:100,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax2, trainingPasses, sequenceLength)
ax2.set_title('Persistence')
ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax3, trainingPasses, sequenceLength)
plt.title('Union SDR')
ax2.set_xlabel('Time (steps)')
pp = PdfPages('results/UnionPoolingDuringTMlearning_Experiment2.pdf')
pp.savefig()
pp.close()
f, (ax1, ax2, ax3) = plt.subplots(nrows=3,ncols=1)
ax1.plot((sum(activeCellsTrace))/experiment.up._numColumns*100)
ax1.set_ylabel('Union SDR size (%)')
ax1.set_xlabel('Time (steps)')
ax1.set_ylim(0,25)
ax2.plot(unionSDRshared)
ax2.set_ylabel('Shared Bits')
ax2.set_xlabel('Time (steps)')
ax3.hist(bitLife)
ax3.set_xlabel('Life duration for each bit')
pp = PdfPages('results/UnionSDRproperty_Experiment2.pdf')
pp.savefig()
pp.close()
if __name__ == "__main__":
experiment2()
| agpl-3.0 |
ian-r-rose/burnman | contrib/CHRU2014/paper_averaging.py | 1 | 6852 |
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
paper_averaging
---------------
This script reproduces :cite:`Cottaar2014`, Figure 2.
This example shows the effect of different averaging schemes. Currently four
averaging schemes are available:
1. Voigt-Reuss-Hill
2. Voigt averaging
3. Reuss averaging
4. Hashin-Shtrikman averaging
See :cite:`Watt1976` for explanations
of each averaging scheme.
requires:
- geotherms
- compute seismic velocities
teaches:
- averaging
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../../burnman'):
sys.path.insert(1, os.path.abspath('../..'))
import burnman
from burnman import minerals
import misc.colors as colors
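# For reference, a minimal sketch of the first three averaging schemes listed in the
# module docstring, written directly from the Voigt/Reuss definitions (volume
# fractions f_i, moduli M_i). The actual computation below uses BurnMan's own
# averaging_schemes classes; this helper is illustrative only.
def voigt_reuss_hill_average(fractions, moduli):
    fractions = np.asarray(fractions, dtype=float)
    moduli = np.asarray(moduli, dtype=float)
    m_voigt = np.sum(fractions * moduli)        # arithmetic mean: upper bound
    m_reuss = 1.0 / np.sum(fractions / moduli)  # harmonic mean: lower bound
    return 0.5 * (m_voigt + m_reuss)            # Voigt-Reuss-Hill estimate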
if __name__ == "__main__":
figsize = (6, 5)
prop = {'size': 12}
# plt.rc('text', usetex=True)
plt.rc('font', family='sans-serif')
figure = plt.figure(dpi=100, figsize=figsize)
""" choose 'slb2' (finite-strain 2nd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'slb3 (finite-strain 3rd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
    or 'mgd3' (mie-gruneisen-debye 3rd order shear modulus,
    matas et al. 2007)
    or 'mgd2' (mie-gruneisen-debye 2nd order shear modulus,
    matas et al. 2007)
or 'bm2' (birch-murnaghan 2nd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))
or 'bm3' (birch-murnaghan 3rd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))"""
amount_perovskite = 0.6
rock = burnman.Composite(
[minerals.SLB_2011.mg_perovskite(), minerals.SLB_2011.wuestite()],
[amount_perovskite, 1.0 - amount_perovskite])
perovskitite = burnman.Composite(
[minerals.SLB_2011.mg_perovskite()], [1.0])
periclasite = burnman.Composite([minerals.SLB_2011.wuestite()], [1.0])
# seismic model for comparison:
# pick from .prem() .slow() .fast() (see burnman/seismic.py)
seismic_model = burnman.seismic.PREM()
# set on how many depth slices the computations should be done
number_of_points = 20
# we will do our computation and comparison at the following depth values:
depths = np.linspace(700e3, 2800e3, number_of_points)
# alternatively, we could use the values where prem is defined:
# depths = seismic_model.internal_depth_list()
pressures, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate(
['pressure', 'density', 'v_p', 'v_s', 'v_phi'], depths)
temperatures = burnman.geotherm.brown_shankland(pressures)
print("Calculations are done for:")
rock.debug_print()
# calculate the seismic velocities of the rock using a whole battery of
# averaging schemes:
# evaluate the end members
rho_pv, vp_pv, vs_pv, vphi_pv, K_pv, G_pv = \
perovskitite.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], pressures, temperatures)
rho_fp, vp_fp, vs_fp, vphi_fp, K_fp, G_fp = \
periclasite.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], pressures, temperatures)
# Voigt Reuss Hill averaging
rock.set_averaging_scheme(burnman.averaging_schemes.VoigtReussHill())
rho_vrh, vp_vrh, vs_vrh, vphi_vrh, K_vrh, G_vrh = \
rock.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], pressures, temperatures)
# Voigt averaging
rock.set_averaging_scheme(burnman.averaging_schemes.Voigt())
rho_v, vp_v, vs_v, vphi_v, K_v, G_v = \
rock.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], pressures, temperatures)
# Reuss averaging
rock.set_averaging_scheme(burnman.averaging_schemes.Reuss())
rho_r, vp_r, vs_r, vphi_r, K_r, G_r = \
rock.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], pressures, temperatures)
# Upper bound for Hashin-Shtrikman averaging
rock.set_averaging_scheme(burnman.averaging_schemes.HashinShtrikmanUpper())
rho_hsu, vp_hsu, vs_hsu, vphi_hsu, K_hsu, G_hsu = \
rock.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], pressures, temperatures)
# Lower bound for Hashin-Shtrikman averaging
rock.set_averaging_scheme(burnman.averaging_schemes.HashinShtrikmanLower())
rho_hsl, vp_hsl, vs_hsl, vphi_hsl, K_hsl, G_hsl = \
rock.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], pressures, temperatures)
# linear fit
vs_lin = vs_pv * amount_perovskite + vs_fp * (1.0 - amount_perovskite)
# PLOTTING
# plot vs
ax = figure.add_subplot(1, 1, 1)
plt.plot(
pressures / 1.e9, vs_v / 1.e3, color=colors.color(0), linewidth=2, linestyle='-', marker='^',
markersize=4, label='Voigt')
plt.plot(
pressures / 1.e9, vs_r / 1.e3, color=colors.color(5), linewidth=2, linestyle='-', marker='v',
markersize=4, label='Reuss')
plt.plot(
pressures / 1.e9, vs_vrh / 1.e3, color=colors.color(1), linestyle='-', marker='*',
markersize=6, label='Voigt-Reuss-Hill')
plt.fill_between(pressures / 1.e9, vs_hsu / 1.e3, vs_hsl / 1.e3,
facecolor='red', lw=0, label='asdf', interpolate=False)
# plt.plot(pressures/1.e9,vs_hsu/1.e3,color='r',linestyle='-',\
# markersize=4,label='Hashin-Shtrikman')
# plt.plot(pressures/1.e9,vs_hsl/1.e3,color='r',linestyle='-',marker='x',\
# markersize=4)
plt.plot(
pressures / 1.e9, vs_lin / 1.e3, color='k', linewidth=2, linestyle='--',
markersize=4, label='linear')
plt.plot(
pressures / 1.e9, vs_pv / 1.e3, color=colors.color(2), linewidth=2, linestyle='-', marker='d',
markersize=4, label='Mg Perovskite')
plt.plot(
pressures / 1.e9, vs_fp / 1.e3, color=colors.color(4), linewidth=2, linestyle='-', marker='x',
markersize=6, label=r'W\"ustite')
plt.ylim(3.0, 7.5)
plt.xlim(min(pressures) / 1.e9, max(pressures) / 1.e9)
simArtist = plt.Line2D((0, 1), (0, 0), color='r', lw=5, linestyle='-')
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles[0:3] + [simArtist] + handles[3:], labels[0:3] + [
'Hashin-Shtrikman'] + labels[3:], loc='lower right', ncol=2, prop=prop)
plt.xlabel('Pressure (GPa)')
plt.ylabel('Shear velocity $V_s$ (km/s)')
if "RUNNING_TESTS" not in globals():
plt.savefig("example_averaging.pdf", bbox_inches='tight')
plt.show()
| gpl-2.0 |
kaiserroll14/301finalproject | main/pandas/sparse/tests/test_list.py | 16 | 2996 | from pandas.compat import range
import unittest
from numpy import nan
import numpy as np
from pandas.sparse.api import SparseList, SparseArray
from pandas.util.testing import assert_almost_equal
from .test_sparse import assert_sp_array_equal
def assert_sp_list_equal(left, right):
assert_sp_array_equal(left.to_array(), right.to_array())
class TestSparseList(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.na_data = np.array([nan, nan, 1, 2, 3, nan, 4, 5, nan, 6])
self.zero_data = np.array([0, 0, 1, 2, 3, 0, 4, 5, 0, 6])
def test_constructor(self):
lst1 = SparseList(self.na_data[:5])
exp = SparseList()
exp.append(self.na_data[:5])
assert_sp_list_equal(lst1, exp)
def test_len(self):
arr = self.na_data
splist = SparseList()
splist.append(arr[:5])
self.assertEqual(len(splist), 5)
splist.append(arr[5])
self.assertEqual(len(splist), 6)
splist.append(arr[6:])
self.assertEqual(len(splist), 10)
def test_append_na(self):
arr = self.na_data
splist = SparseList()
splist.append(arr[:5])
splist.append(arr[5])
splist.append(arr[6:])
sparr = splist.to_array()
assert_sp_array_equal(sparr, SparseArray(arr))
def test_append_zero(self):
arr = self.zero_data
splist = SparseList(fill_value=0)
splist.append(arr[:5])
splist.append(arr[5])
splist.append(arr[6:])
sparr = splist.to_array()
assert_sp_array_equal(sparr, SparseArray(arr, fill_value=0))
def test_consolidate(self):
arr = self.na_data
exp_sparr = SparseArray(arr)
splist = SparseList()
splist.append(arr[:5])
splist.append(arr[5])
splist.append(arr[6:])
consol = splist.consolidate(inplace=False)
self.assertEqual(consol.nchunks, 1)
self.assertEqual(splist.nchunks, 3)
assert_sp_array_equal(consol.to_array(), exp_sparr)
splist.consolidate()
self.assertEqual(splist.nchunks, 1)
assert_sp_array_equal(splist.to_array(), exp_sparr)
def test_copy(self):
arr = self.na_data
exp_sparr = SparseArray(arr)
splist = SparseList()
splist.append(arr[:5])
splist.append(arr[5])
cp = splist.copy()
cp.append(arr[6:])
self.assertEqual(splist.nchunks, 2)
assert_sp_array_equal(cp.to_array(), exp_sparr)
def test_getitem(self):
arr = self.na_data
splist = SparseList()
splist.append(arr[:5])
splist.append(arr[5])
splist.append(arr[6:])
for i in range(len(arr)):
assert_almost_equal(splist[i], arr[i])
assert_almost_equal(splist[-i], arr[-i])
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
sinhrks/pandas-ml | pandas_ml/skaccessors/test/test_multiclass.py | 1 | 2288 | #!/usr/bin/env python
import sklearn.datasets as datasets
import sklearn.multiclass as multiclass
import sklearn.svm as svm
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestMultiClass(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.multiclass.OneVsRestClassifier, multiclass.OneVsRestClassifier)
self.assertIs(df.multiclass.OneVsOneClassifier, multiclass.OneVsOneClassifier)
self.assertIs(df.multiclass.OutputCodeClassifier, multiclass.OutputCodeClassifier)
def test_Classifications(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
        models = ['OneVsRestClassifier', 'OneVsOneClassifier']
for model in models:
svm1 = df.svm.LinearSVC(random_state=self.random_state)
svm2 = svm.LinearSVC(random_state=self.random_state)
mod1 = getattr(df.multiclass, model)(svm1)
mod2 = getattr(multiclass, model)(svm2)
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
def test_Classifications_Random(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
models = ['OutputCodeClassifier']
for model in models:
svm1 = df.svm.LinearSVC(random_state=self.random_state)
svm2 = svm.LinearSVC(random_state=self.random_state)
mod1 = getattr(df.multiclass, model)(svm1, random_state=self.random_state)
mod2 = getattr(multiclass, model)(svm2, random_state=self.random_state)
df.fit(mod1)
mod2.fit(iris.data, iris.target)
result = df.predict(mod1)
expected = mod2.predict(iris.data)
self.assertIsInstance(result, pdml.ModelSeries)
self.assert_numpy_array_almost_equal(result.values, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/IPython/core/display.py | 6 | 34087 | # -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
try:
from base64 import encodebytes as base64_encode
except ImportError:
from base64 import encodestring as base64_encode
import json
import mimetypes
import os
import struct
import sys
import warnings
from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
unicode_type)
from IPython.testing.skipdoctest import skip_doctest
__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'JSON', 'Javascript',
'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
'publish_display_data']
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
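# Example of the merge behaviour (illustrative, not an executed doctest):
#
#     >>> d1 = {'image/png': {'width': 100}, 'isolated': True}
#     >>> _merge(d1, {'image/png': {'height': 50}})
#     {'image/png': {'width': 100, 'height': 50}, 'isolated': True}
#
# Sub-dicts are merged key by key, while non-dict values from d2 replace those in d1.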
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
"""internal implementation of all display_foo methods
Parameters
----------
mimetype : str
The mimetype to be published (e.g. 'image/png')
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
if metadata:
metadata = {mimetype: metadata}
if raw:
# turn list of pngdata into list of { 'image/png': pngdata }
objs = [ {mimetype: obj} for obj in objs ]
display(*objs, raw=raw, metadata=metadata, include=[mimetype])
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def publish_display_data(data, metadata=None, source=None):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. mime-type keys matching those in data can be used
to specify metadata about particular representations.
source : str, deprecated
Unused.
"""
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.instance().display_pub.publish(
data=data,
metadata=metadata,
)
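# A short usage sketch: publishing a plain-text and an HTML representation of the same
# object; frontends choose the richest MIME type they support.
#
#     publish_display_data({
#         'text/plain': 'Hello',
#         'text/html': '<b>Hello</b>',
#     })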
def display(*objs, **kwargs):
"""Display a Python object in all frontends.
By default all representations will be computed and sent to the frontends.
Frontends can decide which representation is used and how.
Parameters
----------
objs : tuple of objects
The Python objects to display.
raw : bool, optional
Are the objects to be displayed already mimetype-keyed dicts of raw display data,
or Python objects that need to be formatted before display? [default: False]
include : list or tuple, optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
A list of format type strings (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
metadata : dict, optional
A dictionary of metadata to associate with the output.
mime-type keys in this dictionary will be associated with the individual
representation formats, if they exist.
"""
raw = kwargs.get('raw', False)
include = kwargs.get('include')
exclude = kwargs.get('exclude')
metadata = kwargs.get('metadata')
from IPython.core.interactiveshell import InteractiveShell
if not raw:
format = InteractiveShell.instance().display_formatter.format
for obj in objs:
if raw:
publish_display_data(data=obj, metadata=metadata)
else:
format_dict, md_dict = format(obj, include=include, exclude=exclude)
if not format_dict:
# nothing to display (e.g. _ipython_display_ took over)
continue
if metadata:
# kwarg-specified metadata gets precedence
_merge(md_dict, metadata)
publish_display_data(data=format_dict, metadata=md_dict)
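# Usage sketches (illustrative; `obj` stands for any Python object and `HTML` is
# defined later in this module):
#
#     display(HTML('<b>bold</b>'))              # rich object, all reprs computed
#     display({'text/plain': 'raw'}, raw=True)  # pre-formatted mimebundle
#     display(obj, include=['text/plain'])      # restrict to selected MIME types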
def display_pretty(*objs, **kwargs):
"""Display the pretty (default) representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
"""Display the HTML representation of an object.
Note: If raw=False and the object does not have a HTML
representation, no HTML will be shown.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw HTML data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/html', objs, **kwargs)
def display_markdown(*objs, **kwargs):
"""Displays the Markdown representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw markdown data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/markdown', objs, **kwargs)
def display_svg(*objs, **kwargs):
"""Display the SVG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw svg data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
"""Display the PNG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw png data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
"""Display the JPEG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw JPEG data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
"""Display the JSON representation of an object.
Note that not many frontends support displaying JSON.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw json data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
"""Display the Javascript representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
"""Display the PDF representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/pdf', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
"""An object that wraps data to be displayed."""
_read_flags = 'r'
_show_mem_addr = False
def __init__(self, data=None, url=None, filename=None):
"""Create a display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. The MIME type of the data should match the
subclasses used, so the Png subclass should be used for 'image/png'
data. If the data is a URL, the data will first be downloaded
        and then displayed.
Parameters
----------
data : unicode, str or bytes
The raw data or a URL or file to load the data from
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
"""
if data is not None and isinstance(data, string_types):
if data.startswith('http') and url is None:
url = data
filename = None
data = None
elif _safe_exists(data) and filename is None:
url = None
filename = data
data = None
self.data = data
self.url = url
self.filename = None if filename is None else unicode_type(filename)
self.reload()
self._check_data()
def __repr__(self):
if not self._show_mem_addr:
cls = self.__class__
r = "<%s.%s object>" % (cls.__module__, cls.__name__)
else:
r = super(DisplayObject, self).__repr__()
return r
def _check_data(self):
"""Override in subclasses if there's something to check."""
pass
def reload(self):
"""Reload the raw data from file or URL."""
if self.filename is not None:
with open(self.filename, self._read_flags) as f:
self.data = f.read()
elif self.url is not None:
try:
try:
from urllib.request import urlopen # Py3
except ImportError:
from urllib2 import urlopen
response = urlopen(self.url)
self.data = response.read()
# extract encoding from header, if there is one:
encoding = None
for sub in response.headers['content-type'].split(';'):
sub = sub.strip()
if sub.startswith('charset'):
encoding = sub.split('=')[-1].strip()
break
# decode data, if an encoding was specified
if encoding:
self.data = self.data.decode(encoding, 'replace')
except:
self.data = None
class TextDisplayObject(DisplayObject):
"""Validate that display data is text"""
def _check_data(self):
if self.data is not None and not isinstance(self.data, string_types):
raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
def _repr_pretty_(self):
return self.data
class HTML(TextDisplayObject):
def _repr_html_(self):
return self.data
def __html__(self):
"""
This method exists to inform other HTML-using modules (e.g. Markupsafe,
htmltag, etc) that this object is HTML and does not need things like
special characters (<>&) escaped.
"""
return self._repr_html_()
class Markdown(TextDisplayObject):
def _repr_markdown_(self):
return self.data
class Math(TextDisplayObject):
def _repr_latex_(self):
s = self.data.strip('$')
return "$$%s$$" % s
class Latex(TextDisplayObject):
def _repr_latex_(self):
return self.data
class SVG(DisplayObject):
_read_flags = 'rb'
# wrap data in a property, which extracts the <svg> tag, discarding
# document headers
_data = None
@property
def data(self):
return self._data
@data.setter
def data(self, svg):
if svg is None:
self._data = None
return
# parse into dom object
from xml.dom import minidom
svg = cast_bytes_py2(svg)
x = minidom.parseString(svg)
# get svg tag (should be 1)
found_svg = x.getElementsByTagName('svg')
if found_svg:
svg = found_svg[0].toxml()
else:
# fallback on the input, trust the user
# but this is probably an error.
pass
svg = cast_unicode(svg)
self._data = svg
def _repr_svg_(self):
return self.data
class JSON(DisplayObject):
"""JSON expects a JSON-able dict or list
not an already-serialized JSON string.
Scalar types (None, number, string) are not allowed, only dict or list containers.
"""
# wrap data in a property, which warns about passing already-serialized JSON
_data = None
def _check_data(self):
if self.data is not None and not isinstance(self.data, (dict, list)):
raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
@property
def data(self):
return self._data
@data.setter
def data(self, data):
if isinstance(data, string_types):
warnings.warn("JSON expects JSONable dict or list, not JSON strings")
data = json.loads(data)
self._data = data
def _repr_json_(self):
return self.data
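# Usage sketch: JSON display objects take containers, not JSON text.
#
#     JSON({'a': [1, 2, 3]})      # ok
#     JSON('{"a": [1, 2, 3]}')    # works, but warns and parses the string with json.loads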
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(TextDisplayObject):
def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
"""Create a Javascript display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. If the data is a URL, the data will first be
downloaded and then displayed.
In the Notebook, the containing element will be available as `element`,
and jQuery will be available. Content appended to `element` will be
visible in the output area.
Parameters
----------
data : unicode, str or bytes
The Javascript source code or a URL to download it from.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
lib : list or str
A sequence of Javascript library URLs to load asynchronously before
running the source code. The full URLs of the libraries should
be given. A single Javascript library URL can also be given as a
string.
css: : list or str
A sequence of css files to load before running the source code.
The full URLs of the css files should be given. A single css URL
can also be given as a string.
"""
if isinstance(lib, string_types):
lib = [lib]
elif lib is None:
lib = []
if isinstance(css, string_types):
css = [css]
elif css is None:
css = []
if not isinstance(lib, (list,tuple)):
raise TypeError('expected sequence, got: %r' % lib)
if not isinstance(css, (list,tuple)):
raise TypeError('expected sequence, got: %r' % css)
self.lib = lib
self.css = css
super(Javascript, self).__init__(data=data, url=url, filename=filename)
def _repr_javascript_(self):
r = ''
for c in self.css:
r += css_t % c
for l in self.lib:
r += lib_t1 % l
r += self.data
r += lib_t2*len(self.lib)
return r
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
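# Usage sketch: reading the pixel size of a PNG on disk (the path is a placeholder).
#
#     with open('figure.png', 'rb') as f:
#         w, h = _pngxy(f.read())
#
# The two values are the big-endian 32-bit integers stored directly after the IHDR tag.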
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
class Image(DisplayObject):
_read_flags = 'rb'
_FMT_JPEG = u'jpeg'
_FMT_PNG = u'png'
_ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
def __init__(self, data=None, url=None, filename=None, format=None,
embed=None, width=None, height=None, retina=False,
unconfined=False, metadata=None):
"""Create a PNG/JPEG image object given raw data.
When this object is returned by an input cell or passed to the
display function, it will result in the image being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Images from a file are always embedded.
format : unicode
            The format of the image data (png/jpeg/jpg). If a filename or URL is given,
            the format will be inferred from the filename extension.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
Note that QtConsole is not able to display images if `embed` is set to `False`
width : int
Width in pixels to which to constrain the image in html
height : int
Height in pixels to which to constrain the image in html
retina : bool
Automatically set the width and height to half of the measured
width and height.
This only works for embedded images because it reads the width/height
from image data.
For non-embedded images, you can just set the desired display width
and height directly.
unconfined: bool
Set unconfined=True to disable max-width confinement of the image.
metadata: dict
Specify extra metadata to attach to the image.
Examples
--------
# embedded image data, works in qtconsole and notebook
# when passed positionally, the first arg can be any of raw image data,
# a URL, or a filename from which to load image data.
# The result is always embedding image data for inline images.
Image('http://www.google.fr/images/srpr/logo3w.png')
Image('/path/to/image.jpg')
Image(b'RAW_PNG_DATA...')
# Specifying Image(url=...) does not embed the image data,
# it only generates `<img>` tag with a link to the source.
# This will not work in the qtconsole or offline.
Image(url='http://www.google.fr/images/srpr/logo3w.png')
"""
if filename is not None:
ext = self._find_ext(filename)
elif url is not None:
ext = self._find_ext(url)
elif data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
elif isinstance(data, string_types) and (
data.startswith('http') or _safe_exists(data)
):
ext = self._find_ext(data)
else:
ext = None
if format is None:
if ext is not None:
if ext == u'jpg' or ext == u'jpeg':
format = self._FMT_JPEG
if ext == u'png':
format = self._FMT_PNG
else:
format = ext.lower()
elif isinstance(data, bytes):
# infer image type from image data header,
# only if format has not been specified.
if data[:2] == _JPEG:
format = self._FMT_JPEG
# failed to detect format, default png
if format is None:
format = 'png'
if format.lower() == 'jpg':
# jpg->jpeg
format = self._FMT_JPEG
self.format = unicode_type(format).lower()
self.embed = embed if embed is not None else (url is None)
if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
raise ValueError("Cannot embed the '%s' image format" % (self.format))
self.width = width
self.height = height
self.retina = retina
self.unconfined = unconfined
self.metadata = metadata
super(Image, self).__init__(data=data, url=url, filename=filename)
if retina:
self._retina_shape()
def _retina_shape(self):
"""load pixel-doubled width and height from image data"""
if not self.embed:
return
if self.format == 'png':
w, h = _pngxy(self.data)
elif self.format == 'jpeg':
w, h = _jpegxy(self.data)
else:
# retina only supports png
return
self.width = w // 2
self.height = h // 2
def reload(self):
"""Reload the raw data from file or URL."""
if self.embed:
super(Image,self).reload()
if self.retina:
self._retina_shape()
def _repr_html_(self):
if not self.embed:
width = height = klass = ''
if self.width:
width = ' width="%d"' % self.width
if self.height:
height = ' height="%d"' % self.height
if self.unconfined:
klass = ' class="unconfined"'
return u'<img src="{url}"{width}{height}{klass}/>'.format(
url=self.url,
width=width,
height=height,
klass=klass,
)
def _data_and_metadata(self):
"""shortcut for returning metadata with shape information, if defined"""
md = {}
if self.width:
md['width'] = self.width
if self.height:
md['height'] = self.height
if self.unconfined:
md['unconfined'] = self.unconfined
if self.metadata:
md.update(self.metadata)
if md:
return self.data, md
else:
return self.data
def _repr_png_(self):
if self.embed and self.format == u'png':
return self._data_and_metadata()
def _repr_jpeg_(self):
if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
return self._data_and_metadata()
def _find_ext(self, s):
return unicode_type(s.split('.')[-1].lower())
class Video(DisplayObject):
def __init__(self, data=None, url=None, filename=None, embed=False, mimetype=None):
"""Create a video object given raw data or an URL.
When this object is returned by an input cell or passed to the
display function, it will result in the video being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw video data or a URL or filename to load the data from.
Raw data will require passing `embed=True`.
url : unicode
            A URL for the video. If you specify `url=`,
            the video data will not be embedded.
filename : unicode
Path to a local file containing the video.
Will be interpreted as a local URL unless `embed=True`.
embed : bool
Should the video be embedded using a data URI (True) or be
loaded using a <video> tag (False).
Since videos are large, embedding them should be avoided, if possible.
You must confirm embedding as your intention by passing `embed=True`.
Local files can be displayed with URLs without embedding the content, via::
Video('./video.mp4')
mimetype: unicode
Specify the mimetype for embedded videos.
Default will be guessed from file extension, if available.
Examples
--------
Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
Video('path/to/video.mp4')
Video('path/to/video.mp4', embed=True)
Video(b'raw-videodata', embed=True)
"""
if url is None and isinstance(data, string_types) and data.startswith(('http:', 'https:')):
url = data
data = None
        elif data is not None and os.path.exists(data):
filename = data
data = None
if data and not embed:
msg = ''.join([
"To embed videos, you must pass embed=True ",
"(this may make your notebook files huge)\n",
"Consider passing Video(url='...')",
])
raise ValueError(msg)
self.mimetype = mimetype
self.embed = embed
super(Video, self).__init__(data=data, url=url, filename=filename)
def _repr_html_(self):
# External URLs and potentially local files are not embedded into the
# notebook output.
if not self.embed:
url = self.url if self.url is not None else self.filename
output = """<video src="{0}" controls>
Your browser does not support the <code>video</code> element.
</video>""".format(url)
return output
# Embedded videos are base64-encoded.
mimetype = self.mimetype
if self.filename is not None:
if not mimetype:
mimetype, _ = mimetypes.guess_type(self.filename)
with open(self.filename, 'rb') as f:
video = f.read()
else:
video = self.data
if isinstance(video, unicode_type):
# unicode input is already b64-encoded
b64_video = video
else:
b64_video = base64_encode(video).decode('ascii').rstrip()
output = """<video controls>
<source src="data:{0};base64,{1}" type="{0}">
Your browser does not support the video tag.
</video>""".format(mimetype, b64_video)
return output
def reload(self):
# TODO
pass
def _repr_png_(self):
# TODO
pass
def _repr_jpeg_(self):
# TODO
pass
def clear_output(wait=False):
"""Clear the output of the current cell receiving output.
Parameters
----------
wait : bool [default: false]
Wait to clear the output until new output is available to replace it."""
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
InteractiveShell.instance().display_pub.clear_output(wait)
else:
print('\033[2K\r', end='')
sys.stdout.flush()
print('\033[2K\r', end='')
sys.stderr.flush()
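# Usage sketch: a simple in-place progress display in a notebook cell; wait=True defers
# clearing until the next output arrives, which avoids flicker.
#
#     import time
#     for i in range(10):
#         clear_output(wait=True)
#         print('step', i)
#         time.sleep(0.1)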
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
"""Select figure formats for the inline backend. Optionally pass quality for JPEG.
For example, this enables PNG and JPEG output with a JPEG quality of 90%::
In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
To set this in your config files use the following::
c.InlineBackend.figure_formats = {'png', 'jpeg'}
c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
Parameters
----------
*formats : strs
One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs :
Keyword args will be relayed to ``figure.canvas.print_figure``.
"""
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.pylabtools import select_figure_formats
# build kwargs, starting with InlineBackend config
kw = {}
from ipykernel.pylab.config import InlineBackend
cfg = InlineBackend.instance()
kw.update(cfg.print_figure_kwargs)
kw.update(**kwargs)
shell = InteractiveShell.instance()
select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
"""Set whether the inline backend closes all figures automatically or not.
By default, the inline backend used in the IPython Notebook will close all
matplotlib figures automatically after each cell is run. This means that
plots in different cells won't interfere. Sometimes, you may want to make
a plot in one cell and then refine it in later cells. This can be accomplished
by::
In [1]: set_matplotlib_close(False)
To set this in your config files use the following::
c.InlineBackend.close_figures = False
Parameters
----------
close : bool
Should all matplotlib figures be automatically closed after each cell is
run?
"""
from ipykernel.pylab.config import InlineBackend
cfg = InlineBackend.instance()
cfg.close_figures = close
| gpl-3.0 |
saltastro/salt-data-quality-site | app/main/pages/instrument/hrs/environment/focus/plots.py | 1 | 6305 | import pandas as pd
from bokeh.embed import components
from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.plotting import figure, ColumnDataSource
from app import db
from app.decorators import data_quality
# creates your plot
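# The formatter below picks a tick-label format for each zoom level (from
# microseconds up to years) so the datetime x-axis stays readable at any scale.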
date_formatter = DatetimeTickFormatter(microseconds=['%f'],
milliseconds=['%S.%2Ns'],
seconds=[':%Ss'],
minsec=[':%Mm:%Ss'],
minutes=['%H:%M:%S'],
hourmin=['%H:%M:'],
hours=["%H:%M"],
days=["%d %b"],
months=["%d %b %Y"],
years=["%b %Y"])
@data_quality(name='focus_bmir', caption='')
def bmir_focus_plot(start_date, end_date):
"""Return a <div> element with a HRS focus plot.
The plot shows the HRS focus for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the focus plot.
"""
title = "BMIR Focus"
y_axis_label = 'Focus'
# creates your query
table = 'FitsHeaderHrs'
column = 'FOC_BMIR'
logic = " "
sql = "select UTStart, {column} as FOCUS, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"\
.format(column=column, start_date=start_date, end_date=end_date,
table=table, logic=logic)
df = pd.read_sql(sql, db.engine)
df2 = pd.read_sql(sql, db.engine)
source = ColumnDataSource(df)
source2 = ColumnDataSource(df2)
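    # NOTE: both data sources above are built from the same query, so the
    # "Blue Arm" and "Red Arm" glyphs below currently plot identical data;
    # the RMIR plot further down splits the two arms by filename prefix instead.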
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Focus: </span>
<span style="font-size: 15px;"> @FOCUS</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date',
y_axis_label=y_axis_label,
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='FOCUS', color='blue', fill_alpha=0.2, size=12, legend='Blue Arm')
p.scatter(source=source2, x='UTStart', y='FOCUS', color='red', fill_alpha=0.2, size=10, legend='Red Arm')
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='focus_rmir', caption='')
def rmir_focus_plot(start_date, end_date):
"""Return a <div> element with a HRS focus plot.
The plot shows the HRS focus for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the focus plot.
"""
title = "RMIR Focus"
y_axis_label = 'Focus'
# creates your query
table = 'FitsHeaderHrs'
column = 'FOC_RMIR'
logic = " and FileName like 'H%%' "
logic2 = " and FileName like 'R%%' "
sql = "select UTStart, {column} as FOCUS, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
sql1 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic)
sql2 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic2)
df = pd.read_sql(sql1, db.engine)
df2 = pd.read_sql(sql2, db.engine)
source = ColumnDataSource(df)
source2 = ColumnDataSource(df2)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Focus: </span>
<span style="font-size: 15px;"> @FOCUS</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date',
y_axis_label=y_axis_label,
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='FOCUS', color='blue', fill_alpha=0.2, size=12, legend='Blue Arm')
p.scatter(source=source2, x='UTStart', y='FOCUS', color='red', fill_alpha=0.2, size=10, legend='Red Arm')
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
p.xaxis[0].formatter = date_formatter
return p
| mit |
imaculate/scikit-learn | sklearn/utils/setup.py | 24 | 2920 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
Achuth17/scikit-learn | sklearn/tree/tests/test_export.py | 76 | 9318 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
| bsd-3-clause |
thomasantony/CarND-Projects | Exercises/Term1/alexnet-feature-extraction/feature_extraction.py | 2 | 1499 | import time
import tensorflow as tf
import numpy as np
import pandas as pd
from scipy.misc import imread
from alexnet import AlexNet
sign_names = pd.read_csv('signnames.csv')
nb_classes = 43
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
resized = tf.image.resize_images(x, (227, 227))
# NOTE: By setting `feature_extract` to `True` we return
# the second to last layer.
fc7 = AlexNet(resized, feature_extract=True)
# TODO: Define a new fully connected layer followed by a softmax activation to classify
# the traffic signs. Assign the result of the softmax activation to `probs` below.
shape = (fc7.get_shape().as_list()[-1], nb_classes) # use this shape for the weight matrix
fc8W = tf.Variable(tf.truncated_normal(shape, stddev=1e-2))
fc8b = tf.Variable(tf.zeros(nb_classes))
logits = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
probs = tf.nn.softmax(logits)
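# NOTE: fc8W/fc8b are freshly initialised and never trained in this script,
# so the probabilities below only exercise the feature-extraction pipeline;
# a real transfer-learning run would optimise a loss on `logits` first.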
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# Read Images
im1 = imread("construction.jpg").astype(np.float32)
im1 = im1 - np.mean(im1)
im2 = imread("stop.jpg").astype(np.float32)
im2 = im2 - np.mean(im2)
# Run Inference
t = time.time()
output = sess.run(probs, feed_dict={x: [im1, im2]})
# Print Output
for input_im_ind in range(output.shape[0]):
inds = np.argsort(output)[input_im_ind, :]
print("Image", input_im_ind)
for i in range(5):
print("%s: %.3f" % (sign_names.ix[inds[-1 - i]][1], output[input_im_ind, inds[-1 - i]]))
print()
print("Time: %.3f seconds" % (time.time() - t))
| mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/internals/test_internals.py | 1 | 46804 | from collections import OrderedDict
from datetime import date, datetime
from distutils.version import LooseVersion
import itertools
import operator
import re
import sys
import numpy as np
import pytest
from pandas._libs.internals import BlockPlacement
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
SparseArray,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.internals import BlockManager, SingleBlockManager, make_block
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal,
assert_frame_equal,
assert_series_equal,
randn,
)
# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h
PY361 = LooseVersion(sys.version) >= LooseVersion("3.6.1")
@pytest.fixture
def mgr():
return create_mgr(
"a: f8; b: object; c: f8; d: object; e: f8;"
"f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;"
"k: M8[ns, US/Eastern]; l: M8[ns, CET];"
)
def assert_block_equal(left, right):
tm.assert_numpy_array_equal(left.values, right.values)
assert left.dtype == right.dtype
assert isinstance(left.mgr_locs, BlockPlacement)
assert isinstance(right.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(left.mgr_locs.as_array, right.mgr_locs.as_array)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(
x=arr, shape=shape, strides=(arr.itemsize,) + (0,) * (len(shape) - 1)
).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
"""
Supported typestr:
* float, f8, f4, f2
* int, i8, i4, i2, i1
* uint, u8, u4, u2, u1
* complex, c16, c8
* bool
* object, string, O
* datetime, dt, M8[ns], M8[ns, tz]
* timedelta, td, m8[ns]
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
* category, category2
"""
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N,)
shape = (num_items,) + item_shape
mat = get_numeric_mat(shape)
if typestr in (
"float",
"f8",
"f4",
"f2",
"int",
"i8",
"i4",
"i2",
"i1",
"uint",
"u8",
"u4",
"u2",
"u1",
):
values = mat.astype(typestr) + num_offset
elif typestr in ("complex", "c16", "c8"):
values = 1.0j * (mat.astype(typestr) + num_offset)
elif typestr in ("object", "string", "O"):
values = np.reshape(
["A{i:d}".format(i=i) for i in mat.ravel() + num_offset], shape
)
elif typestr in ("b", "bool"):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ("datetime", "dt", "M8[ns]"):
values = (mat * 1e9).astype("M8[ns]")
elif typestr.startswith("M8[ns"):
# datetime with tz
m = re.search(r"M8\[ns,\s*(\w+\/?\w*)\]", typestr)
assert m is not None, "incompatible typestr -> {0}".format(typestr)
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
elif typestr in ("timedelta", "td", "m8[ns]"):
values = (mat * 1).astype("m8[ns]")
elif typestr in ("category",):
values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
elif typestr in ("category2",):
values = Categorical(["a", "a", "a", "a", "b", "b", "c", "c", "c", "d"])
elif typestr in ("sparse", "sparse_na"):
# FIXME: doesn't support num_rows != 10
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith("_na"):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray(
[fill_value, fill_value, 1, 2, 3, fill_value, 4, 5, fill_value, 6],
fill_value=fill_value,
)
arr = values.sp_values.view()
arr += num_offset - 1
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows),
)
def create_mgr(descr, item_shape=None):
"""
Construct BlockManager from string description.
String description syntax looks similar to np.matrix initializer. It looks
like this::
a,b,c: f8; d,e,f: i8
Rules are rather simple:
* see list of supported datatypes in `create_block` method
* components are semicolon-separated
* each component is `NAME,NAME,NAME: DTYPE_ID`
* whitespace around colons & semicolons are removed
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
'a:f8-1; b:f8-2; c:f8-foobar'
"""
if item_shape is None:
item_shape = (N,)
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(";"):
d = d.strip()
if not len(d):
continue
names, blockstr = d.partition(":")[::2]
blockstr = blockstr.strip()
names = names.strip().split(",")
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = Index(mgr_items)
blocks = []
num_offset = 0
for blockstr, placement in block_placements.items():
typestr = blockstr.split("-")[0]
blocks.append(
create_block(
typestr, placement, item_shape=item_shape, num_offset=num_offset
)
)
num_offset += len(placement)
return BlockManager(
sorted(blocks, key=lambda b: b.mgr_locs[0]),
[mgr_items] + [np.arange(n) for n in item_shape],
)
class TestBlock:
def setup_method(self, method):
# self.fblock = get_float_ex() # a,c,e
# self.cblock = get_complex_ex() #
# self.oblock = get_obj_ex()
# self.bool_block = get_bool_ex()
# self.int_block = get_int_ex()
self.fblock = create_block("float", [0, 2, 4])
self.cblock = create_block("complex", [7])
self.oblock = create_block("object", [1, 3])
self.bool_block = create_block("bool", [5])
self.int_block = create_block("int", [6])
def test_constructor(self):
int32block = create_block("i4", [0])
assert int32block.dtype == np.int32
def test_pickle(self):
def _check(blk):
assert_block_equal(tm.round_trip_pickle(blk), blk)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_mgr_locs(self):
assert isinstance(self.fblock.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(
self.fblock.mgr_locs.as_array, np.array([0, 2, 4], dtype=np.int64)
)
def test_attrs(self):
assert self.fblock.shape == self.fblock.values.shape
assert self.fblock.dtype == self.fblock.values.dtype
assert len(self.fblock) == len(self.fblock.values)
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = Index(["e", "a", "b", "d", "f"])
ablock = make_block(avals, ref_cols.get_indexer(["e", "b"]))
bblock = make_block(bvals, ref_cols.get_indexer(["a", "d"]))
merged = ablock.merge(bblock)
tm.assert_numpy_array_equal(
merged.mgr_locs.as_array, np.array([0, 1, 2, 3], dtype=np.int64)
)
tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
assert cop is not self.fblock
assert_block_equal(self.fblock, cop)
def test_reindex_index(self):
pass
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
assert isinstance(newb.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(
newb.mgr_locs.as_array, np.array([2, 4], dtype=np.int64)
)
assert (newb.values[0] == 1).all()
newb = self.fblock.copy()
newb.delete(1)
assert isinstance(newb.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(
newb.mgr_locs.as_array, np.array([0, 4], dtype=np.int64)
)
assert (newb.values[1] == 2).all()
newb = self.fblock.copy()
newb.delete(2)
tm.assert_numpy_array_equal(
newb.mgr_locs.as_array, np.array([0, 2], dtype=np.int64)
)
assert (newb.values[1] == 1).all()
newb = self.fblock.copy()
with pytest.raises(Exception):
newb.delete(3)
def test_make_block_same_class(self):
# issue 19431
block = create_block("M8[ns, US/Eastern]", [3])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
block.make_block_same_class(block.values, dtype=block.values.dtype)
class TestDatetimeBlock:
def test_try_coerce_arg(self):
block = create_block("datetime", [0])
# coerce None
none_coerced = block._try_coerce_args(None)
assert pd.Timestamp(none_coerced) is pd.NaT
        # coerce different types of date objects
vals = (np.datetime64("2010-10-10"), datetime(2010, 10, 10), date(2010, 10, 10))
for val in vals:
coerced = block._try_coerce_args(val)
assert np.int64 == type(coerced)
assert pd.Timestamp("2010-10-10") == pd.Timestamp(coerced)
class TestBlockManager:
def test_constructor_corner(self):
pass
def test_attrs(self):
mgr = create_mgr("a,b,c: f8-1; d,e,f: f8-2")
assert mgr.nblocks == 2
assert len(mgr) == 6
def test_is_mixed_dtype(self):
assert not create_mgr("a,b:f8").is_mixed_type
assert not create_mgr("a:f8-1; b:f8-2").is_mixed_type
assert create_mgr("a,b:f8; c,d: f4").is_mixed_type
assert create_mgr("a,b:f8; c,d: object").is_mixed_type
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr("a:bool; a: f8")
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([0])
# test trying to create block manager with overlapping ref locs
with pytest.raises(AssertionError):
BlockManager(blocks, axes)
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([1])
mgr = BlockManager(blocks, axes)
mgr.iget(1)
def test_contains(self, mgr):
assert "a" in mgr
assert "baz" not in mgr
def test_pickle(self, mgr):
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
# share ref_items
# assert mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items
# GH2431
assert hasattr(mgr2, "_is_consolidated")
assert hasattr(mgr2, "_known_consolidated")
# reset to False on load
assert not mgr2._is_consolidated
assert not mgr2._known_consolidated
def test_non_unique_pickle(self):
mgr = create_mgr("a,a,a:f8")
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
mgr = create_mgr("a: f8; a: i8")
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
def test_categorical_block_pickle(self):
mgr = create_mgr("a: category")
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
smgr = create_single_mgr("category")
smgr2 = tm.round_trip_pickle(smgr)
assert_series_equal(Series(smgr), Series(smgr2))
def test_get(self):
cols = Index(list("abc"))
values = np.random.rand(3, 3)
block = make_block(values=values.copy(), placement=np.arange(3))
mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])
assert_almost_equal(mgr.get("a").internal_values(), values[0])
assert_almost_equal(mgr.get("b").internal_values(), values[1])
assert_almost_equal(mgr.get("c").internal_values(), values[2])
def test_set(self):
mgr = create_mgr("a,b,c: int", item_shape=(3,))
mgr.set("d", np.array(["foo"] * 3))
mgr.set("b", np.array(["bar"] * 3))
tm.assert_numpy_array_equal(mgr.get("a").internal_values(), np.array([0] * 3))
tm.assert_numpy_array_equal(
mgr.get("b").internal_values(), np.array(["bar"] * 3, dtype=np.object_)
)
tm.assert_numpy_array_equal(mgr.get("c").internal_values(), np.array([2] * 3))
tm.assert_numpy_array_equal(
mgr.get("d").internal_values(), np.array(["foo"] * 3, dtype=np.object_)
)
def test_set_change_dtype(self, mgr):
mgr.set("baz", np.zeros(N, dtype=bool))
mgr.set("baz", np.repeat("foo", N))
assert mgr.get("baz").dtype == np.object_
mgr2 = mgr.consolidate()
mgr2.set("baz", np.repeat("foo", N))
assert mgr2.get("baz").dtype == np.object_
mgr2.set("quux", randn(N).astype(int))
assert mgr2.get("quux").dtype == np.int_
mgr2.set("quux", randn(N))
assert mgr2.get("quux").dtype == np.float_
def test_set_change_dtype_slice(self): # GH8850
cols = MultiIndex.from_tuples([("1st", "a"), ("2nd", "b"), ("3rd", "c")])
df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
df["2nd"] = df["2nd"] * 2.0
blocks = df._to_dict_of_blocks()
assert sorted(blocks.keys()) == ["float64", "int64"]
assert_frame_equal(
blocks["float64"], DataFrame([[1.0, 4.0], [4.0, 10.0]], columns=cols[:2])
)
assert_frame_equal(blocks["int64"], DataFrame([[3], [6]], columns=cols[2:]))
def test_copy(self, mgr):
cp = mgr.copy(deep=False)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# view assertion
assert cp_blk.equals(blk)
if isinstance(blk.values, np.ndarray):
assert cp_blk.values.base is blk.values.base
else:
# DatetimeTZBlock has DatetimeIndex values
assert cp_blk.values._data.base is blk.values._data.base
cp = mgr.copy(deep=True)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
            # copy assertion: we either have None for a base or, in the case of
            # some blocks (e.g. datetimetz), an array, but it was copied
assert cp_blk.equals(blk)
if not isinstance(cp_blk.values, np.ndarray):
assert cp_blk.values._data.base is not blk.values._data.base
else:
assert cp_blk.values.base is None and blk.values.base is None
def test_sparse(self):
mgr = create_mgr("a: sparse-1; b: sparse-2")
# what to test here?
assert mgr.as_array().dtype == np.float64
def test_sparse_mixed(self):
mgr = create_mgr("a: sparse-1; b: sparse-2; c: f8")
assert len(mgr.blocks) == 3
assert isinstance(mgr, BlockManager)
# what to test here?
def test_as_array_float(self):
mgr = create_mgr("c: f4; d: f2; e: f8")
assert mgr.as_array().dtype == np.float64
mgr = create_mgr("c: f4; d: f2")
assert mgr.as_array().dtype == np.float32
def test_as_array_int_bool(self):
mgr = create_mgr("a: bool-1; b: bool-2")
assert mgr.as_array().dtype == np.bool_
mgr = create_mgr("a: i8-1; b: i8-2; c: i4; d: i2; e: u1")
assert mgr.as_array().dtype == np.int64
mgr = create_mgr("c: i4; d: i2; e: u1")
assert mgr.as_array().dtype == np.int32
def test_as_array_datetime(self):
mgr = create_mgr("h: datetime-1; g: datetime-2")
assert mgr.as_array().dtype == "M8[ns]"
def test_as_array_datetime_tz(self):
mgr = create_mgr("h: M8[ns, US/Eastern]; g: M8[ns, CET]")
assert mgr.get("h").dtype == "datetime64[ns, US/Eastern]"
assert mgr.get("g").dtype == "datetime64[ns, CET]"
assert mgr.as_array().dtype == "object"
def test_astype(self):
# coerce all
mgr = create_mgr("c: f4; d: f2; e: f8")
for t in ["float16", "float32", "float64", "int32", "int64"]:
t = np.dtype(t)
tmgr = mgr.astype(t)
assert tmgr.get("c").dtype.type == t
assert tmgr.get("d").dtype.type == t
assert tmgr.get("e").dtype.type == t
# mixed
mgr = create_mgr("a,b: object; c: bool; d: datetime; e: f4; f: f2; g: f8")
for t in ["float16", "float32", "float64", "int32", "int64"]:
t = np.dtype(t)
tmgr = mgr.astype(t, errors="ignore")
assert tmgr.get("c").dtype.type == t
assert tmgr.get("e").dtype.type == t
assert tmgr.get("f").dtype.type == t
assert tmgr.get("g").dtype.type == t
assert tmgr.get("a").dtype.type == np.object_
assert tmgr.get("b").dtype.type == np.object_
if t != np.int64:
assert tmgr.get("d").dtype.type == np.datetime64
else:
assert tmgr.get("d").dtype.type == t
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
assert len(old_blocks) == len(new_blocks)
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
assert found
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
assert found
# noops
mgr = create_mgr("f: i8; g: f8")
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
mgr = create_mgr("a, b: object; f: i8; g: f8")
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
# convert
mgr = create_mgr("a,b,foo: object; f: i8; g: f8")
mgr.set("a", np.array(["1"] * N, dtype=np.object_))
mgr.set("b", np.array(["2."] * N, dtype=np.object_))
mgr.set("foo", np.array(["foo."] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
assert new_mgr.get("a").dtype == np.int64
assert new_mgr.get("b").dtype == np.float64
assert new_mgr.get("foo").dtype == np.object_
assert new_mgr.get("f").dtype == np.int64
assert new_mgr.get("g").dtype == np.float64
mgr = create_mgr(
"a,b,foo: object; f: i4; bool: bool; dt: datetime; i: i8; g: f8; h: f2"
)
mgr.set("a", np.array(["1"] * N, dtype=np.object_))
mgr.set("b", np.array(["2."] * N, dtype=np.object_))
mgr.set("foo", np.array(["foo."] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
assert new_mgr.get("a").dtype == np.int64
assert new_mgr.get("b").dtype == np.float64
assert new_mgr.get("foo").dtype == np.object_
assert new_mgr.get("f").dtype == np.int32
assert new_mgr.get("bool").dtype == np.bool_
assert new_mgr.get("dt").dtype.type, np.datetime64
assert new_mgr.get("i").dtype == np.int64
assert new_mgr.get("g").dtype == np.float64
assert new_mgr.get("h").dtype == np.float16
def test_interleave(self):
# self
for dtype in ["f8", "i8", "object", "bool", "complex", "M8[ns]", "m8[ns]"]:
mgr = create_mgr("a: {0}".format(dtype))
assert mgr.as_array().dtype == dtype
mgr = create_mgr("a: {0}; b: {0}".format(dtype))
assert mgr.as_array().dtype == dtype
        # will be converted according to the actual dtype of the underlying values
mgr = create_mgr("a: category")
assert mgr.as_array().dtype == "i8"
mgr = create_mgr("a: category; b: category")
assert mgr.as_array().dtype == "i8"
mgr = create_mgr("a: category; b: category2")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: category2")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: category2; b: category2")
assert mgr.as_array().dtype == "object"
# combinations
mgr = create_mgr("a: f8")
assert mgr.as_array().dtype == "f8"
mgr = create_mgr("a: f8; b: i8")
assert mgr.as_array().dtype == "f8"
mgr = create_mgr("a: f4; b: i8")
assert mgr.as_array().dtype == "f8"
mgr = create_mgr("a: f4; b: i8; d: object")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: bool; b: i8")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: complex")
assert mgr.as_array().dtype == "complex"
mgr = create_mgr("a: f8; b: category")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: M8[ns]; b: category")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: M8[ns]; b: bool")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: M8[ns]; b: i8")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: m8[ns]; b: bool")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: m8[ns]; b: i8")
assert mgr.as_array().dtype == "object"
mgr = create_mgr("a: M8[ns]; b: m8[ns]")
assert mgr.as_array().dtype == "object"
def test_interleave_non_unique_cols(self):
df = DataFrame(
[[pd.Timestamp("20130101"), 3.5], [pd.Timestamp("20130102"), 4.5]],
columns=["x", "x"],
index=[1, 2],
)
df_unique = df.copy()
df_unique.columns = ["x", "y"]
assert df_unique.values.shape == df.values.shape
tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self, mgr):
mgr.set("f", randn(N))
mgr.set("d", randn(N))
mgr.set("b", randn(N))
mgr.set("g", randn(N))
mgr.set("h", randn(N))
# we have datetime/tz blocks in mgr
cons = mgr.consolidate()
assert cons.nblocks == 4
cons = mgr.consolidate().get_numeric_data()
assert cons.nblocks == 1
assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(
cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.int64)
)
def test_reindex_index(self):
# TODO: should this be pytest.skip?
pass
def test_reindex_items(self):
# mgr is not consolidated, f8 & f8-2 blocks
mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2")
reindexed = mgr.reindex_axis(["g", "c", "a", "d"], axis=0)
assert reindexed.nblocks == 2
tm.assert_index_equal(reindexed.items, pd.Index(["g", "c", "a", "d"]))
assert_almost_equal(
mgr.get("g").internal_values(), reindexed.get("g").internal_values()
)
assert_almost_equal(
mgr.get("c").internal_values(), reindexed.get("c").internal_values()
)
assert_almost_equal(
mgr.get("a").internal_values(), reindexed.get("a").internal_values()
)
assert_almost_equal(
mgr.get("d").internal_values(), reindexed.get("d").internal_values()
)
def test_get_numeric_data(self):
mgr = create_mgr(
"int: int; float: float; complex: complex;"
"str: object; bool: bool; obj: object; dt: datetime",
item_shape=(3,),
)
mgr.set("obj", np.array([1, 2, 3], dtype=np.object_))
numeric = mgr.get_numeric_data()
tm.assert_index_equal(
numeric.items, pd.Index(["int", "float", "complex", "bool"])
)
assert_almost_equal(
mgr.get("float").internal_values(), numeric.get("float").internal_values()
)
# Check sharing
numeric.set("float", np.array([100.0, 200.0, 300.0]))
assert_almost_equal(
mgr.get("float").internal_values(), np.array([100.0, 200.0, 300.0])
)
numeric2 = mgr.get_numeric_data(copy=True)
tm.assert_index_equal(
numeric.items, pd.Index(["int", "float", "complex", "bool"])
)
numeric2.set("float", np.array([1000.0, 2000.0, 3000.0]))
assert_almost_equal(
mgr.get("float").internal_values(), np.array([100.0, 200.0, 300.0])
)
def test_get_bool_data(self):
mgr = create_mgr(
"int: int; float: float; complex: complex;"
"str: object; bool: bool; obj: object; dt: datetime",
item_shape=(3,),
)
mgr.set("obj", np.array([True, False, True], dtype=np.object_))
bools = mgr.get_bool_data()
tm.assert_index_equal(bools.items, pd.Index(["bool"]))
assert_almost_equal(
mgr.get("bool").internal_values(), bools.get("bool").internal_values()
)
bools.set("bool", np.array([True, False, True]))
tm.assert_numpy_array_equal(
mgr.get("bool").internal_values(), np.array([True, False, True])
)
# Check sharing
bools2 = mgr.get_bool_data(copy=True)
bools2.set("bool", np.array([False, True, False]))
tm.assert_numpy_array_equal(
mgr.get("bool").internal_values(), np.array([True, False, True])
)
def test_unicode_repr_doesnt_raise(self):
repr(create_mgr("b,\u05d0: object"))
def test_missing_unicode_key(self):
df = DataFrame({"a": [1]})
try:
df.loc[:, "\u05d0"] # should not raise UnicodeEncodeError
except KeyError:
pass # this is the expected exception
def test_equals(self):
# unique items
bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2")
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
assert bm1.equals(bm2)
bm1 = create_mgr("a,a,a: i8-1; b,b,b: i8-2")
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
assert bm1.equals(bm2)
def test_equals_block_order_different_dtypes(self):
# GH 9330
mgr_strings = [
"a:i8;b:f8", # basic case
"a:i8;b:f8;c:c8;d:b", # many types
"a:i8;e:dt;f:td;g:string", # more types
"a:i8;b:category;c:category2;d:category2", # categories
"c:sparse;d:sparse_na;b:f8", # sparse
]
for mgr_string in mgr_strings:
bm = create_mgr(mgr_string)
block_perms = itertools.permutations(bm.blocks)
for bm_perm in block_perms:
bm_this = BlockManager(bm_perm, bm.axes)
assert bm.equals(bm_this)
assert bm_this.equals(bm)
def test_single_mgr_ctor(self):
mgr = create_single_mgr("f8", num_rows=5)
assert mgr.as_array().tolist() == [0.0, 1.0, 2.0, 3.0, 4.0]
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2")
for value in invalid_values:
with pytest.raises(ValueError):
bm1.replace_list([1], [2], inplace=value)
class TestIndexing:
# Nosetests-style data-driven tests.
#
# This test applies different indexing routines to block managers and
# compares the outcome to the result of same operations on np.ndarray.
#
    # NOTE: sparse (SparseBlock with fill_value != np.nan) fails a lot of tests
    # and is disabled.
MANAGERS = [
create_single_mgr("f8", N),
create_single_mgr("i8", N),
# 2-dim
create_mgr("a,b,c,d,e,f: f8", item_shape=(N,)),
create_mgr("a,b,c,d,e,f: i8", item_shape=(N,)),
create_mgr("a,b: f8; c,d: i8; e,f: string", item_shape=(N,)),
create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N,)),
# 3-dim
create_mgr("a,b,c,d,e,f: f8", item_shape=(N, N)),
create_mgr("a,b,c,d,e,f: i8", item_shape=(N, N)),
create_mgr("a,b: f8; c,d: i8; e,f: string", item_shape=(N, N)),
create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N, N)),
]
# MANAGERS = [MANAGERS[6]]
def test_get_slice(self):
def assert_slice_ok(mgr, axis, slobj):
mat = mgr.as_array()
            # we may be using an ndarray to test slicing and
            # it might not be the full length of the axis
if isinstance(slobj, np.ndarray):
ax = mgr.axes[axis]
if len(ax) and len(slobj) and len(slobj) != len(ax):
slobj = np.concatenate(
[slobj, np.zeros(len(ax) - len(slobj), dtype=bool)]
)
sliced = mgr.get_slice(slobj, axis=axis)
mat_slobj = (slice(None),) * axis + (slobj,)
tm.assert_numpy_array_equal(
mat[mat_slobj], sliced.as_array(), check_dtype=False
)
tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# slice
assert_slice_ok(mgr, ax, slice(None))
assert_slice_ok(mgr, ax, slice(3))
assert_slice_ok(mgr, ax, slice(100))
assert_slice_ok(mgr, ax, slice(1, 4))
assert_slice_ok(mgr, ax, slice(3, 0, -2))
# boolean mask
assert_slice_ok(mgr, ax, np.array([], dtype=np.bool_))
assert_slice_ok(mgr, ax, np.ones(mgr.shape[ax], dtype=np.bool_))
assert_slice_ok(mgr, ax, np.zeros(mgr.shape[ax], dtype=np.bool_))
if mgr.shape[ax] >= 3:
assert_slice_ok(mgr, ax, np.arange(mgr.shape[ax]) % 3 == 0)
assert_slice_ok(
mgr, ax, np.array([True, True, False], dtype=np.bool_)
)
# fancy indexer
assert_slice_ok(mgr, ax, [])
assert_slice_ok(mgr, ax, list(range(mgr.shape[ax])))
if mgr.shape[ax] >= 3:
assert_slice_ok(mgr, ax, [0, 1, 2])
assert_slice_ok(mgr, ax, [-1, -2, -3])
def test_take(self):
def assert_take_ok(mgr, axis, indexer):
mat = mgr.as_array()
taken = mgr.take(indexer, axis)
tm.assert_numpy_array_equal(
np.take(mat, indexer, axis), taken.as_array(), check_dtype=False
)
tm.assert_index_equal(mgr.axes[axis].take(indexer), taken.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# take/fancy indexer
assert_take_ok(mgr, ax, indexer=[])
assert_take_ok(mgr, ax, indexer=[0, 0, 0])
assert_take_ok(mgr, ax, indexer=list(range(mgr.shape[ax])))
if mgr.shape[ax] >= 3:
assert_take_ok(mgr, ax, indexer=[0, 1, 2])
assert_take_ok(mgr, ax, indexer=[-1, -2, -3])
def test_reindex_axis(self):
def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
mat = mgr.as_array()
indexer = mgr.axes[axis].get_indexer_for(new_labels)
reindexed = mgr.reindex_axis(new_labels, axis, fill_value=fill_value)
tm.assert_numpy_array_equal(
algos.take_nd(mat, indexer, axis, fill_value=fill_value),
reindexed.as_array(),
check_dtype=False,
)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.0):
assert_reindex_axis_is_ok(mgr, ax, pd.Index([]), fill_value)
assert_reindex_axis_is_ok(mgr, ax, mgr.axes[ax], fill_value)
assert_reindex_axis_is_ok(
mgr, ax, mgr.axes[ax][[0, 0, 0]], fill_value
)
assert_reindex_axis_is_ok(
mgr, ax, pd.Index(["foo", "bar", "baz"]), fill_value
)
assert_reindex_axis_is_ok(
mgr, ax, pd.Index(["foo", mgr.axes[ax][0], "baz"]), fill_value
)
if mgr.shape[ax] >= 3:
assert_reindex_axis_is_ok(
mgr, ax, mgr.axes[ax][:-3], fill_value
)
assert_reindex_axis_is_ok(
mgr, ax, mgr.axes[ax][-3::-1], fill_value
)
assert_reindex_axis_is_ok(
mgr, ax, mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value
)
def test_reindex_indexer(self):
def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value):
mat = mgr.as_array()
reindexed_mat = algos.take_nd(mat, indexer, axis, fill_value=fill_value)
reindexed = mgr.reindex_indexer(
new_labels, indexer, axis, fill_value=fill_value
)
tm.assert_numpy_array_equal(
reindexed_mat, reindexed.as_array(), check_dtype=False
)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.0):
assert_reindex_indexer_is_ok(mgr, ax, pd.Index([]), [], fill_value)
assert_reindex_indexer_is_ok(
mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value
)
assert_reindex_indexer_is_ok(
mgr,
ax,
pd.Index(["foo"] * mgr.shape[ax]),
np.arange(mgr.shape[ax]),
fill_value,
)
assert_reindex_indexer_is_ok(
mgr,
ax,
mgr.axes[ax][::-1],
np.arange(mgr.shape[ax]),
fill_value,
)
assert_reindex_indexer_is_ok(
mgr,
ax,
mgr.axes[ax],
np.arange(mgr.shape[ax])[::-1],
fill_value,
)
assert_reindex_indexer_is_ok(
mgr, ax, pd.Index(["foo", "bar", "baz"]), [0, 0, 0], fill_value
)
assert_reindex_indexer_is_ok(
mgr,
ax,
pd.Index(["foo", "bar", "baz"]),
[-1, 0, -1],
fill_value,
)
assert_reindex_indexer_is_ok(
mgr,
ax,
pd.Index(["foo", mgr.axes[ax][0], "baz"]),
[-1, -1, -1],
fill_value,
)
if mgr.shape[ax] >= 3:
assert_reindex_indexer_is_ok(
mgr,
ax,
pd.Index(["foo", "bar", "baz"]),
[0, 1, 2],
fill_value,
)
# test_get_slice(slice_like, axis)
# take(indexer, axis)
# reindex_axis(new_labels, axis)
# reindex_indexer(new_labels, indexer, axis)
class TestBlockPlacement:
def test_slice_len(self):
assert len(BlockPlacement(slice(0, 4))) == 4
assert len(BlockPlacement(slice(0, 4, 2))) == 2
assert len(BlockPlacement(slice(0, 3, 2))) == 2
assert len(BlockPlacement(slice(0, 1, 2))) == 1
assert len(BlockPlacement(slice(1, 0, -1))) == 1
def test_zero_step_raises(self):
with pytest.raises(ValueError):
BlockPlacement(slice(1, 1, 0))
with pytest.raises(ValueError):
BlockPlacement(slice(1, 2, 0))
def test_unbounded_slice_raises(self):
def assert_unbounded_slice_error(slc):
with pytest.raises(ValueError, match="unbounded slice"):
BlockPlacement(slc)
assert_unbounded_slice_error(slice(None, None))
assert_unbounded_slice_error(slice(10, None))
assert_unbounded_slice_error(slice(None, None, -1))
assert_unbounded_slice_error(slice(None, 10, -1))
        # These are "unbounded" because a negative index will change depending
        # on container shape.
assert_unbounded_slice_error(slice(-1, None))
assert_unbounded_slice_error(slice(None, -1))
assert_unbounded_slice_error(slice(-1, -1))
assert_unbounded_slice_error(slice(-1, None, -1))
assert_unbounded_slice_error(slice(None, -1, -1))
assert_unbounded_slice_error(slice(-1, -1, -1))
def test_not_slice_like_slices(self):
def assert_not_slice_like(slc):
assert not BlockPlacement(slc).is_slice_like
assert_not_slice_like(slice(0, 0))
assert_not_slice_like(slice(100, 0))
assert_not_slice_like(slice(100, 100, -1))
assert_not_slice_like(slice(0, 100, -1))
assert not BlockPlacement(slice(0, 0)).is_slice_like
assert not BlockPlacement(slice(100, 100)).is_slice_like
def test_array_to_slice_conversion(self):
def assert_as_slice_equals(arr, slc):
assert BlockPlacement(arr).as_slice == slc
assert_as_slice_equals([0], slice(0, 1, 1))
assert_as_slice_equals([100], slice(100, 101, 1))
assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))
assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))
assert_as_slice_equals([0, 100], slice(0, 200, 100))
assert_as_slice_equals([2, 1], slice(2, 0, -1))
if not PY361:
assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
assert_as_slice_equals([100, 0], slice(100, None, -100))
def test_not_slice_like_arrays(self):
def assert_not_slice_like(arr):
assert not BlockPlacement(arr).is_slice_like
assert_not_slice_like([])
assert_not_slice_like([-1])
assert_not_slice_like([-1, -2, -3])
assert_not_slice_like([-10])
assert_not_slice_like([-1])
assert_not_slice_like([-1, 0, 1, 2])
assert_not_slice_like([-2, 0, 2, 4])
assert_not_slice_like([1, 0, -1])
assert_not_slice_like([1, 1, 1])
def test_slice_iter(self):
assert list(BlockPlacement(slice(0, 3))) == [0, 1, 2]
assert list(BlockPlacement(slice(0, 0))) == []
assert list(BlockPlacement(slice(3, 0))) == []
if not PY361:
assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]
assert list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
tm.assert_numpy_array_equal(
BlockPlacement(slc).as_array, np.asarray(asarray, dtype=np.int64)
)
assert_as_array_equals(slice(0, 3), [0, 1, 2])
assert_as_array_equals(slice(0, 0), [])
assert_as_array_equals(slice(3, 0), [])
assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
if not PY361:
assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
def test_blockplacement_add(self):
bpl = BlockPlacement(slice(0, 5))
assert bpl.add(1).as_slice == slice(1, 6, 1)
assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)
assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]
def test_blockplacement_add_int(self):
def assert_add_equals(val, inc, result):
assert list(BlockPlacement(val).add(inc)) == result
assert_add_equals(slice(0, 0), 0, [])
assert_add_equals(slice(1, 4), 0, [1, 2, 3])
assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
assert_add_equals([1, 2, 4], 0, [1, 2, 4])
assert_add_equals(slice(0, 0), 10, [])
assert_add_equals(slice(1, 4), 10, [11, 12, 13])
assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
assert_add_equals([1, 2, 4], 10, [11, 12, 14])
assert_add_equals(slice(0, 0), -1, [])
assert_add_equals(slice(1, 4), -1, [0, 1, 2])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
with pytest.raises(ValueError):
BlockPlacement(slice(1, 4)).add(-10)
with pytest.raises(ValueError):
BlockPlacement([1, 2, 4]).add(-10)
if not PY361:
assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
with pytest.raises(ValueError):
BlockPlacement(slice(2, None, -1)).add(-1)
class DummyElement:
def __init__(self, value, dtype):
self.value = value
self.dtype = np.dtype(dtype)
def __array__(self):
return np.array(self.value, dtype=self.dtype)
def __str__(self):
return "DummyElement({}, {})".format(self.value, self.dtype)
def __repr__(self):
return str(self)
def astype(self, dtype, copy=False):
self.dtype = dtype
return self
def view(self, dtype):
return type(self)(self.value.view(dtype), dtype)
def any(self, axis=None):
return bool(self.value)
class TestCanHoldElement:
@pytest.mark.parametrize(
"value, dtype",
[
(1, "i8"),
(1.0, "f8"),
(2 ** 63, "f8"),
(1j, "complex128"),
(2 ** 63, "complex128"),
(True, "bool"),
(np.timedelta64(20, "ns"), "<m8[ns]"),
(np.datetime64(20, "ns"), "<M8[ns]"),
],
)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.mod,
operator.pow,
],
ids=lambda x: x.__name__,
)
def test_binop_other(self, op, value, dtype):
skip = {
(operator.add, "bool"),
(operator.sub, "bool"),
(operator.mul, "bool"),
(operator.truediv, "bool"),
(operator.mod, "i8"),
(operator.mod, "complex128"),
(operator.pow, "bool"),
}
if (op, dtype) in skip:
pytest.skip("Invalid combination {},{}".format(op, dtype))
e = DummyElement(value, dtype)
s = pd.DataFrame({"A": [e.value, e.value]}, dtype=e.dtype)
invalid = {
(operator.pow, "<M8[ns]"),
(operator.mod, "<M8[ns]"),
(operator.truediv, "<M8[ns]"),
(operator.mul, "<M8[ns]"),
(operator.add, "<M8[ns]"),
(operator.pow, "<m8[ns]"),
(operator.mul, "<m8[ns]"),
}
if (op, dtype) in invalid:
with pytest.raises(TypeError):
op(s, e.value)
else:
# FIXME: Since dispatching to Series, this test no longer
# asserts anything meaningful
result = op(s, e.value).dtypes
expected = op(s, value).dtypes
assert_series_equal(result, expected)
@pytest.mark.parametrize(
"typestr, holder",
[
("category", Categorical),
("M8[ns]", DatetimeArray),
("M8[ns, US/Central]", DatetimeArray),
("m8[ns]", TimedeltaArray),
("sparse", SparseArray),
],
)
def test_holder(typestr, holder):
blk = create_block(typestr, [1])
assert blk._holder is holder
def test_deprecated_fastpath():
# GH#19265
values = np.random.rand(3, 3)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
make_block(values, placement=np.arange(3), fastpath=True)
def test_validate_ndim():
values = np.array([1.0, 2.0])
placement = slice(2)
msg = r"Wrong number of dimensions. values.ndim != ndim \[1 != 2\]"
with pytest.raises(ValueError, match=msg):
make_block(values, placement, ndim=2)
def test_block_shape():
idx = pd.Index([0, 1, 2, 3, 4])
a = pd.Series([1, 2, 3]).reindex(idx)
b = pd.Series(pd.Categorical([1, 2, 3])).reindex(idx)
assert a._data.blocks[0].mgr_locs.indexer == b._data.blocks[0].mgr_locs.indexer
def test_make_block_no_pandas_array():
# https://github.com/pandas-dev/pandas/pull/24866
arr = pd.array([1, 2])
# PandasArray, no dtype
result = make_block(arr, slice(len(arr)))
assert result.is_integer is True
assert result.is_extension is False
# PandasArray, PandasDtype
result = make_block(arr, slice(len(arr)), dtype=arr.dtype)
assert result.is_integer is True
assert result.is_extension is False
# ndarray, PandasDtype
result = make_block(arr.to_numpy(), slice(len(arr)), dtype=arr.dtype)
assert result.is_integer is True
assert result.is_extension is False
| apache-2.0 |
manashmndl/scikit-learn | examples/model_selection/plot_roc.py | 146 | 3697 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Plot ROC curve
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]))
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
ToniRV/Learning-to-navigate-without-a-map | rlvision/tests/vin_po_export_value_reward_28.py | 1 | 3042 | """Export Value and Reward Map.
Load a trained VIN model and export the predicted path together with the
reward and value maps for the 28x28 grid.
Author: Yuhuang Hu
Email : [email protected]
"""
from __future__ import print_function
import os
import cPickle as pickle
import numpy as np
import keras.backend as K
import matplotlib.pyplot as plt
import rlvision
from rlvision import utils
from rlvision.vin import vin_model, get_layer_output
from rlvision.utils import process_map_data
from rlvision.grid import GridSampler
def get_action(a):
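    # Map the discrete action index (0-7) predicted by the network to a
    # (dx, dy) step on the 8-connected grid; any other index returns None.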
if a == 0:
return -1, -1
if a == 1:
return 0, -1
if a == 2:
return 1, -1
if a == 3:
return -1, 0
if a == 4:
return 1, 0
if a == 5:
return -1, 1
if a == 6:
return 0, 1
if a == 7:
return 1, 1
return None
def find_goal(m):
return np.argwhere(m.max() == m)[0][::-1]
def predict(im, pos, model, k):
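    # One forward pass: reorder the input for the active Keras image data
    # format, take the arg-max action for the current position, and also pull
    # the learned reward map and the k-th value-iteration map out of the named
    # intermediate layers so they can be visualised.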
im_ary = np.array([im]).transpose((0, 2, 3, 1)) \
if K.image_data_format() == 'channels_last' else np.array([im])
res = model.predict([im_ary,
np.array([pos])])
action = np.argmax(res)
reward = get_layer_output(model, 'reward', im_ary)
value = get_layer_output(model, 'value{}'.format(k), im_ary)
reward = np.reshape(reward, im.shape[1:])
value = np.reshape(value, im.shape[1:])
return action, reward, value
file_name = os.path.join(rlvision.RLVISION_DATA,
"chain_data", "grid28_with_idx.pkl")
model_file = os.path.join(
rlvision.RLVISION_MODEL, "grid28-po",
"vin-model-po-28-77-0.89.h5")
im_data, state_data, label_data, sample_idx = process_map_data(
file_name, return_full=True)
model = vin_model(l_s=im_data.shape[2], k=20)
model.load_weights(model_file)
sampler = GridSampler(im_data, state_data, label_data, sample_idx, (28, 28))
gt_collector = []
po_collector = []
diff_collector = []
grid, state, label, goal = sampler.get_grid(77)
gt_collector.append(state)
step_map = np.zeros((2, 28, 28))
step_map[0] = np.ones((28, 28))
step_map[1] = grid[1]
pos = [state[0, 0], state[0, 1]]
path = [(pos[0], pos[1])]
start = (pos[0], pos[1])
for step in xrange(32):
masked_img = utils.mask_grid((pos[1], pos[0]),
grid[0], 3, one_is_free=False)
step_map[0] = utils.accumulate_map(step_map[0], masked_img,
one_is_free=False)
action, reward, value = predict(step_map, pos, model, 20)
dx, dy = get_action(action)
pos[0] = pos[0] + dx
pos[1] = pos[1] + dy
path.append((pos[0], pos[1]))
plt.figure()
plt.imshow(value, cmap="jet")
plt.colorbar()
plt.scatter(x=[start[0]], y=[start[1]], marker="*", c="orange", s=50)
plt.scatter(x=[pos[0]], y=[pos[1]], marker=".", c="purple", s=50)
plt.scatter(x=[goal[0]], y=[goal[1]], marker="*", c="black", s=50)
plt.savefig("grid28-77-%d.png" % (step), dpi=300)
if pos[0] == goal[0] and pos[1] == goal[1]:
print ("[MESSAGE] Found the path!")
break
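# Illustrative follow-up (assumption, not in the original script): store the
# partially-observed prediction next to the ground truth collected above and
# report the difference in path length. `state` is assumed to hold one row
# per ground-truth step.
po_collector.append(np.asarray(path))
diff_collector.append(len(path) - len(state))
print("[MESSAGE] predicted path length: %d, ground-truth length: %d"
      % (len(path), len(state)))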
| mit |
soulmachine/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 20 | 4491 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus, even with no noise, clustering
using this distance will not separate waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. the "cityblock" distance) is much smaller than
its l2 norm (the "euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of each class, are much larger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
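# Optional sketch (not part of the original example): the visual impression
# can be quantified with the adjusted Rand index between the ground-truth
# labels and the clustering obtained with each metric.
from sklearn.metrics import adjusted_rand_score
for metric in ["cosine", "euclidean", "cityblock"]:
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    linkage="average", affinity=metric).fit(X)
    print("%-10s ARI = %.2f" % (metric, adjusted_rand_score(y, model.labels_)))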
| bsd-3-clause |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/numpy/lib/function_base.py | 7 | 169604 | from __future__ import division, absolute_import, print_function
import collections
import operator
import re
import sys
import warnings
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d, transpose
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar, absolute, AxisError
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import (
_insert, add_docstring, digitize, bincount, normalize_axis_index,
interp as compiled_interp, interp_complex as compiled_interp_complex
)
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
if sys.version_info[0] < 3:
# Force range to be a generator, for np.delete's usage.
range = xrange
import __builtin__ as builtins
else:
import builtins
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip',
'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def rot90(m, k=1, axes=(0,1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
.. versionadded:: 1.12.0
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes: (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
Returns
-------
y : ndarray
A rotated view of `m`.
See Also
--------
flip : Reverse the order of elements in an array along the given axis.
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Notes
-----
rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]])
"""
axes = tuple(axes)
if len(axes) != 2:
raise ValueError("len(axes) must be 2.")
m = asanyarray(m)
if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:
raise ValueError("Axes must be different.")
if (axes[0] >= m.ndim or axes[0] < -m.ndim
or axes[1] >= m.ndim or axes[1] < -m.ndim):
raise ValueError("Axes={} out of range for array of ndim={}."
.format(axes, m.ndim))
k %= 4
if k == 0:
return m[:]
if k == 2:
return flip(flip(m, axes[0]), axes[1])
axes_list = arange(0, m.ndim)
(axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
axes_list[axes[0]])
if k == 1:
return transpose(flip(m,axes[1]), axes_list)
else:
# k == 3
return flip(transpose(m, axes_list), axes[1])
def flip(m, axis):
"""
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
.. versionadded:: 1.12.0
Parameters
----------
m : array_like
Input array.
axis : integer
Axis in array, which entries are reversed.
Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
See Also
--------
flipud : Flip an array vertically (axis=0).
fliplr : Flip an array horizontally (axis=1).
Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> A = np.random.randn(3,4,5)
>>> np.all(flip(A,2) == A[:,:,::-1,...])
True
"""
if not hasattr(m, 'ndim'):
m = asarray(m)
indexer = [slice(None)] * m.ndim
try:
indexer[axis] = slice(None, None, -1)
except IndexError:
raise ValueError("axis=%i is invalid for the %i-dimensional input array"
% (axis, m.ndim))
return m[tuple(indexer)]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : bool
Return ``True`` if the object has an iterator method or is a
sequence and ``False`` otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
True
>>> np.iterable(2)
False
"""
try:
iter(y)
except TypeError:
return False
return True
def _hist_bin_sqrt(x):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / np.sqrt(x.size)
def _hist_bin_sturges(x):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_doane(x):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x`. The Sturges estimator
is quite good for small (<1000) datasets and is the default in the R
language. This method gives good off the shelf behaviour.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
# There is no need to check for zero here. If ptp is, so is IQR and
# vice versa. Either both are zero or neither one is.
return min(_hist_bin_fd(x), _hist_bin_sturges(x))
# Private dict initialized at module load time
_hist_bin_selectors = {'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
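# Illustrative note (not part of the NumPy source): given data ``b`` already
# trimmed to the requested range, a named estimator is turned into a bin
# count roughly as
#
#     width = _hist_bin_selectors['fd'](b)
#     n_bins = int(np.ceil(b.ptp() / width)) if width else 1
#
# which mirrors the logic used inside ``histogram`` below.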
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
            Less robust estimator that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
behavior. It will be removed in NumPy 2.0.0. Use the ``density``
keyword instead. If ``False``, the result will contain the
number of samples in each bin. If ``True``, the result is the
value of the probability *density* function at the bin,
normalized such that the *integral* over the range is 1. Note
that this latter behavior is known to be buggy with unequal bin
widths; use ``density`` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
.. versionadded:: 1.11.0
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'Rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'Sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'Doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'Sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
# Do not modify the original value of range so we can check for `None`
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
mn, mx = 0.0, 1.0
else:
mn, mx = a.min() + 0.0, a.max() + 0.0
else:
mn, mx = [mi + 0.0 for mi in range]
if mn > mx:
raise ValueError(
'max must be larger than min in range parameter.')
if not np.all(np.isfinite([mn, mx])):
raise ValueError(
'range parameter must be finite.')
if mn == mx:
mn -= 0.5
mx += 0.5
if isinstance(bins, basestring):
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bins not in _hist_bin_selectors:
raise ValueError("{0} not a valid estimator for bins".format(bins))
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
# Make a reference to `a`
b = a
# Update the reference if the range needs truncation
if range is not None:
keep = (a >= mn)
keep &= (a <= mx)
if not np.logical_and.reduce(keep):
b = a[keep]
if b.size == 0:
bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bins](b)
if width:
bins = int(np.ceil((mx - mn) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
bins = 1
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# Compute the bin edges for potential correction.
bin_edges = linspace(mn, mx, bins + 1, endpoint=True)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a_data = tmp_a.astype(float)
tmp_a = tmp_a_data - mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a_data < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = ((tmp_a_data >= bin_edges[indices + 1])
& (indices != bins - 1))
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real,
minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag,
minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w,
minlength=bins).astype(ntype)
# Rename the bin edges for return.
bins = bin_edges
else:
bins = asarray(bins)
if np.any(bins[:-1] > bins[1:]):
raise ValueError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for NumPy 2.0.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
if not np.all(np.isfinite(range)):
raise ValueError(
'range parameter must be finite.')
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which to average `a`. The default,
axis=None, will average over all of the elements of the input array.
If axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, averaging is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
a = np.asanyarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
wgt = np.asanyarray(weights)
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = np.result_type(a.dtype, wgt.dtype)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=result_dtype)
if np.any(scl == 0.0):
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
if returned:
if scl.shape != avg.shape:
scl = np.broadcast_to(scl, avg.shape).copy()
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print('ValueError')
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray or scalar
The input domain.
condlist : list of bool arrays or bool scalars
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., alpha=1)``, then each function is called as
``f(x, alpha=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
Apply the same function to a scalar value.
>>> y = -2
>>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x])
array(2)
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
if not isscalar(condlist) and x.size == 1 and x.ndim == 0:
condlist = [[c] for c in condlist]
else:
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
# Only able to stack vertically if the array is 1d or less
if x.ndim <= 1:
condlist = np.vstack([condlist, ~totlist])
else:
condlist = [asarray(c, dtype=bool) for c in condlist]
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning, stacklevel=2)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in choicelist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to:
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior points and either first or second order accurate one-sides
(forward or backwards) differences at the boundaries.
The returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar or array, optional
Spacing between f values. Default unitary spacing for all dimensions.
Spacing can be specified using:
1. single scalar to specify a sample distance for all dimensions.
2. N scalars to specify a constant sample distance for each dimension.
i.e. `dx`, `dy`, `dz`, ...
3. N arrays to specify the coordinates of the values along each
dimension of F. The length of the array must match the size of
the corresponding dimension
4. Any combination of N scalars/arrays with the meaning of 2. and 3.
If `axis` is given, the number of varargs must equal the number of axes.
Default: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N-th order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
The default (axis = None) is to calculate the gradient for all the axes
of the input array. axis may be negative, in which case it counts from
the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : ndarray or list of ndarray
A set of ndarrays (or a single ndarray if there is only one dimension)
corresponding to the derivatives of f with respect to each dimension.
Each derivative has the same shape as f.
Examples
--------
>>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(f)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(f, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
Spacing can be also specified with an array that represents the coordinates
of the values F along the dimensions.
For instance a uniform spacing:
>>> x = np.arange(f.size)
>>> np.gradient(f, x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
Or a non uniform one:
>>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float)
>>> np.gradient(f, x)
array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
In this example the spacing is also specified:
uniform for axis=0 and non uniform for axis=1
>>> dx = 2.
>>> y = [1., 1.5, 3.5]
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y)
[array([[ 1. , 1. , -0.5],
[ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ],
[ 2. , 1.7, 0.5]])]
It is possible to specify how boundaries are treated using `edge_order`
>>> x = np.array([0, 1, 2, 3, 4])
>>> f = x**2
>>> np.gradient(f, edge_order=1)
array([ 1., 2., 4., 6., 7.])
>>> np.gradient(f, edge_order=2)
array([-0., 2., 4., 6., 8.])
The `axis` keyword can be used to specify a subset of axes of which the
gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
Notes
-----
Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous
    derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, the
    finite difference coefficients are computed by minimising
    the consistency error :math:`\\eta_{i}`:
.. math::
\\eta_{i} = f_{i}^{\\left(1\\right)} -
\\left[ \\alpha f\\left(x_{i}\\right) +
\\beta f\\left(x_{i} + h_{d}\\right) +
\\gamma f\\left(x_{i}-h_{s}\\right)
\\right]
By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
with their Taylor series expansion, this translates into solving
the following the linear system:
.. math::
\\left\\{
\\begin{array}{r}
\\alpha+\\beta+\\gamma=0 \\\\
-\\beta h_{d}+\\gamma h_{s}=1 \\\\
\\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
\\end{array}
\\right.
The resulting approximation of :math:`f_{i}^{(1)}` is the following:
.. math::
\\hat f_{i}^{(1)} =
\\frac{
h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+ \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
- h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
{ h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
+ \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+ h_{s}h_{d}^{2}}{h_{d}
+ h_{s}}\\right)
It is worth noting that if :math:`h_{s}=h_{d}`
(i.e., data are evenly spaced)
we find the standard second order approximation:
.. math::
\\hat f_{i}^{(1)}=
\\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+ \\mathcal{O}\\left(h^{2}\\right)
With a similar procedure the forward/backward approximations used for
boundaries can be derived.
References
----------
.. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
(Texts in Applied Mathematics). New York: Springer.
.. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
in Geophysical Fluid Dynamics. New York: Springer.
.. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids,
Mathematics of Computation 51, no. 184 : 699-706.
`PDF <http://www.ams.org/journals/mcom/1988-51-184/
S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
"""
f = np.asanyarray(f)
N = f.ndim # number of dimensions
axes = kwargs.pop('axis', None)
if axes is None:
axes = tuple(range(N))
else:
axes = _nx.normalize_axis_tuple(axes, N)
len_axes = len(axes)
n = len(varargs)
if n == 0:
# no spacing argument - use 1 in all axes
dx = [1.0] * len_axes
elif n == 1 and np.ndim(varargs[0]) == 0:
# single scalar for all axes
dx = varargs * len_axes
elif n == len_axes:
# scalar or 1d array for each axis
dx = list(varargs)
for i, distances in enumerate(dx):
if np.ndim(distances) == 0:
continue
elif np.ndim(distances) != 1:
raise ValueError("distances must be either scalars or 1d")
if len(distances) != f.shape[axes[i]]:
raise ValueError("when 1d, distances must match "
"the length of the corresponding dimension")
diffx = np.diff(distances)
# if distances are constant reduce to the scalar case
# since it brings a consistent speedup
if (diffx == diffx[0]).all():
diffx = diffx[0]
dx[i] = diffx
else:
raise TypeError("invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for i, axis in enumerate(axes):
if y.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least (edge_order + 1) elements are required.")
# result allocation
out = np.empty_like(y, dtype=otype)
uniform_spacing = np.ndim(dx[i]) == 0
# Numerical differentiation: 2nd order interior
slice1[axis] = slice(1, -1)
slice2[axis] = slice(None, -2)
slice3[axis] = slice(1, -1)
slice4[axis] = slice(2, None)
if uniform_spacing:
out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i])
else:
dx1 = dx[i][0:-1]
dx2 = dx[i][1:]
a = -(dx2)/(dx1 * (dx1 + dx2))
b = (dx2 - dx1) / (dx1 * dx2)
c = dx1 / (dx2 * (dx1 + dx2))
# fix the shape for broadcasting
shape = np.ones(N, dtype=int)
shape[axis] = -1
a.shape = b.shape = c.shape = shape
# 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
# Numerical differentiation: 1st order edges
if edge_order == 1:
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
dx_0 = dx[i] if uniform_spacing else dx[i][0]
# 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0])
out[slice1] = (y[slice2] - y[slice3]) / dx_0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
dx_n = dx[i] if uniform_spacing else dx[i][-1]
# 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
out[slice1] = (y[slice2] - y[slice3]) / dx_n
# Numerical differentiation: 2nd order edges
else:
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
if uniform_spacing:
a = -1.5 / dx[i]
b = 2. / dx[i]
c = -0.5 / dx[i]
else:
dx1 = dx[i][0]
dx2 = dx[i][1]
a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2))
b = (dx1 + dx2) / (dx1 * dx2)
c = - dx1 / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2]
out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
slice1[axis] = -1
slice2[axis] = -3
slice3[axis] = -2
slice4[axis] = -1
if uniform_spacing:
a = 0.5 / dx[i]
b = -2. / dx[i]
c = 1.5 / dx[i]
else:
dx1 = dx[i][-2]
dx2 = dx[i][-1]
a = (dx2) / (dx1 * (dx1 + dx2))
b = - (dx2 + dx1) / (dx1 * dx2)
c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len_axes == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th discrete difference along given axis.
The first difference is given by ``out[n] = a[n+1] - a[n]`` along
    the given axis; higher differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The n-th differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`. The
type of the output is the same as that of the input.
See Also
--------
gradient, ediff1d, cumsum
Notes
-----
For boolean arrays, the preservation of type means that the result
will contain `False` when consecutive elements are the same and
`True` when they differ.
For unsigned integer arrays, the results will also be unsigned. This should
not be surprising, as the result is consistent with calculating the
difference directly:
>>> u8_arr = np.array([1, 0], dtype=np.uint8)
>>> np.diff(u8_arr)
array([255], dtype=uint8)
>>> u8_arr[1,...] - u8_arr[0,...]
array(255, np.uint8)
If this is not desirable, then the array should be cast to a larger integer
type first:
>>> i16_arr = u8_arr.astype(np.int16)
>>> np.diff(i16_arr)
array([-1], dtype=int16)
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = a.ndim
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of float or complex
The y-coordinates of the data points, same length as `xp`.
left : optional float or complex corresponding to fp
Value to return for `x < xp[0]`, default is `fp[0]`.
right : optional float or complex corresponding to fp
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or complex (corresponding to fp) or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
Complex interpolation
>>> x = [1.5, 4.0]
>>> xp = [2,3,5]
>>> fp = [1.0j, 0, 2+3j]
>>> np.interp(x, xp, fp)
array([ 0.+1.j , 1.+1.5j])
"""
fp = np.asarray(fp)
if np.iscomplexobj(fp):
interp_func = compiled_interp_complex
input_dtype = np.complex128
else:
interp_func = compiled_interp
input_dtype = np.float64
if period is None:
if isinstance(x, (float, int, number)):
return interp_func([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return interp_func([x], xp, fp, left, right).item()
else:
return interp_func(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=input_dtype)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
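        # Pad one sample on each side so the interpolation wraps across the
        # period boundary: the last point reappears shifted by -period and the
        # first point reappears shifted by +period.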
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return interp_func(x, xp, fp, left, right)
else:
return interp_func(x, xp, fp, left, right).item()
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = p.ndim
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : ndarray
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N, it will be repeated, and if elements of `arr` are to be masked,
this sequence must be non-empty.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
if not isinstance(arr, np.ndarray):
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(arr).__name__))
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
# See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
_DIMENSION_NAME = r'\w+'
_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)
_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST)
_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)
_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST)
def _parse_gufunc_signature(signature):
"""
Parse string signatures for a generalized universal function.
Arguments
---------
signature : string
Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)``
for ``np.matmul``.
Returns
-------
Tuple of input and output core dimensions parsed from the signature, each
of the form List[Tuple[str, ...]].
"""
if not re.match(_SIGNATURE, signature):
raise ValueError(
'not a valid gufunc signature: {}'.format(signature))
return tuple([tuple(re.findall(_DIMENSION_NAME, arg))
for arg in re.findall(_ARGUMENT, arg_list)]
for arg_list in signature.split('->'))
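# Illustrative sketch of the parser above (shapes chosen arbitrarily):
#   _parse_gufunc_signature('(m,n),(n,p)->(m,p)')
#   returns ([('m', 'n'), ('n', 'p')], [('m', 'p')])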
def _update_dim_sizes(dim_sizes, arg, core_dims):
"""
Incrementally check and update core dimension sizes for a single argument.
Arguments
---------
dim_sizes : Dict[str, int]
Sizes of existing core dimensions. Will be updated in-place.
arg : ndarray
Argument to examine.
core_dims : Tuple[str, ...]
Core dimensions for this argument.
"""
if not core_dims:
return
num_core_dims = len(core_dims)
if arg.ndim < num_core_dims:
raise ValueError(
'%d-dimensional argument does not have enough '
'dimensions for all core dimensions %r'
% (arg.ndim, core_dims))
core_shape = arg.shape[-num_core_dims:]
for dim, size in zip(core_dims, core_shape):
if dim in dim_sizes:
if size != dim_sizes[dim]:
raise ValueError(
'inconsistent size for core dimension %r: %r vs %r'
% (dim, size, dim_sizes[dim]))
else:
dim_sizes[dim] = size
def _parse_input_dimensions(args, input_core_dims):
"""
Parse broadcast and core dimensions for vectorize with a signature.
Arguments
---------
args : Tuple[ndarray, ...]
Tuple of input arguments to examine.
input_core_dims : List[Tuple[str, ...]]
List of core dimensions corresponding to each input.
Returns
-------
broadcast_shape : Tuple[int, ...]
Common shape to broadcast all non-core dimensions to.
dim_sizes : Dict[str, int]
Common sizes for named core dimensions.
"""
broadcast_args = []
dim_sizes = {}
for arg, core_dims in zip(args, input_core_dims):
_update_dim_sizes(dim_sizes, arg, core_dims)
ndim = arg.ndim - len(core_dims)
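        # Build a cheap dummy array exposing only the loop (non-core)
        # dimensions; its shape is all that _broadcast_shape needs below, so no
        # real data is allocated.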
dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim])
broadcast_args.append(dummy_array)
broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args)
return broadcast_shape, dim_sizes
def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):
"""Helper for calculating broadcast shapes with core dimensions."""
return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims)
for core_dims in list_of_core_dims]
def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
"""Helper for creating output arrays in vectorize."""
shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)
arrays = tuple(np.empty(shape, dtype=dtype)
for shape, dtype in zip(shapes, dtypes))
return arrays
class vectorize(object):
"""
vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
signature=None)
Generalized function class.
Define a vectorized function which takes a nested sequence of objects or
    numpy arrays as inputs and returns a single numpy array or a tuple of numpy
    arrays as output. The vectorized function evaluates `pyfunc` over successive
    tuples of the input arrays like the python map function, except it uses the
broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
signature : string, optional
Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for
vectorized matrix-vector multiplication. If provided, ``pyfunc`` will
be called with (and expected to return) arrays with shapes given by the
size of corresponding core dimensions. By default, ``pyfunc`` is
assumed to take scalars as input and output.
.. versionadded:: 1.12.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified:
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified:
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
The `signature` argument allows for vectorizing functions that act on
non-scalar arrays of fixed length. For example, you can use it for a
vectorized calculation of Pearson correlation coefficient and its p-value:
>>> import scipy.stats
>>> pearsonr = np.vectorize(scipy.stats.pearsonr,
... signature='(n),(n)->(),()')
>>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])
(array([ 1., -1.]), array([ 0., 0.]))
Or for a vectorized convolution:
>>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')
>>> convolve(np.eye(4), [1, 2, 1])
array([[ 1., 2., 1., 0., 0., 0.],
[ 0., 1., 2., 1., 0., 0.],
[ 0., 0., 1., 2., 1., 0.],
[ 0., 0., 0., 1., 2., 1.]])
See Also
--------
frompyfunc : Takes an arbitrary Python function and returns a ufunc
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
    Support for the new keyword-argument interface and the `excluded` argument
    further degrades performance.
References
----------
.. [1] NumPy Reference, section `Generalized Universal Function API
<http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.
"""
def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
cache=False, signature=None):
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
for char in otypes:
if char not in typecodes['All']:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
otypes = ''.join([_nx.dtype(x).char for x in otypes])
elif otypes is not None:
raise ValueError("Invalid otype specification")
self.otypes = otypes
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if signature is not None:
self._in_and_out_core_dims = _parse_gufunc_signature(signature)
else:
self._in_and_out_core_dims = None
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes is not None:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
args = [asarray(arg) for arg in args]
if builtins.any(arg.size == 0 for arg in args):
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
inputs = [arg.flat[0] for arg in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if self.signature is not None:
res = self._vectorize_call_with_signature(func, args)
elif not args:
res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(a, copy=False, subok=True, dtype=object)
for a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
res = array(outputs, copy=False, subok=True, dtype=otypes[0])
else:
res = tuple([array(x, copy=False, subok=True, dtype=t)
for x, t in zip(outputs, otypes)])
return res
def _vectorize_call_with_signature(self, func, args):
"""Vectorized call over positional arguments with a signature."""
input_core_dims, output_core_dims = self._in_and_out_core_dims
if len(args) != len(input_core_dims):
raise TypeError('wrong number of positional arguments: '
'expected %r, got %r'
% (len(input_core_dims), len(args)))
args = tuple(asanyarray(arg) for arg in args)
broadcast_shape, dim_sizes = _parse_input_dimensions(
args, input_core_dims)
input_shapes = _calculate_shapes(broadcast_shape, dim_sizes,
input_core_dims)
args = [np.broadcast_to(arg, shape, subok=True)
for arg, shape in zip(args, input_shapes)]
outputs = None
otypes = self.otypes
nout = len(output_core_dims)
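        # Iterate over every index of the broadcast (loop) shape, calling the
        # wrapped function on the core blocks; the output arrays are allocated
        # lazily once the first result reveals the dtypes and core sizes.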
for index in np.ndindex(*broadcast_shape):
results = func(*(arg[index] for arg in args))
n_results = len(results) if isinstance(results, tuple) else 1
if nout != n_results:
raise ValueError(
'wrong number of outputs from pyfunc: expected %r, got %r'
% (nout, n_results))
if nout == 1:
results = (results,)
if outputs is None:
for result, core_dims in zip(results, output_core_dims):
_update_dim_sizes(dim_sizes, result, core_dims)
if otypes is None:
otypes = [asarray(result).dtype for result in results]
outputs = _create_arrays(broadcast_shape, dim_sizes,
output_core_dims, otypes)
for output, result in zip(outputs, results):
output[index] = result
if outputs is None:
# did not call the function even once
if otypes is None:
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
if builtins.any(dim not in dim_sizes
for dims in output_core_dims
for dim in dims):
raise ValueError('cannot call `vectorize` with a signature '
'including new output dimensions on size 0 '
'inputs')
outputs = _create_arrays(broadcast_shape, dim_sizes,
output_core_dims, otypes)
return outputs[0] if nout == 1 else outputs
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True,
then normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x))
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions")
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
if y.ndim > 2:
raise ValueError("y has more than 2 dimensions")
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if not rowvar and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if not rowvar and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
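    # With weights this reproduces the docstring formula: the covariance is
    # later divided by fact = v1 - ddof * v2 / v1, where v1 = sum(w) and
    # v2 = sum(w * aweights); without weights it reduces to N - ddof.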
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice",
RuntimeWarning, stacklevel=2)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = dot(X, X_T.conj())
c *= 1. / np.float64(fact)
return c.squeeze()
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
Due to floating point rounding the resulting array may not be Hermitian,
the diagonal elements may not be 1, and the elements may not satisfy the
inequality abs(a) <= 1. The real and imaginary parts are clipped to the
interval [-1, 1] in an attempt to improve on that situation but is not
much help in the complex case.
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
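    Examples
    --------
    A minimal illustrative case: two perfectly anti-correlated variables give
    off-diagonal coefficients of exactly -1.
    >>> x = np.array([[0., 1., 2.], [2., 1., 0.]])
    >>> np.corrcoef(x)
    array([[ 1., -1.],
           [-1.,  1.]])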
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning, stacklevel=2)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError:
# scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
stddev = sqrt(d.real)
c /= stddev[:, None]
c /= stddev[None, :]
# Clip real and imaginary parts to [-1, 1]. This does not guarantee
# abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
# excessive work.
np.clip(c.real, -1, 1, out=c.real)
if np.iscomplexobj(c):
np.clip(c.imag, -1, 1, out=c.imag)
return c
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the
    product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
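    # Evaluate a Chebyshev series at x with coefficients `vals` using a
    # Clenshaw-style recurrence (port of the cephes `chbevl` routine).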
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
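    # I0 is an even function, so fold negative arguments onto [0, inf); then
    # use the series fit on [0, 8] and the large-argument fit on (8, inf).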
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
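    # Substitute a tiny value at x == 0 to avoid 0/0; sin(y)/y then evaluates
    # to 1.0 in double precision, which is the correct limiting value.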
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
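    Examples
    --------
    A small illustrative case; sorting happens column-wise (along axis 0):
    >>> np.msort(np.array([[3, 1], [2, 4]]))
    array([[2, 1],
           [3, 4]])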
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
Reduction function capable of receiving a single axis argument.
It is called with `a` as the first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
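Examples
--------
A small illustrative call (input values are arbitrary); the second element
is the shape a ``keepdims=True`` reduction would have produced:
>>> r, keepdim = _ureduce(np.arange(6.).reshape(2, 3), func=np.median, axis=1)
>>> r
array([ 1.,  4.])
>>> keepdim
[2, 1]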
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
axis = _nx.normalize_axis_tuple(axis, nd)
for ax in axis:
keepdim[ax] = 1
if len(axis) == 1:
kwargs['axis'] = axis[0]
else:
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`median`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. If `overwrite_input` is ``True`` and `a` is not already an
`ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float64``, then the output data-type is
``np.float64``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, percentile
Notes
-----
Given a vector ``V`` of length ``N``, the median of ``V`` is the
middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
two middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
return np.lib.utils._median_nancheck(part, rout, axis, out)
else:
# if there are no nans
# Use mean in both the odd and even case to coerce the data type
# and to check and use the out array.
return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile(s) of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100 inclusive.
axis : {int, sequence of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
version of the array. A sequence of axes is supported since
version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`percentile`. This will save memory when you do not need to
preserve the contents of the input array. In this case you
should not make any assumptions about the contents of the input
`a` after this function completes -- treat it as undefined.
Default is False. If `a` is not already an array, this parameter
will have no effect as `a` will be converted to an array
internally regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If `q` is a single percentile and `axis=None`, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the percentiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, median, nanpercentile
Notes
-----
Given a vector ``V`` of length ``N``, the ``q``-th percentile of
``V`` is the value ``q/100`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the percentile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=50``, the same as the minimum if ``q=0`` and the
same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 50, axis=0)
array([ 6.5,  4.5,  2.5])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> np.percentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=out)
array([ 6.5,  4.5,  2.5])
>>> m
array([ 6.5,  4.5,  2.5])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a == b)
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because the following code fails for scalars
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
# prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = 0.5 * (floor(indices) + ceil(indices))
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
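# Illustrative arithmetic (hypothetical values): for the 50th percentile
# (q already scaled to 0.5 above) and Nx = 4, indices = 0.5 * 3 = 1.5,
# so indices_below = 1, indices_above = 2, and both weights equal 0.5,
# i.e. the average of the two middle order statistics.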
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in percentile",
RuntimeWarning, stacklevel=3)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
The sample points corresponding to the `y` values. If `x` is None,
the sample points are assumed to be evenly spaced `dx` apart. The
default is None.
dx : scalar, optional
The spacing between sample points when `x` is None. The default is 1.
axis : int, optional
The axis along which to integrate.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points
will be taken from `y` array, by default x-axis distances between
points will be 1.0, alternatively they can be provided with `x` array
or with `dx` scalar. Return value will be equal to combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = y.ndim
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
# always succeed
def add_newdoc(place, obj, doc):
"""
Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
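Hypothetical usage sketch (module, object and docstring text below are
placeholder names, not real targets):
add_newdoc('some.module', 'some_function', 'One-line docstring.')
add_newdoc('some.module', 'SomeClass', ('some_method', 'Method docstring.'))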
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = np.meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = np.meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:])
for i, x in enumerate(xi)]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + s0[2:]
output[1].shape = (-1, 1) + s0[2:]
if not sparse:
# Return the full N-D matrix (not only the 1-D vector)
output = np.broadcast_arrays(*output, subok=True)
if copy_:
output = [x.copy() for x in output]
return output
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = -1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning, stacklevel=2)
if wrap:
return wrap(arr)
else:
return arr.copy(order=arrorder)
axis = normalize_axis_index(axis, ndim)
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy(order=arrorder))
else:
return arr.copy(order=arrorder)
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arrorder)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn("in the future insert will treat boolean arrays and "
"array-likes as boolean index instead of casting it "
"to integer", FutureWarning, stacklevel=2)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning, stacklevel=2)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning, stacklevel=2)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning, stacklevel=2)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
Note that for higher dimensional inserts `obj=0` behaves very differently
from `obj=[0]`, just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
elif ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning, stacklevel=2)
arr = arr.copy(order=arrorder)
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
else:
axis = normalize_axis_index(axis, ndim)
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning, stacklevel=2)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very differently from a[:,[0],:] = ...! This changes values so that
# it works like the second case. (here a[:,0:1,:])
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning, stacklevel=2)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arrorder)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| apache-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/tseries/tests/test_timezones.py | 1 | 34781 | # pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, tzinfo, date
import sys
import os
import nose
import numpy as np
import pytz
from pandas import (Index, Series, TimeSeries, DataFrame, isnull,
date_range, Timestamp)
from pandas import DatetimeIndex, Int64Index, to_datetime, NaT
from pandas.core.daterange import DateRange
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
from pandas.tseries.index import bdate_range, date_range
import pandas.tseries.tools as tools
from pytz import NonExistentTimeError
from pandas.util.testing import assert_series_equal, assert_almost_equal, assertRaisesRegexp
import pandas.util.testing as tm
import pandas.lib as lib
import pandas.core.datetools as dt
from numpy.random import rand
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
from pandas.compat import range, lrange, zip, cPickle as pickle
from pandas.core.datetools import BDay
import pandas.core.common as com
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
try:
import pytz
except ImportError:
pass
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
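# Illustrative check (hypothetical call): FixedOffset(-420, '-07:00')
# returns utcoffset(dt) == timedelta(minutes=-420), i.e. 7 hours behind UTC.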
fixed_off = FixedOffset(-420, '-07:00')
fixed_off_no_name = FixedOffset(-330, None)
class TestTimeZoneSupport(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
_skip_if_no_pytz()
def test_utc_to_local_no_modify(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
# Values are unmodified
self.assert_(np.array_equal(rng.asi8, rng_eastern.asi8))
self.assert_(rng_eastern.tz == pytz.timezone('US/Eastern'))
def test_localize_utc_conversion(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
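# For example (as the assertion below encodes via offsets.Hour(5)),
# "3/10/2012 00:00" US/Eastern corresponds to "3/10/2012 05:00" UTC.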
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize('US/Eastern')
expected_naive = rng + offsets.Hour(5)
self.assert_(np.array_equal(converted.asi8, expected_naive.asi8))
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
# Is this really how it should fail??
self.assertRaises(NonExistentTimeError, rng.tz_localize, 'US/Eastern')
def test_timestamp_tz_localize(self):
stamp = Timestamp('3/11/2012 04:00')
result = stamp.tz_localize('US/Eastern')
expected = Timestamp('3/11/2012 04:00', tz='US/Eastern')
self.assertEquals(result.hour, expected.hour)
self.assertEquals(result, expected)
def test_timestamp_constructed_by_date_and_tz(self):
# Fix Issue 2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz='US/Eastern')
expected = Timestamp('3/11/2012', tz='US/Eastern')
self.assertEquals(result.hour, expected.hour)
self.assertEquals(result, expected)
def test_timestamp_to_datetime_tzoffset(self):
# tzoffset
from dateutil.tz import tzoffset
tzinfo = tzoffset(None, 7200)
expected = Timestamp('3/11/2012 04:00', tz=tzinfo)
result = Timestamp(expected.to_datetime())
self.assertEquals(expected, result)
def test_timedelta_push_over_dst_boundary(self):
# #1389
# 4 hours before DST transition
stamp = Timestamp('3/10/2012 22:00', tz='US/Eastern')
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp('3/11/2012 05:00', tz='US/Eastern')
self.assertEquals(result, expected)
def test_tz_localize_dti(self):
from pandas.tseries.offsets import Hour
dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',
freq='L')
dti2 = dti.tz_localize('US/Eastern')
dti_utc = DatetimeIndex(start='1/1/2005 05:00',
end='1/1/2005 5:00:30.256', freq='L',
tz='utc')
self.assert_(np.array_equal(dti2.values, dti_utc.values))
dti3 = dti2.tz_convert('US/Pacific')
self.assert_(np.array_equal(dti3.values, dti_utc.values))
dti = DatetimeIndex(start='11/6/2011 1:59',
end='11/6/2011 2:00', freq='L')
self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
'US/Eastern')
dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',
freq='L')
self.assertRaises(
pytz.NonExistentTimeError, dti.tz_localize, 'US/Eastern')
def test_tz_localize_empty_series(self):
# #2248
ts = Series()
ts2 = ts.tz_localize('utc')
self.assertTrue(ts2.index.tz == pytz.utc)
ts2 = ts.tz_localize('US/Eastern')
self.assertTrue(ts2.index.tz == pytz.timezone('US/Eastern'))
def test_astimezone(self):
utc = Timestamp('3/11/2012 22:00', tz='UTC')
expected = utc.tz_convert('US/Eastern')
result = utc.astimezone('US/Eastern')
self.assertEquals(expected, result)
tm.assert_isinstance(result, Timestamp)
def test_create_with_tz(self):
stamp = Timestamp('3/11/2012 05:00', tz='US/Eastern')
self.assertEquals(stamp.hour, 5)
rng = date_range(
'3/11/2012 04:00', periods=10, freq='H', tz='US/Eastern')
self.assertEquals(stamp, rng[1])
utc_stamp = Timestamp('3/11/2012 05:00', tz='utc')
self.assert_(utc_stamp.tzinfo is pytz.utc)
self.assertEquals(utc_stamp.hour, 5)
stamp = Timestamp('3/11/2012 05:00').tz_localize('utc')
self.assertEquals(utc_stamp.hour, 5)
def test_create_with_fixed_tz(self):
off = FixedOffset(420, '+07:00')
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
self.assertEqual(off, rng.tz)
rng2 = date_range(start, periods=len(rng), tz=off)
self.assert_(rng.equals(rng2))
rng3 = date_range(
'3/11/2012 05:00:00+07:00', '6/11/2012 05:00:00+07:00')
self.assert_((rng.values == rng3.values).all())
def test_create_with_fixedoffset_noname(self):
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
self.assertEqual(off, rng.tz)
idx = Index([start, end])
self.assertEqual(off, idx.tz)
def test_date_range_localize(self):
rng = date_range(
'3/11/2012 03:00', periods=15, freq='H', tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 03:00', '3/11/2012 04:00'],
tz='US/Eastern')
rng3 = date_range('3/11/2012 03:00', periods=15, freq='H')
rng3 = rng3.tz_localize('US/Eastern')
self.assert_(rng.equals(rng3))
# DST transition time
val = rng[0]
exp = Timestamp('3/11/2012 03:00', tz='US/Eastern')
self.assertEquals(val.hour, 3)
self.assertEquals(exp.hour, 3)
self.assertEquals(val, exp) # same UTC value
self.assert_(rng[:2].equals(rng2))
# Right before the DST transition
rng = date_range(
'3/11/2012 00:00', periods=2, freq='H', tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 00:00', '3/11/2012 01:00'],
tz='US/Eastern')
self.assert_(rng.equals(rng2))
exp = Timestamp('3/11/2012 00:00', tz='US/Eastern')
self.assertEquals(exp.hour, 0)
self.assertEquals(rng[0], exp)
exp = Timestamp('3/11/2012 01:00', tz='US/Eastern')
self.assertEquals(exp.hour, 1)
self.assertEquals(rng[1], exp)
rng = date_range('3/11/2012 00:00', periods=10, freq='H',
tz='US/Eastern')
self.assert_(rng[2].hour == 3)
def test_utc_box_timestamp_and_localize(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
tz = pytz.timezone('US/Eastern')
expected = tz.normalize(rng[-1])
stamp = rng_eastern[-1]
self.assertEquals(stamp, expected)
self.assertEquals(stamp.tzinfo, expected.tzinfo)
# right tzinfo
rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
self.assert_('EDT' in repr(rng_eastern[0].tzinfo))
def test_timestamp_tz_convert(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates, tz='US/Eastern')
conv = idx[0].tz_convert('US/Pacific')
expected = idx.tz_convert('US/Pacific')[0]
self.assertEquals(conv, expected)
def test_pass_dates_localize_to_utc(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates)
conv = idx.tz_localize('US/Eastern')
fromdates = DatetimeIndex(strdates, tz='US/Eastern')
self.assert_(conv.tz == fromdates.tz)
self.assert_(np.array_equal(conv.values, fromdates.values))
def test_field_access_localize(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
rng = DatetimeIndex(strdates, tz='US/Eastern')
self.assert_((rng.hour == 0).all())
# a more unusual time zone, #1946
dr = date_range('2011-10-02 00:00', freq='h', periods=10,
tz='America/Atikokan')
expected = np.arange(10)
self.assert_(np.array_equal(dr.hour, expected))
def test_with_tz(self):
tz = pytz.timezone('US/Central')
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=datetools.Hour())
self.assert_(dr.tz is pytz.utc)
# DateRange with naive datetimes
dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)
dr = bdate_range('1/1/2005', '1/1/2009', tz=tz)
# normalized
central = dr.tz_convert(tz)
self.assert_(central.tz is tz)
self.assert_(central[0].tz is tz)
# datetimes with tzinfo set
dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=pytz.utc)
self.assertRaises(Exception, bdate_range,
datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=tz)
def test_tz_localize(self):
dr = bdate_range('1/1/2009', '1/1/2010')
dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
self.assert_(np.array_equal(dr_utc, localized))
def test_with_tz_ambiguous_times(self):
tz = pytz.timezone('US/Eastern')
rng = bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1))
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3,
freq=datetools.Hour())
self.assertRaises(pytz.NonExistentTimeError, dr.tz_localize, tz)
# after dst transition, it works
dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3,
freq=datetools.Hour(), tz=tz)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3,
freq=datetools.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# UTC is OK
dr = date_range(datetime(2011, 3, 13), periods=48,
freq=datetools.Minute(30), tz=pytz.utc)
def test_infer_dst(self):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
tz = pytz.timezone('US/Eastern')
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=datetools.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize,
tz, infer_dst=True)
# With repeated hours, we can infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=datetools.Hour(), tz=tz)
di = DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00',
'11/06/2011 01:00', '11/06/2011 02:00',
'11/06/2011 03:00'])
localized = di.tz_localize(tz, infer_dst=True)
self.assert_(np.array_equal(dr, localized))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=datetools.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, infer_dst=True)
self.assert_(np.array_equal(localized, localized_infer))
# test utility methods
def test_infer_tz(self):
eastern = pytz.timezone('US/Eastern')
utc = pytz.utc
_start = datetime(2001, 1, 1)
_end = datetime(2009, 1, 1)
start = eastern.localize(_start)
end = eastern.localize(_end)
assert(tools._infer_tzinfo(start, end) is eastern)
assert(tools._infer_tzinfo(start, None) is eastern)
assert(tools._infer_tzinfo(None, end) is eastern)
start = utc.localize(_start)
end = utc.localize(_end)
assert(tools._infer_tzinfo(start, end) is utc)
end = eastern.localize(_end)
self.assertRaises(Exception, tools._infer_tzinfo, start, end)
self.assertRaises(Exception, tools._infer_tzinfo, end, start)
def test_tz_string(self):
result = date_range('1/1/2000', periods=10, tz='US/Eastern')
expected = date_range('1/1/2000', periods=10,
tz=pytz.timezone('US/Eastern'))
self.assert_(result.equals(expected))
def test_take_dont_lose_meta(self):
_skip_if_no_pytz()
rng = date_range('1/1/2000', periods=20, tz='US/Eastern')
result = rng.take(lrange(5))
self.assert_(result.tz == rng.tz)
self.assert_(result.freq == rng.freq)
def test_index_with_timezone_repr(self):
rng = date_range('4/13/2010', '5/6/2010')
rng_eastern = rng.tz_localize('US/Eastern')
rng_repr = repr(rng_eastern)
self.assert_('2010-04-13 00:00:00' in rng_repr)
def test_index_astype_asobject_tzinfos(self):
# #1345
# dates around a dst transition
rng = date_range('2/13/2010', '5/6/2010', tz='US/Eastern')
objs = rng.asobject
for i, x in enumerate(objs):
exval = rng[i]
self.assertEquals(x, exval)
self.assertEquals(x.tzinfo, exval.tzinfo)
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
self.assertEquals(x, exval)
self.assertEquals(x.tzinfo, exval.tzinfo)
def test_localized_at_time_between_time(self):
from datetime import time
rng = date_range('4/16/2012', '5/1/2012', freq='H')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_local = ts.tz_localize('US/Eastern')
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize('US/Eastern')
assert_series_equal(result, expected)
self.assert_(result.index.tz.zone == 'US/Eastern')
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1, t2).tz_localize('US/Eastern')
assert_series_equal(result, expected)
self.assert_(result.index.tz.zone == 'US/Eastern')
def test_string_index_alias_tz_aware(self):
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts['1/3/2000']
self.assertAlmostEqual(result, ts[2])
def test_fixed_offset(self):
dates = [datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)]
result = to_datetime(dates)
self.assert_(result.tz == fixed_off)
def test_fixedtz_topydatetime(self):
dates = np.array([datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)])
result = to_datetime(dates).to_pydatetime()
self.assert_(np.array_equal(dates, result))
result = to_datetime(dates)._mpl_repr()
self.assert_(np.array_equal(dates, result))
def test_convert_tz_aware_datetime_datetime(self):
# #1581
tz = pytz.timezone('US/Eastern')
dates = [datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)]
dates_aware = [tz.localize(x) for x in dates]
result = to_datetime(dates_aware)
self.assert_(result.tz.zone == 'US/Eastern')
converted = to_datetime(dates_aware, utc=True)
ex_vals = [Timestamp(x).value for x in dates_aware]
self.assert_(np.array_equal(converted.asi8, ex_vals))
self.assert_(converted.tz is pytz.utc)
def test_to_datetime_utc(self):
from dateutil.parser import parse
arr = np.array([parse('2012-06-13T01:39:00Z')], dtype=object)
result = to_datetime(arr, utc=True)
self.assert_(result.tz is pytz.utc)
def test_to_datetime_tzlocal(self):
from dateutil.parser import parse
from dateutil.tz import tzlocal
dt = parse('2012-06-13T01:39:00Z')
dt = dt.replace(tzinfo=tzlocal())
arr = np.array([dt], dtype=object)
result = to_datetime(arr, utc=True)
self.assert_(result.tz is pytz.utc)
rng = date_range('2012-11-03 03:00', '2012-11-05 03:00', tz=tzlocal())
arr = rng.to_pydatetime()
result = to_datetime(arr, utc=True)
self.assert_(result.tz is pytz.utc)
def test_frame_no_datetime64_dtype(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize('US/Eastern')
e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr)
self.assert_(e['B'].dtype == 'M8[ns]')
# GH 2810 (with timezones)
datetimes_naive = [ ts.to_pydatetime() for ts in dr ]
datetimes_with_tz = [ ts.to_pydatetime() for ts in dr_tz ]
df = DataFrame({'dr' : dr, 'dr_tz' : dr_tz,
'datetimes_naive': datetimes_naive,
'datetimes_with_tz' : datetimes_with_tz })
result = df.get_dtype_counts()
expected = Series({ 'datetime64[ns]' : 3, 'object' : 1 })
assert_series_equal(result, expected)
def test_hongkong_tz_convert(self):
# #1673
dr = date_range(
'2012-01-01', '2012-01-10', freq='D', tz='Hongkong')
# it works!
dr.hour
def test_tz_convert_unsorted(self):
dr = date_range('2012-03-09', freq='H', periods=100, tz='utc')
dr = dr.tz_convert('US/Eastern')
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
def test_shift_localized(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize('US/Eastern')
result = dr_tz.shift(1, '10T')
self.assert_(result.tz == dr_tz.tz)
def test_tz_aware_asfreq(self):
dr = date_range(
'2011-12-01', '2012-07-20', freq='D', tz='US/Eastern')
s = Series(np.random.randn(len(dr)), index=dr)
# it works!
s.asfreq('T')
def test_static_tzinfo(self):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz='EST')
index.hour
index[0]
def test_tzaware_datetime_to_index(self):
d = [datetime(2012, 8, 19, tzinfo=pytz.timezone('US/Eastern'))]
index = DatetimeIndex(d)
self.assert_(index.tz.zone == 'US/Eastern')
def test_date_range_span_dst_transition(self):
# #1778
# Standard -> Daylight Savings Time
dr = date_range('03/06/2012 00:00', periods=200, freq='W-FRI',
tz='US/Eastern')
self.assert_((dr.hour == 0).all())
dr = date_range('2012-11-02', periods=10, tz='US/Eastern')
self.assert_((dr.hour == 0).all())
def test_convert_datetime_list(self):
dr = date_range('2012-06-02', periods=10, tz='US/Eastern')
dr2 = DatetimeIndex(list(dr), name='foo')
self.assert_(dr.equals(dr2))
self.assert_(dr.tz == dr2.tz)
self.assert_(dr2.name == 'foo')
def test_frame_from_records_utc(self):
rec = {'datum': 1.5,
'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index='begin_time')
def test_frame_reset_index(self):
dr = date_range('2012-06-02', periods=10, tz='US/Eastern')
df = DataFrame(np.random.randn(len(dr)), dr)
roundtripped = df.reset_index().set_index('index')
xp = df.index.tz
rs = roundtripped.index.tz
self.assertEquals(xp, rs)
def test_dateutil_tzoffset_support(self):
from dateutil.tz import tzoffset
values = [188.5, 328.25]
tzinfo = tzoffset(None, 7200)
index = [datetime(2012, 5, 11, 11, tzinfo=tzinfo),
datetime(2012, 5, 11, 12, tzinfo=tzinfo)]
series = Series(data=values, index=index)
self.assertEquals(series.index.tz, tzinfo)
# it works! #2443
repr(series.index[0])
def test_getitem_pydatetime_tz(self):
index = date_range(start='2012-12-24 16:00',
end='2012-12-24 18:00', freq='H',
tz='Europe/Berlin')
ts = Series(index=index, data=index.hour)
time_pandas = Timestamp('2012-12-24 17:00', tz='Europe/Berlin')
time_datetime = datetime(2012, 12, 24, 17, 0,
tzinfo=pytz.timezone('Europe/Berlin'))
self.assertEqual(ts[time_pandas], ts[time_datetime])
def test_index_drop_dont_lose_tz(self):
# #2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
self.assertTrue(ind.tz is not None)
def test_datetimeindex_tz(self):
""" Test different DatetimeIndex constructions with timezone
Follow-up of #4229
"""
arr = ['11/10/2005 08:00:00', '11/10/2005 09:00:00']
idx1 = to_datetime(arr).tz_localize('US/Eastern')
idx2 = DatetimeIndex(start="2005-11-10 08:00:00", freq='H', periods=2, tz='US/Eastern')
idx3 = DatetimeIndex(arr, tz='US/Eastern')
idx4 = DatetimeIndex(np.array(arr), tz='US/Eastern')
for other in [idx2, idx3, idx4]:
self.assert_(idx1.equals(other))
def test_datetimeindex_tz_nat(self):
idx = to_datetime([Timestamp("2013-1-1", tz='US/Eastern'), NaT])
self.assertTrue(isnull(idx[1]))
self.assertTrue(idx[0].tzinfo is not None)
class TestTimeZones(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
_skip_if_no_pytz()
def test_index_equals_with_tz(self):
left = date_range('1/1/2011', periods=100, freq='H', tz='utc')
right = date_range('1/1/2011', periods=100, freq='H',
tz='US/Eastern')
self.assert_(not left.equals(right))
def test_tz_localize_naive(self):
rng = date_range('1/1/2011', periods=100, freq='H')
conv = rng.tz_localize('US/Pacific')
exp = date_range('1/1/2011', periods=100, freq='H', tz='US/Pacific')
self.assert_(conv.equals(exp))
def test_series_frame_tz_localize(self):
rng = date_range('1/1/2011', periods=100, freq='H')
ts = Series(1, index=rng)
result = ts.tz_localize('utc')
self.assert_(result.index.tz.zone == 'UTC')
df = DataFrame({'a': 1}, index=rng)
result = df.tz_localize('utc')
expected = DataFrame({'a': 1}, rng.tz_localize('UTC'))
self.assert_(result.index.tz.zone == 'UTC')
assert_frame_equal(result, expected)
df = df.T
result = df.tz_localize('utc', axis=1)
self.assert_(result.columns.tz.zone == 'UTC')
assert_frame_equal(result, expected.T)
# Can't localize if already tz-aware
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
ts = Series(1, index=rng)
assertRaisesRegexp(TypeError, 'Already tz-aware', ts.tz_localize, 'US/Eastern')
def test_series_frame_tz_convert(self):
rng = date_range('1/1/2011', periods=200, freq='D',
tz='US/Eastern')
ts = Series(1, index=rng)
result = ts.tz_convert('Europe/Berlin')
self.assert_(result.index.tz.zone == 'Europe/Berlin')
df = DataFrame({'a': 1}, index=rng)
result = df.tz_convert('Europe/Berlin')
expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin'))
self.assert_(result.index.tz.zone == 'Europe/Berlin')
assert_frame_equal(result, expected)
df = df.T
result = df.tz_convert('Europe/Berlin', axis=1)
self.assert_(result.columns.tz.zone == 'Europe/Berlin')
assert_frame_equal(result, expected.T)
# can't convert tz-naive
rng = date_range('1/1/2011', periods=200, freq='D')
ts = Series(1, index=rng)
assertRaisesRegexp(TypeError, "Cannot convert tz-naive", ts.tz_convert, 'US/Eastern')
def test_join_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng.tz_convert('US/Eastern')
right = rng.tz_convert('Europe/Berlin')
for how in ['inner', 'outer', 'left', 'right']:
result = left.join(left[:-5], how=how)
tm.assert_isinstance(result, DatetimeIndex)
self.assert_(result.tz == left.tz)
result = left.join(right[:-5], how=how)
tm.assert_isinstance(result, DatetimeIndex)
self.assert_(result.tz.zone == 'UTC')
def test_join_aware(self):
rng = date_range('1/1/2011', periods=10, freq='H')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_utc = ts.tz_localize('utc')
self.assertRaises(Exception, ts.__add__, ts_utc)
self.assertRaises(Exception, ts_utc.__add__, ts)
test1 = DataFrame(np.zeros((6, 3)),
index=date_range("2012-11-15 00:00:00", periods=6,
freq="100L", tz="US/Central"))
test2 = DataFrame(np.zeros((3, 3)),
index=date_range("2012-11-15 00:00:00", periods=3,
freq="250L", tz="US/Central"),
columns=lrange(3, 6))
result = test1.join(test2, how='outer')
ex_index = test1.index.union(test2.index)
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result.index.tz.zone == 'US/Central')
# non-overlapping
rng = date_range("2012-11-15 00:00:00", periods=6,
freq="H", tz="US/Central")
rng2 = date_range("2012-11-15 12:00:00", periods=6,
freq="H", tz="US/Eastern")
result = rng.union(rng2)
self.assertTrue(result.tz.zone == 'UTC')
def test_align_aware(self):
idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern')
idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern')
df1 = DataFrame(np.random.randn(len(idx1), 3), idx1)
df2 = DataFrame(np.random.randn(len(idx2), 3), idx2)
new1, new2 = df1.align(df2)
self.assertEqual(df1.index.tz, new1.index.tz)
self.assertEqual(df2.index.tz, new2.index.tz)
def test_append_aware(self):
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='US/Eastern')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Eastern')
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
self.assertEqual(ts_result.index.tz, rng1.tz)
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='UTC')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='UTC')
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
utc = rng1.tz
self.assertEqual(utc, ts_result.index.tz)
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='US/Eastern')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Central')
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
self.assertEqual(utc, ts_result.index.tz)
def test_append_aware_naive(self):
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Eastern')
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
self.assert_(ts_result.index.equals(
ts1.index.asobject.append(ts2.index.asobject)))
# mixed
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
rng2 = lrange(100)
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
self.assert_(ts_result.index.equals(
ts1.index.asobject.append(ts2.index)))
def test_equal_join_ensure_utc(self):
rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_moscow = ts.tz_convert('Europe/Moscow')
result = ts + ts_moscow
self.assert_(result.index.tz is pytz.utc)
result = ts_moscow + ts
self.assert_(result.index.tz is pytz.utc)
df = DataFrame({'a': ts})
df_moscow = df.tz_convert('Europe/Moscow')
result = df + df_moscow
self.assert_(result.index.tz is pytz.utc)
result = df_moscow + df
self.assert_(result.index.tz is pytz.utc)
def test_arith_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
perm = np.random.permutation(100)[:90]
ts1 = Series(np.random.randn(90),
index=rng.take(perm).tz_convert('US/Eastern'))
perm = np.random.permutation(100)[:90]
ts2 = Series(np.random.randn(90),
index=rng.take(perm).tz_convert('Europe/Berlin'))
result = ts1 + ts2
uts1 = ts1.tz_convert('utc')
uts2 = ts2.tz_convert('utc')
expected = uts1 + uts2
self.assert_(result.index.tz == pytz.UTC)
assert_series_equal(result, expected)
def test_intersection(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng[10:90][::-1]
right = rng[20:80][::-1]
self.assert_(left.tz == rng.tz)
result = left.intersection(right)
self.assert_(result.tz == left.tz)
def test_timestamp_equality_different_timezones(self):
utc_range = date_range('1/1/2000', periods=20, tz='UTC')
eastern_range = utc_range.tz_convert('US/Eastern')
berlin_range = utc_range.tz_convert('Europe/Berlin')
for a, b, c in zip(utc_range, eastern_range, berlin_range):
self.assertEquals(a, b)
self.assertEquals(b, c)
self.assertEquals(a, c)
self.assert_((utc_range == eastern_range).all())
self.assert_((utc_range == berlin_range).all())
self.assert_((berlin_range == eastern_range).all())
def test_datetimeindex_tz(self):
rng = date_range('03/12/2012 00:00', periods=10, freq='W-FRI',
tz='US/Eastern')
rng2 = DatetimeIndex(data=rng, tz='US/Eastern')
self.assert_(rng.equals(rng2))
def test_normalize_tz(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz='US/Eastern')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz='US/Eastern')
self.assert_(result.equals(expected))
self.assert_(result.is_normalized)
self.assert_(not rng.is_normalized)
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz='UTC')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz='UTC')
self.assert_(result.equals(expected))
self.assert_(result.is_normalized)
self.assert_(not rng.is_normalized)
from dateutil.tz import tzlocal
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz=tzlocal())
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz=tzlocal())
self.assert_(result.equals(expected))
self.assert_(result.is_normalized)
self.assert_(not rng.is_normalized)
def test_tzaware_offset(self):
dates = date_range('2012-11-01', periods=3, tz='US/Pacific')
offset = dates + offsets.Hour(5)
self.assertEqual(dates[0] + offsets.Hour(5), offset[0])
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
shakamunyi/tensorflow | tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py | 136 | 1696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
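    # y has two columns (pi*sin(x) and pi*cos(x)), which is why the regressor
    # below is built with label_dimension=2.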
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
label_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
vladpopovici/WSItk | WSItk/tools/wsi_bot_codebook3.py | 1 | 7170 | #!/usr/bin/env python2
#
# wsi_bot_codebook3
#
# Version 3 of codebook construction:
#
# - uses OpenCV for faster operation - but different local descriptors than in the 1st version;
# - uses annotation files for defining the regions from where the descriptors are to be
#   extracted;
# - tries to optimize the codebook with respect to some class labels
from __future__ import (absolute_import, division, print_function, unicode_literals)
__version__ = 0.1
__author__ = 'Vlad Popovici'
import os
import argparse as opt
import numpy as np
import numpy.linalg
from scipy.stats import ttest_ind
import skimage.draw
import skimage.io
from skimage.exposure import equalize_adapthist, rescale_intensity
import cv2
import cv2.xfeatures2d
from sklearn.cluster import MiniBatchKMeans
from sklearn.lda import LDA
from stain.he import rgb2he
from util.storage import ModelPersistence
def find_in_list(_value, _list):
"""
Returns the indexes of all occurrences of value in a list.
"""
return np.array([i for i, v in enumerate(_list) if v == _value], dtype=int)
def main():
p = opt.ArgumentParser(description="""
Extracts features from annotated regions and constructs a codebook of a given size.
""")
p.add_argument('in_file', action='store', help='a file with image file, annotation file and label (0/1)')
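    # Each line of in_file is expected to hold three whitespace-separated
    # fields - an image file, its annotation file and a 0/1 label - e.g.
    # (hypothetical paths):  slide_01.png  slide_01_annot.txt  1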
p.add_argument('out_file', action='store', help='resulting model file name')
#p.add_argument('codebook_size', action='store', help='codebook size', type=int)
p.add_argument('-t', '--threshold', action='store', type=int, default=5000,
help='Hessian threshold for SURF features.')
p.add_argument('-s', '--standardize', action='store_true', default=False,
help='should the features be standardized before codebook construction?')
p.add_argument('-v', '--verbose', action='store_true', help='verbose?')
args = p.parse_args()
th = args.threshold
all_image_names, all_descriptors = [], []
all_roi = []
y = []
unique_image_names = []
with open(args.in_file, mode='r') as fin:
for l in fin.readlines():
l = l.strip()
if len(l) == 0:
break
img_file, annot_file, lbl = [z_ for z_ in l.split()][0:3] # file names: image and its annotation and label
y.append(int(lbl))
if args.verbose:
print("Image:", img_file)
img = cv2.imread(img_file)
coords = np.fromfile(annot_file, dtype=int, sep=' ') # x y - values
coords = np.reshape(coords, (coords.size/2, 2), order='C')
# get the bounding box:
xmin, ymin = coords.min(axis=0)
xmax, ymax = coords.max(axis=0)
if args.verbose:
print("\t...H&E extraction")
img = img[ymin:ymax+2, xmin:xmax+2, :] # keep only the region of interest
img_h, _ = rgb2he(img, normalize=True) # get the H- component
img_h = equalize_adapthist(img_h)
img_h = rescale_intensity(img_h, out_range=(0,255))
# make sure the dtype is right for image and the mask: OpenCV is sensitive to data type
img_h = img_h.astype(np.uint8)
if args.verbose:
print("\t...building mask")
mask = np.zeros(img_h.shape, dtype=np.uint8)
r, c = skimage.draw.polygon(coords[:,1]-ymin, coords[:,0]-xmin) # adapt to new image...
mask[r,c] = 1 # everything outside the region is black
if args.verbose:
print("\t...feature detection and computation")
img_h *= mask
feat = cv2.xfeatures2d.SURF_create(hessianThreshold=th)
keyp, desc = feat.detectAndCompute(img_h, mask)
if args.verbose:
print("\t...", str(len(keyp)), "features extracted")
all_descriptors.extend(desc)
all_image_names.extend([img_file] * len(keyp))
unique_image_names.append(img_file)
# end for
X = np.hstack(all_descriptors)
X = np.reshape(X, (len(all_descriptors), all_descriptors[0].size), order='C')
    # defaults, so the shift/scale values stored in the model file below are
    # always defined, even when --standardize is not requested
    Xm = np.zeros(X.shape[1])
    Xs = np.ones(X.shape[1])
    if args.standardize:
        # make sure each variable (column) is mean-centered and has unit standard deviation
        Xm = np.mean(X, axis=0)
        Xs = np.std(X, axis=0)
        Xs[np.isclose(Xs, 1e-16)] = 1.0
        X = (X - Xm) / Xs
y = np.array(y, dtype=int)
rng = np.random.RandomState(0)
acc = [] # will keep accuracy of the classifier
vqs = [] # all quantizers, to find the best
for k in np.arange(10, 121, 10):
# Method:
# -generate a codebook with k codewords
# -re-code the data
# -compute frequencies
# -estimate classification on best 10 features
if args.verbose:
print("\nK-means clustering (k =", str(k), ")")
print("\t...with", str(X.shape[0]), "points")
#-codebook and re-coding
vq = MiniBatchKMeans(n_clusters=k, random_state=rng,
batch_size=500, compute_labels=True, verbose=False) # vector quantizer
vq.fit(X)
vqs.append(vq)
#-codeword frequencies
frq = np.zeros((len(unique_image_names), k))
for i in range(vq.labels_.size):
frq[unique_image_names.index(all_image_names[i]), vq.labels_[i]] += 1.0
for i in range(len(unique_image_names)):
if frq[i, :].sum() > 0:
frq[i, :] /= frq[i, :].sum()
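        # each row of frq is now a (normalized) bag-of-visual-words histogram
        # describing one image in terms of the k codewords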
if args.verbose:
print("...\tfeature selection (t-test)")
pv = np.ones(k)
for i in range(k):
_, pv[i] = ttest_ind(frq[y == 0, i], frq[y == 1, i])
idx = np.argsort(pv) # order of the p-values
if args.verbose:
print("\t...classification performance estimation")
clsf = LDA(solver='lsqr', shrinkage='auto').fit(frq[:,idx[:10]], y) # keep top 10 features
acc.append(clsf.score(frq[:, idx[:10]], y))
acc = np.array(acc)
k = np.arange(10, 121, 10)[acc.argmax()] # best k
if args.verbose:
print("\nOptimal codebook size:", str(k))
# final codebook:
vq = vqs[acc.argmax()]
# compute the average distance and std.dev. of the points in each cluster:
avg_dist = np.zeros(k)
sd_dist = np.zeros(k)
    for i in range(0, k):
        d = numpy.linalg.norm(X[vq.labels_ == i, :] - vq.cluster_centers_[i, :], axis=1)
        avg_dist[i] = d.mean()
        sd_dist[i] = d.std()
with ModelPersistence(args.out_file, 'c', format='pickle') as d:
d['codebook'] = vq
d['shift'] = Xm
d['scale'] = Xs
d['standardize'] = args.standardize
d['avg_dist_to_centroid'] = avg_dist
d['stddev_dist_to_centroid'] = sd_dist
return True
if __name__ == '__main__':
main()
| mit |
paladin74/neural-network-animation | matplotlib/backends/backend_svg.py | 10 | 45804 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from six import unichr
import os, base64, tempfile, gzip, io, sys, codecs, re
import numpy as np
from hashlib import md5
from matplotlib import verbose, __version__, rcParams
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import is_string_like, is_writable_file_like, maxdict
from matplotlib.colors import rgb2hex
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D, Affine2DBase
from matplotlib import _png
from xml.sax.saxutils import escape as escape_xml_text
backend_version = __version__
# ----------------------------------------------------------------------
# SimpleXMLWriter class
#
# Based on an original by Fredrik Lundh, but modified here to:
# 1. Support modern Python idioms
# 2. Remove encoding support (it's handled by the file writer instead)
# 3. Support proper indentation
# 4. Minify things a little bit
# --------------------------------------------------------------------
# The SimpleXMLWriter module is
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
def escape_cdata(s):
s = s.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
_escape_xml_comment = re.compile(r'-(?=-)')
def escape_comment(s):
s = escape_cdata(s)
return _escape_xml_comment.sub('- ', s)
def escape_attrib(s):
s = s.replace("&", "&")
s = s.replace("'", "'")
s = s.replace("\"", """)
s = s.replace("<", "<")
s = s.replace(">", ">")
return s
##
# XML writer class.
#
# @param file A file or file-like object. This object must implement
# a <b>write</b> method that takes an 8-bit string.
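#
# For example, a minimal usage sketch ('fh' is illustrative and stands for
# any object with a <b>write</b> method, e.g. an io.StringIO instance):
#
#     fh = io.StringIO()
#     writer = XMLWriter(fh)
#     writer.start('svg', width='100', height='100')
#     writer.element('rect', x='0', y='0', width='10', height='10')
#     writer.end('svg')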
class XMLWriter:
def __init__(self, file):
self.__write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self.__open = 0 # true if start tag is open
self.__tags = []
self.__data = []
self.__indentation = " " * 64
def __flush(self, indent=True):
# flush internal buffers
if self.__open:
if indent:
self.__write(">\n")
else:
self.__write(">")
self.__open = 0
if self.__data:
data = ''.join(self.__data)
self.__write(escape_cdata(data))
self.__data = []
## Opens a new element. Attributes can be given as keyword
# arguments, or as a string/string dictionary. The method returns
# an opaque identifier that can be passed to the <b>close</b>
# method, to close all open elements up to and including this one.
#
# @param tag Element tag.
# @param attrib Attribute dictionary. Alternatively, attributes
# can be given as keyword arguments.
# @return An element identifier.
def start(self, tag, attrib={}, **extra):
self.__flush()
tag = escape_cdata(tag)
self.__data = []
self.__tags.append(tag)
self.__write(self.__indentation[:len(self.__tags) - 1])
self.__write("<%s" % tag)
if attrib or extra:
attrib = attrib.copy()
attrib.update(extra)
attrib = list(six.iteritems(attrib))
attrib.sort()
for k, v in attrib:
if not v == '':
k = escape_cdata(k)
v = escape_attrib(v)
self.__write(" %s=\"%s\"" % (k, v))
self.__open = 1
return len(self.__tags)-1
##
# Adds a comment to the output stream.
#
# @param comment Comment text, as a Unicode string.
def comment(self, comment):
self.__flush()
self.__write(self.__indentation[:len(self.__tags)])
self.__write("<!-- %s -->\n" % escape_comment(comment))
##
# Adds character data to the output stream.
#
# @param text Character data, as a Unicode string.
def data(self, text):
self.__data.append(text)
##
# Closes the current element (opened by the most recent call to
# <b>start</b>).
#
# @param tag Element tag. If given, the tag must match the start
# tag. If omitted, the current element is closed.
def end(self, tag=None, indent=True):
if tag:
assert self.__tags, "unbalanced end(%s)" % tag
assert escape_cdata(tag) == self.__tags[-1],\
"expected end(%s), got %s" % (self.__tags[-1], tag)
else:
assert self.__tags, "unbalanced end()"
tag = self.__tags.pop()
if self.__data:
self.__flush(indent)
elif self.__open:
self.__open = 0
self.__write("/>\n")
return
if indent:
self.__write(self.__indentation[:len(self.__tags)])
self.__write("</%s>\n" % tag)
##
# Closes open elements, up to (and including) the element identified
# by the given identifier.
#
# @param id Element identifier, as returned by the <b>start</b> method.
def close(self, id):
while len(self.__tags) > id:
self.end()
##
# Adds an entire element. This is the same as calling <b>start</b>,
# <b>data</b>, and <b>end</b> in sequence. The <b>text</b> argument
# can be omitted.
def element(self, tag, text=None, attrib={}, **extra):
self.start(*(tag, attrib), **extra)
if text:
self.data(text)
self.end(indent=False)
##
# Flushes the output stream.
def flush(self):
pass # replaced by the constructor
# ----------------------------------------------------------------------
def generate_transform(transform_list=[]):
if len(transform_list):
output = io.StringIO()
for type, value in transform_list:
if type == 'scale' and (value == (1.0,) or value == (1.0, 1.0)):
continue
if type == 'translate' and value == (0.0, 0.0):
continue
if type == 'rotate' and value == (0.0,):
continue
if type == 'matrix' and isinstance(value, Affine2DBase):
value = value.to_values()
output.write('%s(%s)' % (type, ' '.join(str(x) for x in value)))
return output.getvalue()
return ''
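# For instance, generate_transform([('translate', (10, 20)), ('rotate', (30,))])
# returns 'translate(10 20)rotate(30)'; identity scales, translations and
# rotations are dropped entirely.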
def generate_css(attrib={}):
if attrib:
output = io.StringIO()
attrib = list(six.iteritems(attrib))
attrib.sort()
for k, v in attrib:
k = escape_attrib(k)
v = escape_attrib(v)
output.write("%s:%s;" % (k, v))
return output.getvalue()
return ''
_capstyle_d = {'projecting' : 'square', 'butt' : 'butt', 'round': 'round',}
class RendererSVG(RendererBase):
FONT_SCALE = 100.0
fontd = maxdict(50)
def __init__(self, width, height, svgwriter, basename=None, image_dpi=72):
self.width = width
self.height = height
self.writer = XMLWriter(svgwriter)
self.image_dpi = image_dpi # the actual dpi we want to rasterize stuff with
self._groupd = {}
if not rcParams['svg.image_inline']:
assert basename is not None
self.basename = basename
self._imaged = {}
self._clipd = {}
self._char_defs = {}
self._markers = {}
self._path_collection_id = 0
self._imaged = {}
self._hatchd = {}
self._has_gouraud = False
self._n_gradients = 0
self._fonts = {}
self.mathtext_parser = MathTextParser('SVG')
RendererBase.__init__(self)
self._glyph_map = dict()
svgwriter.write(svgProlog)
self._start_id = self.writer.start(
'svg',
width='%ipt' % width, height='%ipt' % height,
viewBox='0 0 %i %i' % (width, height),
xmlns="http://www.w3.org/2000/svg",
version="1.1",
attrib={'xmlns:xlink': "http://www.w3.org/1999/xlink"})
self._write_default_style()
def finalize(self):
self._write_clips()
self._write_hatches()
self._write_svgfonts()
self.writer.close(self._start_id)
self.writer.flush()
def _write_default_style(self):
writer = self.writer
default_style = generate_css({
'stroke-linejoin': 'round',
'stroke-linecap': 'butt'})
writer.start('defs')
writer.start('style', type='text/css')
writer.data('*{%s}\n' % default_style)
writer.end('style')
writer.end('defs')
def _make_id(self, type, content):
content = str(content)
if six.PY3:
content = content.encode('utf8')
return '%s%s' % (type, md5(content).hexdigest()[:10])
def _make_flip_transform(self, transform):
return (transform +
Affine2D()
.scale(1.0, -1.0)
.translate(0.0, self.height))
def _get_font(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(fname)
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _get_hatch(self, gc, rgbFace):
"""
Create a new hatch pattern
"""
if rgbFace is not None:
rgbFace = tuple(rgbFace)
edge = gc.get_rgb()
if edge is not None:
edge = tuple(edge)
dictkey = (gc.get_hatch(), rgbFace, edge)
oid = self._hatchd.get(dictkey)
if oid is None:
oid = self._make_id('h', dictkey)
self._hatchd[dictkey] = ((gc.get_hatch_path(), rgbFace, edge), oid)
else:
_, oid = oid
return oid
def _write_hatches(self):
if not len(self._hatchd):
return
HATCH_SIZE = 72
writer = self.writer
writer.start('defs')
for ((path, face, stroke), oid) in six.itervalues(self._hatchd):
writer.start(
'pattern',
id=oid,
patternUnits="userSpaceOnUse",
x="0", y="0", width=six.text_type(HATCH_SIZE),
height=six.text_type(HATCH_SIZE))
path_data = self._convert_path(
path,
Affine2D().scale(HATCH_SIZE).scale(1.0, -1.0).translate(0, HATCH_SIZE),
simplify=False)
if face is None:
fill = 'none'
else:
fill = rgb2hex(face)
writer.element(
'rect',
x="0", y="0", width=six.text_type(HATCH_SIZE+1),
height=six.text_type(HATCH_SIZE+1),
fill=fill)
writer.element(
'path',
d=path_data,
style=generate_css({
'fill': rgb2hex(stroke),
'stroke': rgb2hex(stroke),
'stroke-width': '1.0',
'stroke-linecap': 'butt',
'stroke-linejoin': 'miter'
})
)
writer.end('pattern')
writer.end('defs')
def _get_style_dict(self, gc, rgbFace):
"""
        Return the style string. The style is generated from the
        GraphicsContext and rgbFace.
"""
attrib = {}
forced_alpha = gc.get_forced_alpha()
if gc.get_hatch() is not None:
attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace)
if rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
attrib['fill-opacity'] = str(rgbFace[3])
else:
if rgbFace is None:
attrib['fill'] = 'none'
else:
if tuple(rgbFace[:3]) != (0, 0, 0):
attrib['fill'] = rgb2hex(rgbFace)
if len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
attrib['fill-opacity'] = str(rgbFace[3])
if forced_alpha and gc.get_alpha() != 1.0:
attrib['opacity'] = str(gc.get_alpha())
offset, seq = gc.get_dashes()
if seq is not None:
attrib['stroke-dasharray'] = ','.join(['%f' % val for val in seq])
attrib['stroke-dashoffset'] = six.text_type(float(offset))
linewidth = gc.get_linewidth()
if linewidth:
rgb = gc.get_rgb()
attrib['stroke'] = rgb2hex(rgb)
if not forced_alpha and rgb[3] != 1.0:
attrib['stroke-opacity'] = str(rgb[3])
if linewidth != 1.0:
attrib['stroke-width'] = str(linewidth)
if gc.get_joinstyle() != 'round':
attrib['stroke-linejoin'] = gc.get_joinstyle()
if gc.get_capstyle() != 'butt':
attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]
return attrib
def _get_style(self, gc, rgbFace):
return generate_css(self._get_style_dict(gc, rgbFace))
def _get_clip(self, gc):
cliprect = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
clippath_trans = self._make_flip_transform(clippath_trans)
dictkey = (id(clippath), str(clippath_trans))
elif cliprect is not None:
x, y, w, h = cliprect.bounds
y = self.height-(y+h)
dictkey = (x, y, w, h)
else:
return None
clip = self._clipd.get(dictkey)
if clip is None:
oid = self._make_id('p', dictkey)
if clippath is not None:
self._clipd[dictkey] = ((clippath, clippath_trans), oid)
else:
self._clipd[dictkey] = (dictkey, oid)
else:
clip, oid = clip
return oid
def _write_clips(self):
if not len(self._clipd):
return
writer = self.writer
writer.start('defs')
for clip, oid in six.itervalues(self._clipd):
writer.start('clipPath', id=oid)
if len(clip) == 2:
clippath, clippath_trans = clip
path_data = self._convert_path(clippath, clippath_trans, simplify=False)
writer.element('path', d=path_data)
else:
x, y, w, h = clip
writer.element('rect', x=six.text_type(x), y=six.text_type(y),
width=six.text_type(w), height=six.text_type(h))
writer.end('clipPath')
writer.end('defs')
def _write_svgfonts(self):
if not rcParams['svg.fonttype'] == 'svgfont':
return
writer = self.writer
writer.start('defs')
for font_fname, chars in six.iteritems(self._fonts):
font = FT2Font(font_fname)
font.set_size(72, 72)
sfnt = font.get_sfnt()
writer.start('font', id=sfnt[(1, 0, 0, 4)])
writer.element(
'font-face',
attrib={
'font-family': font.family_name,
'font-style': font.style_name.lower(),
'units-per-em': '72',
'bbox': ' '.join(six.text_type(x / 64.0) for x in font.bbox)})
for char in chars:
glyph = font.load_char(char, flags=LOAD_NO_HINTING)
verts, codes = font.get_path()
path = Path(verts, codes)
path_data = self._convert_path(path)
# name = font.get_glyph_name(char)
writer.element(
'glyph',
d=path_data,
attrib={
# 'glyph-name': name,
'unicode': unichr(char),
'horiz-adv-x': six.text_type(glyph.linearHoriAdvance / 65536.0)})
writer.end('font')
writer.end('defs')
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group.
"""
if gid:
self.writer.start('g', id=gid)
else:
self._groupd[s] = self._groupd.get(s, 0) + 1
self.writer.start('g', id="%s_%d" % (s, self._groupd[s]))
def close_group(self, s):
self.writer.end('g')
def option_image_nocomposite(self):
"""
if svg.image_noscale is True, compositing multiple images into one is prohibited
"""
return rcParams['svg.image_noscale']
def _convert_path(self, path, transform=None, clip=None, simplify=None):
if clip:
clip = (0.0, 0.0, self.width, self.height)
else:
clip = None
return _path.convert_to_svg(path, transform, clip, simplify, 6)
def draw_path(self, gc, path, transform, rgbFace=None):
trans_and_flip = self._make_flip_transform(transform)
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
path_data = self._convert_path(
path, trans_and_flip, clip=clip, simplify=simplify)
attrib = {}
attrib['style'] = self._get_style(gc, rgbFace)
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
if gc.get_url() is not None:
self.writer.start('a', {'xlink:href': gc.get_url()})
self.writer.element('path', d=path_data, attrib=attrib)
if gc.get_url() is not None:
self.writer.end('a')
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if not len(path.vertices):
return
writer = self.writer
path_data = self._convert_path(
marker_path,
marker_trans + Affine2D().scale(1.0, -1.0),
simplify=False)
style = self._get_style_dict(gc, rgbFace)
dictkey = (path_data, generate_css(style))
oid = self._markers.get(dictkey)
for key in list(six.iterkeys(style)):
if not key.startswith('stroke'):
del style[key]
style = generate_css(style)
if oid is None:
oid = self._make_id('m', dictkey)
writer.start('defs')
writer.element('path', id=oid, d=path_data, style=style)
writer.end('defs')
self._markers[dictkey] = oid
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
writer.start('g', attrib=attrib)
trans_and_flip = self._make_flip_transform(trans)
attrib = {'xlink:href': '#%s' % oid}
clip = (0, 0, self.width*72, self.height*72)
for vertices, code in path.iter_segments(
trans_and_flip, clip=clip, simplify=False):
if len(vertices):
x, y = vertices[-2:]
attrib['x'] = six.text_type(x)
attrib['y'] = six.text_type(y)
attrib['style'] = self._get_style(gc, rgbFace)
writer.element('use', attrib=attrib)
writer.end('g')
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is
# (len_path + 5) * uses_per_path
# cost of definition+use is
# (len_path + 3) + 9 * uses_per_path
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + 9 * uses_per_path + 3 < (len_path + 5) * uses_per_path
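        # For example, with len_path = 10 and uses_per_path = 4 the in-line
        # cost is 15 * 4 = 60 while definition+use costs 13 + 9 * 4 = 49, so
        # the defs-based form is used; with uses_per_path = 1 it is 15 vs 22
        # and the paths are emitted in-line instead.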
if not should_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
writer = self.writer
path_codes = []
writer.start('defs')
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
d = self._convert_path(path, transform, simplify=False)
oid = 'C%x_%x_%s' % (self._path_collection_id, i,
self._make_id('', d))
writer.element('path', id=oid, d=d)
path_codes.append(oid)
writer.end('defs')
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
clipid = self._get_clip(gc0)
url = gc0.get_url()
if url is not None:
writer.start('a', attrib={'xlink:href': url})
if clipid is not None:
writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
attrib = {
'xlink:href': '#%s' % path_id,
'x': six.text_type(xo),
'y': six.text_type(self.height - yo),
'style': self._get_style(gc0, rgbFace)
}
writer.element('use', attrib=attrib)
if clipid is not None:
writer.end('g')
if url is not None:
writer.end('a')
self._path_collection_id += 1
def draw_gouraud_triangle(self, gc, points, colors, trans):
# This uses a method described here:
#
# http://www.svgopen.org/2005/papers/Converting3DFaceToSVG/index.html
#
# that uses three overlapping linear gradients to simulate a
# Gouraud triangle. Each gradient goes from fully opaque in
# one corner to fully transparent along the opposite edge.
# The line between the stop points is perpendicular to the
# opposite edge. Underlying these three gradients is a solid
# triangle whose color is the average of all three points.
writer = self.writer
if not self._has_gouraud:
self._has_gouraud = True
writer.start(
'filter',
id='colorAdd')
writer.element(
'feComposite',
attrib={'in': 'SourceGraphic'},
in2='BackgroundImage',
operator='arithmetic',
k2="1", k3="1")
writer.end('filter')
avg_color = np.sum(colors[:, :], axis=0) / 3.0
# Just skip fully-transparent triangles
if avg_color[-1] == 0.0:
return
trans_and_flip = self._make_flip_transform(trans)
tpoints = trans_and_flip.transform(points)
writer.start('defs')
for i in range(3):
x1, y1 = tpoints[i]
x2, y2 = tpoints[(i + 1) % 3]
x3, y3 = tpoints[(i + 2) % 3]
c = colors[i][:]
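            # (xb, yb) is where this corner's gradient becomes fully
            # transparent: the foot of the perpendicular dropped from vertex i
            # onto the line through the other two vertices.  Vertical and
            # horizontal opposite edges are special-cased to avoid dividing
            # by zero.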
if x2 == x3:
xb = x2
yb = y1
elif y2 == y3:
xb = x1
yb = y2
else:
m1 = (y2 - y3) / (x2 - x3)
b1 = y2 - (m1 * x2)
m2 = -(1.0 / m1)
b2 = y1 - (m2 * x1)
xb = (-b1 + b2) / (m1 - m2)
yb = m2 * xb + b2
writer.start(
'linearGradient',
id="GR%x_%d" % (self._n_gradients, i),
x1=six.text_type(x1), y1=six.text_type(y1),
x2=six.text_type(xb), y2=six.text_type(yb))
writer.element(
'stop',
offset='0',
style=generate_css({'stop-color': rgb2hex(c),
'stop-opacity': six.text_type(c[-1])}))
writer.element(
'stop',
offset='1',
style=generate_css({'stop-color': rgb2hex(c),
'stop-opacity': "0"}))
writer.end('linearGradient')
writer.element(
'polygon',
id='GT%x' % self._n_gradients,
points=" ".join([six.text_type(x)
for x in (x1, y1, x2, y2, x3, y3)]))
writer.end('defs')
avg_color = np.sum(colors[:, :], axis=0) / 3.0
href = '#GT%x' % self._n_gradients
writer.element(
'use',
attrib={'xlink:href': href,
'fill': rgb2hex(avg_color),
'fill-opacity': str(avg_color[-1])})
for i in range(3):
writer.element(
'use',
attrib={'xlink:href': href,
'fill': 'url(#GR%x_%d)' % (self._n_gradients, i),
'fill-opacity': '1',
'filter': 'url(#colorAdd)'})
self._n_gradients += 1
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
attrib['clip-path'] = 'url(#%s)' % clipid
self.writer.start('g', attrib=attrib)
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
self.writer.end('g')
def option_scale_image(self):
return True
def get_image_magnification(self):
return self.image_dpi / 72.0
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
attrib = {}
clipid = self._get_clip(gc)
if clipid is not None:
# Can't apply clip-path directly to the image because the
# image has a transformation, which would also be applied
# to the clip-path
self.writer.start('g', attrib={'clip-path': 'url(#%s)' % clipid})
trans = [1,0,0,1,0,0]
if rcParams['svg.image_noscale']:
trans = list(im.get_matrix())
trans[5] = -trans[5]
attrib['transform'] = generate_transform([('matrix', tuple(trans))])
assert trans[1] == 0
assert trans[2] == 0
numrows, numcols = im.get_size()
im.reset_matrix()
im.set_interpolation(0)
im.resize(numcols, numrows)
h,w = im.get_size_out()
if dx is None:
w = 72.0*w/self.image_dpi
else:
w = dx
if dy is None:
h = 72.0*h/self.image_dpi
else:
h = dy
oid = getattr(im, '_gid', None)
url = getattr(im, '_url', None)
if url is not None:
self.writer.start('a', attrib={'xlink:href': url})
if rcParams['svg.image_inline']:
bytesio = io.BytesIO()
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, bytesio)
im.flipud_out()
oid = oid or self._make_id('image', bytesio)
attrib['xlink:href'] = (
"data:image/png;base64,\n" +
base64.b64encode(bytesio.getvalue()).decode('ascii'))
else:
self._imaged[self.basename] = self._imaged.get(self.basename,0) + 1
filename = '%s.image%d.png'%(self.basename, self._imaged[self.basename])
verbose.report( 'Writing image file for inclusion: %s' % filename)
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, filename)
im.flipud_out()
oid = oid or 'Im_' + self._make_id('image', filename)
attrib['xlink:href'] = filename
alpha = gc.get_alpha()
if alpha != 1.0:
attrib['opacity'] = str(alpha)
attrib['id'] = oid
if transform is None:
self.writer.element(
'image',
x=six.text_type(x/trans[0]),
y=six.text_type((self.height-y)/trans[3]-h),
width=six.text_type(w), height=six.text_type(h),
attrib=attrib)
else:
flipped = self._make_flip_transform(transform)
flipped = np.array(flipped.to_values())
y = y+dy
if dy > 0.0:
flipped[3] *= -1.0
y *= -1.0
attrib['transform'] = generate_transform(
[('matrix', flipped)])
self.writer.element(
'image',
x=six.text_type(x), y=six.text_type(y),
width=six.text_type(dx), height=six.text_type(abs(dy)),
attrib=attrib)
if url is not None:
self.writer.end('a')
if clipid is not None:
self.writer.end('g')
def _adjust_char_id(self, char_id):
return char_id.replace("%20", "_")
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath, mtext=None):
"""
        Draw the text by converting it to paths using the textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
writer = self.writer
writer.comment(s)
glyph_map=self._glyph_map
text2path = self._text2path
color = rgb2hex(gc.get_rgb())
fontsize = prop.get_size_in_points()
style = {}
if color != '#000000':
style['fill'] = color
if gc.get_alpha() != 1.0:
style['opacity'] = six.text_type(gc.get_alpha())
if not ismath:
font = text2path._get_font(prop)
_glyphs = text2path.get_glyphs_with_font(
font, s, glyph_map=glyph_map, return_new_glyphs_only=True)
glyph_info, glyph_map_new, rects = _glyphs
if glyph_map_new:
writer.start('defs')
for char_id, glyph_path in six.iteritems(glyph_map_new):
path = Path(*glyph_path)
path_data = self._convert_path(path, simplify=False)
writer.element('path', id=char_id, d=path_data)
writer.end('defs')
glyph_map.update(glyph_map_new)
attrib = {}
attrib['style'] = generate_css(style)
font_scale = fontsize / text2path.FONT_SCALE
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,)),
('scale', (font_scale, -font_scale))])
writer.start('g', attrib=attrib)
for glyph_id, xposition, yposition, scale in glyph_info:
attrib={'xlink:href': '#%s' % glyph_id}
if xposition != 0.0:
attrib['x'] = six.text_type(xposition)
if yposition != 0.0:
attrib['y'] = six.text_type(yposition)
writer.element(
'use',
attrib=attrib)
writer.end('g')
else:
if ismath == "TeX":
_glyphs = text2path.get_glyphs_tex(prop, s, glyph_map=glyph_map,
return_new_glyphs_only=True)
else:
_glyphs = text2path.get_glyphs_mathtext(prop, s, glyph_map=glyph_map,
return_new_glyphs_only=True)
glyph_info, glyph_map_new, rects = _glyphs
            # We store the character glyphs without flipping. Instead, the
            # coordinates are flipped when these characters are used.
if glyph_map_new:
writer.start('defs')
for char_id, glyph_path in six.iteritems(glyph_map_new):
char_id = self._adjust_char_id(char_id)
# Some characters are blank
if not len(glyph_path[0]):
path_data = ""
else:
path = Path(*glyph_path)
path_data = self._convert_path(path, simplify=False)
writer.element('path', id=char_id, d=path_data)
writer.end('defs')
glyph_map.update(glyph_map_new)
attrib = {}
font_scale = fontsize / text2path.FONT_SCALE
attrib['style'] = generate_css(style)
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,)),
('scale', (font_scale, -font_scale))])
writer.start('g', attrib=attrib)
for char_id, xposition, yposition, scale in glyph_info:
char_id = self._adjust_char_id(char_id)
writer.element(
'use',
transform=generate_transform([
('translate', (xposition, yposition)),
('scale', (scale,)),
]),
attrib={'xlink:href': '#%s' % char_id})
for verts, codes in rects:
path = Path(verts, codes)
path_data = self._convert_path(path, simplify=False)
writer.element('path', d=path_data)
writer.end('g')
def _draw_text_as_text(self, gc, x, y, s, prop, angle, ismath, mtext=None):
writer = self.writer
color = rgb2hex(gc.get_rgb())
style = {}
if color != '#000000':
style['fill'] = color
if gc.get_alpha() != 1.0:
style['opacity'] = six.text_type(gc.get_alpha())
if not ismath:
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
fontsize = prop.get_size_in_points()
fontfamily = font.family_name
fontstyle = prop.get_style()
attrib = {}
# Must add "px" to workaround a Firefox bug
style['font-size'] = six.text_type(fontsize) + 'px'
style['font-family'] = six.text_type(fontfamily)
style['font-style'] = prop.get_style().lower()
attrib['style'] = generate_css(style)
if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"):
# If text anchoring can be supported, get the original
# coordinates and add alignment information.
# Get anchor coordinates.
transform = mtext.get_transform()
ax, ay = transform.transform_point(mtext.get_position())
ay = self.height - ay
# Don't do vertical anchor alignment. Most applications do not
# support 'alignment-baseline' yet. Apply the vertical layout
# to the anchor point manually for now.
angle_rad = angle * np.pi / 180.
dir_vert = np.array([np.sin(angle_rad), np.cos(angle_rad)])
v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
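                # v_offset is the component of the offset between the layout
                # position (x, y) and the anchor point (ax, ay) along the
                # text's vertical direction; shifting the anchor by it applies
                # the vertical alignment manually, as described above.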
ax = ax + v_offset * dir_vert[0]
ay = ay + v_offset * dir_vert[1]
ha_mpl_to_svg = {'left': 'start', 'right': 'end',
'center': 'middle'}
style['text-anchor'] = ha_mpl_to_svg[mtext.get_ha()]
attrib['x'] = str(ax)
attrib['y'] = str(ay)
attrib['style'] = generate_css(style)
attrib['transform'] = "rotate(%f, %f, %f)" % (-angle, ax, ay)
writer.element('text', s, attrib=attrib)
else:
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,))])
writer.element('text', s, attrib=attrib)
if rcParams['svg.fonttype'] == 'svgfont':
fontset = self._fonts.setdefault(font.fname, set())
for c in s:
fontset.add(ord(c))
else:
writer.comment(s)
width, height, descent, svg_elements, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
svg_glyphs = svg_elements.svg_glyphs
svg_rects = svg_elements.svg_rects
attrib = {}
attrib['style'] = generate_css(style)
attrib['transform'] = generate_transform([
('translate', (x, y)),
('rotate', (-angle,))])
# Apply attributes to 'g', not 'text', because we likely
# have some rectangles as well with the same style and
# transformation
writer.start('g', attrib=attrib)
writer.start('text')
# Sort the characters by font, and output one tspan for
# each
spans = {}
for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
style = generate_css({
'font-size': six.text_type(fontsize) + 'px',
'font-family': font.family_name,
'font-style': font.style_name.lower()})
if thetext == 32:
thetext = 0xa0 # non-breaking space
spans.setdefault(style, []).append((new_x, -new_y, thetext))
if rcParams['svg.fonttype'] == 'svgfont':
for font, fontsize, thetext, new_x, new_y, metrics in svg_glyphs:
fontset = self._fonts.setdefault(font.fname, set())
fontset.add(thetext)
for style, chars in list(six.iteritems(spans)):
chars.sort()
same_y = True
if len(chars) > 1:
last_y = chars[0][1]
for i in xrange(1, len(chars)):
if chars[i][1] != last_y:
same_y = False
break
if same_y:
ys = six.text_type(chars[0][1])
else:
ys = ' '.join(six.text_type(c[1]) for c in chars)
attrib = {
'style': style,
'x': ' '.join(six.text_type(c[0]) for c in chars),
'y': ys
}
writer.element(
'tspan',
''.join(unichr(c[2]) for c in chars),
attrib=attrib)
writer.end('text')
if len(svg_rects):
for x, y, width, height in svg_rects:
writer.element(
'rect',
x=six.text_type(x), y=six.text_type(-y + height),
width=six.text_type(width), height=six.text_type(height)
)
writer.end('g')
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
clipid = self._get_clip(gc)
if clipid is not None:
# Cannot apply clip-path directly to the text, because
            # it has a transformation
self.writer.start(
'g', attrib={'clip-path': 'url(#%s)' % clipid})
if gc.get_url() is not None:
self.writer.start('a', {'xlink:href': gc.get_url()})
if rcParams['svg.fonttype'] == 'path':
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath, mtext)
else:
self._draw_text_as_text(gc, x, y, s, prop, angle, ismath, mtext)
if gc.get_url() is not None:
self.writer.end('a')
if clipid is not None:
self.writer.end('g')
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
return self._text2path.get_text_width_height_descent(s, prop, ismath)
class FigureCanvasSVG(FigureCanvasBase):
filetypes = {'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'}
fixed_dpi = 72
def print_svg(self, filename, *args, **kwargs):
if is_string_like(filename):
fh_to_close = svgwriter = io.open(filename, 'w', encoding='utf-8')
elif is_writable_file_like(filename):
if not isinstance(filename, io.TextIOBase):
if six.PY3:
svgwriter = io.TextIOWrapper(filename, 'utf-8')
else:
svgwriter = codecs.getwriter('utf-8')(filename)
else:
svgwriter = filename
fh_to_close = None
else:
raise ValueError("filename must be a path or a file-like object")
return self._print_svg(filename, svgwriter, fh_to_close, **kwargs)
def print_svgz(self, filename, *args, **kwargs):
if is_string_like(filename):
fh_to_close = gzipwriter = gzip.GzipFile(filename, 'w')
svgwriter = io.TextIOWrapper(gzipwriter, 'utf-8')
elif is_writable_file_like(filename):
fh_to_close = gzipwriter = gzip.GzipFile(fileobj=filename, mode='w')
svgwriter = io.TextIOWrapper(gzipwriter, 'utf-8')
else:
raise ValueError("filename must be a path or a file-like object")
return self._print_svg(filename, svgwriter, fh_to_close)
def _print_svg(self, filename, svgwriter, fh_to_close=None, **kwargs):
try:
image_dpi = kwargs.pop("dpi", 72)
self.figure.set_dpi(72.0)
width, height = self.figure.get_size_inches()
w, h = width*72, height*72
if rcParams['svg.image_noscale']:
renderer = RendererSVG(w, h, svgwriter, filename, image_dpi)
else:
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(self.figure,
width, height, image_dpi, RendererSVG(w, h, svgwriter, filename, image_dpi),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
finally:
if fh_to_close is not None:
svgwriter.close()
def get_default_filetype(self):
return 'svg'
class FigureManagerSVG(FigureManagerBase):
pass
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasSVG(figure)
manager = FigureManagerSVG(canvas, num)
return manager
svgProlog = """\
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Created with matplotlib (http://matplotlib.org/) -->
"""
FigureCanvas = FigureCanvasSVG
FigureManager = FigureManagerSVG
| mit |
MSeifert04/astropy | astropy/time/tests/test_basic.py | 1 | 81893 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import copy
import functools
import datetime
from copy import deepcopy
from decimal import Decimal, localcontext
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import catch_warnings, pytest
from astropy.utils.exceptions import AstropyDeprecationWarning, ErfaWarning
from astropy.utils import isiterable, iers
from astropy.time import (Time, TimeDelta, ScaleValueError, STANDARD_TIME_SCALES,
TimeString, TimezoneInfo)
from astropy.coordinates import EarthLocation
from astropy import units as u
from astropy import _erfa as erfa
from astropy.table import Column, Table
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps * 24 * 3600)
allclose_year = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=0.) # 14 microsec at current epoch
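# (with atol=0, a relative tolerance of one float eps at a value near 2000
#  Julian years corresponds to roughly 14 microseconds, which is what the
#  note above refers to)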
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, format='iso', scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+1.4288980208333335e-06,
-0.50000000e+00]))
# Set scale to TAI
t = t.tai
assert (repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+0.00037179926839122024,
-0.5+0.00039351851851851852]))
# Get a new ``Time`` object which is referenced to the TT scale
# (internal JD1 and JD1 are now with respect to TT scale)"""
assert (repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>")
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
# array, depending on whether the input was a scalar or array"""
assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002]))
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format='jd')
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000., 2450010.)
t2 = Time(val, format='jd')
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.
t3 = Time(val, val2, format='jd')
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.)/10.).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format='jd')
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize('format_', Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == 'tai'
@pytest.mark.parametrize('value', [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format='jd', scale='utc')
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format='iso', scale='tai', precision=1)
assert t2.value == '2010-01-01 00:00:34.0'
t2 = Time(t, format='iso', scale='tai', out_subfmt='date')
assert t2.value == '2010-01-01'
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d'))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format='mjd', scale='utc')
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.arange(len(mjd)), np.arange(len(mjd))))
t5 = t4[3]
assert t5.location == t4.location[3]
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0., 0.999, 0.2)
t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == '2010-01-01 00:00:00.000'
assert t.tt.iso == '2010-01-01 00:01:06.184'
assert t.tai.fits == '2010-01-01T00:00:34.000'
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == '2010-01-01T00:01:06.910'
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
# Uses initial class-defined precision=3
assert t.iso == '2010-01-01 00:00:00.000'
# Set instance precision to 9
t.precision = 9
assert t.iso == '2010-01-01 00:00:00.000000000'
assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000'
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(lon, lat))
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.500000'
assert t.ut1.iso == '2006-01-15 21:24:37.834100'
assert t.tai.iso == '2006-01-15 21:25:10.500000'
assert t.tt.iso == '2006-01-15 21:25:42.684000'
assert t.tcg.iso == '2006-01-15 21:25:43.322690'
assert t.tdb.iso == '2006-01-15 21:25:42.684373'
assert t.tcb.iso == '2006-01-15 21:25:56.893952'
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(lon, lat))
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=location)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(location.x, location.y, location.z))
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(lon, lat))
assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000')
assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373')
t2 = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000')
assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373'
assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373'
with pytest.raises(ValueError): # 1 time, but two locations
Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
with pytest.raises(ValueError): # 3 times, but two locations
Time(['2006-01-15 21:24:37.5']*3, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
# multidimensional
mjd = np.arange(50000., 50008.).reshape(4, 2)
t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.array([lon, 0]), np.array([lat, 0])))
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(mjd, format='mjd', scale='utc',
location=(np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]])))
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp('auto_download', False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1,
location=(lon, lat))
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = 'local'
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format='decimalyear')
Time(100.0, format='cxcsec')
Time(100.0, format='unix')
Time(100.0, format='gps')
Time(1950.0, format='byear', scale='tai')
Time(2000.0, format='jyear', scale='tai')
Time('B1950.0', format='byear_str', scale='tai')
Time('J2000.0', format='jyear_str', scale='tai')
Time('2000-01-01 12:23:34.0', format='iso', scale='tai')
Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc')
Time('2000-01-01T12:23:34.0', format='isot', scale='tai')
Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc')
Time('2000-01-01T12:23:34.0', format='fits')
Time('2000-01-01T12:23:34.0', format='fits', scale='tdb')
Time(2400000.5, 51544.0333981, format='jd', scale='tai')
Time(0.0, 51544.0333981, format='mjd', scale='tai')
Time('2000:001:12:23:34.0', format='yday', scale='tai')
Time('2000:001:12:23:34.0Z', format='yday', scale='utc')
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format='datetime', scale='tai')
Time([dt, dt], format='datetime', scale='tai')
dt64 = np.datetime64('2012-06-18T02:00:05.453000000')
Time(dt64, format='datetime64', scale='tai')
Time([dt64, dt64], format='datetime64', scale='tai')
def test_local_format_transforms(self):
"""
Test transformation of local time to different formats.
Transformation to formats with a reference time should raise
ScaleValueError.
"""
t = Time('2006-01-15 21:24:37.5', scale='local')
assert_allclose(t.jd, 2453751.3921006946, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.decimalyear, 2006.0408002758752, atol=0.001/3600./24./365., rtol=0.)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == '2006-01-15T21:24:37.500'
assert t.yday == '2006:015:21:24:37.500'
assert t.fits == '2006-01-15T21:24:37.500'
assert_allclose(t.byear, 2006.04217888831, atol=0.001/3600./24./365., rtol=0.)
assert_allclose(t.jyear, 2006.0407723496082, atol=0.001/3600./24./365., rtol=0.)
assert t.byear_str == 'B2006.042'
assert t.jyear_str == 'J2006.041'
# epochTimeFormats
with pytest.raises(ScaleValueError):
t2 = t.gps
with pytest.raises(ScaleValueError):
t2 = t.unix
with pytest.raises(ScaleValueError):
t2 = t.cxcsec
with pytest.raises(ScaleValueError):
t2 = t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456000'
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale='utc')
assert t2.datetime == dt
t = Time([dt, dt2], scale='utc')
assert np.all(t.value == [dt, dt2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2-dt)*np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
dt64_2 = np.datetime64('2000-01-02')
t = Time(dt64, scale='utc', precision=9, format='datetime64')
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64
t = Time(dt64_2, scale='utc', precision=3, format='datetime64')
assert t.iso == '2000-01-02 00:00:00.000'
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale='utc', format='datetime64')
assert np.all(t.value == [dt64, dt64_2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime64 == np.datetime64('2000-01-01T01:01:01.123456789')
# broadcasting
dt3 = (dt64 + (dt64_2-dt64)*np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc', format='datetime64')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format='datetime64')
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format='datetime64'))
assert Time(t3[2, 0], format='datetime64') == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format='jd', scale='tai', precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == 'B2015.136594'
assert t.jyear_str == 'J2015.134993'
t2 = Time(t.byear, format='byear', scale='tai')
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format='jyear', scale='tai')
assert allclose_jd(t2.jd, jd)
t = Time('J2015.134993', scale='tai', precision=6)
assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision
assert t.byear_str == 'B2015.136594'
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format='iso', scale='utc')
with pytest.raises(ValueError):
Time('2000:001', format='jd', scale='utc')
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ['bad'], format='mjd', scale='tai')
with pytest.raises(ValueError):
Time(50000.0, 'bad', format='mjd', scale='tai')
with pytest.raises(ValueError):
Time('2005-08-04T00:01:02.000Z', scale='tai')
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format='jd', scale='utc')
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time('2000-01-02T03:04:05(TAI)', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(UT(NIST)')
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f'{year:04d}-{month:02d}'
yyyy_mm_dd = f'{year:04d}-{month:02d}-{day:02d}'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc')
assert t1.iso == yyyy_mm + '-02 00:00:00.000'
# Leap second is different
t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:59.900'
t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.000'
t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.999'
if month == 6:
yyyy_mm_dd_plus1 = f'{year:04d}-07-01'
else:
yyyy_mm_dd_plus1 = '{:04d}-01-01'.format(year+1)
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc')
assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000'
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc')
t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc')
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time('2007:001', scale='tai')
t2 = Time(['2007-01-02', '2007-01-03'], scale='utc')
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale='utc')
assert t3.scale == 'utc'
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale='tt')
assert t3.scale == 'tt'
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000., 50006.)
frac = np.arange(0., 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
t6 = Time(t1, scale='local')
class TestVal2:
"""Tests related to val2"""
@pytest.mark.parametrize("d", [
dict(val="2001:001", val2="ignored", scale="utc"),
dict(val={'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
val2="ignored", scale="utc"),
dict(val=np.datetime64('2005-02-25'), val2="ignored", scale="utc"),
dict(val=datetime.datetime(2000, 1, 2, 12, 0, 0),
val2="ignored", scale="utc"),
])
def test_unused_val2_raises(self, d):
"""Test that providing val2 is for string input lets user know we won't use it"""
with pytest.raises(ValueError):
Time(**d)
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai')
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000., 50007.)
frac = np.arange(0., 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc')
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai')
def test_broadcast_not_writable(self):
val = (2458000 + np.arange(3))[:, None]
val2 = np.linspace(0, 1, 4, endpoint=False)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val+0*val2, val2=0*val+val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1, 2] = t_i
t[1, 2] = t_i
assert t_b[1, 2] == t[1, 2], "writing worked"
assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
def test_broadcast_one_not_writable(self):
val = (2458000 + np.arange(3))
val2 = np.arange(1)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val+0*val2, val2=0*val+val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1] = t_i
t[1] = t_i
assert t_b[1] == t[1], "writing worked"
assert t_b[0] == t[0], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000',
'2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
# Heterogeneous input formats with in_subfmt='date_*'
times = ['2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
in_subfmt='date_*')
assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='date')
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='doesnt exist')
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
out_subfmt='date_hm')
assert np.all(t.iso == np.array(['2000-01-01 00:00',
'2000-01-01 01:01',
'2000-01-01 01:01',
'2000-01-01 01:01']))
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123']
t = Time(times, format='fits', scale='tai')
assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000',
'2000-01-01T01:01:01.000',
'2000-01-01T01:01:01.123']))
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format='fits', out_subfmt='long*')
assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+02000-01-01T01:01:01.123']))
# Implicit long format for output, because of negative year.
times[2] = '-00594-01-01'
t3 = Time(times, format='fits', scale='tai')
assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'-00594-01-01T00:00:00.000']))
# Implicit long format for output, because of large positive year.
times[2] = '+10594-01-01'
t4 = Time(times, format='fits', scale='tai')
assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+10594-01-01T00:00:00.000']))
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-12-01', '2001-12-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
t.out_subfmt = 'date_hm'
assert np.all(t.yday == np.array(['2000:336:00:00',
'2001:335:01:01']))
t.out_subfmt = '*'
assert np.all(t.yday == np.array(['2000:336:00:00:00.000',
'2001:335:01:01:01.123']))
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format='cxcsec', scale='utc')
assert t.scale == 'utc'
t = Time(100.0, format='unix', scale='tai')
assert t.scale == 'tai'
t = Time(100.0, format='gps', scale='utc')
assert t.scale == 'utc'
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format='byear', scale='bad scale')
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time('2000:001:00:00:00', scale='bad scale')
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc")):
with catch_warnings(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][:inputs[0].index("(")], format="isot",
scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:00.123456789(UTC)')
t = t.tai
assert t.isot == '1999-01-01T00:00:32.123'
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)')
t = t.utc
assert t.isot == '1999-01-01T00:00:00.123'
# Check scale consistency
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="tai")
assert t.scale == "tai"
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(ET)', scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format='cxcsec')
assert t.scale == 'tt'
t = Time(100.0, format='unix')
assert t.scale == 'utc'
t = Time(100.0, format='gps')
assert t.scale == 'tai'
for date in ('2000:001', '2000-01-01T00:00:00'):
t = Time(date)
assert t.scale == 'utc'
t = Time(2000.1, format='byear')
assert t.scale == 'tt'
t = Time('J2000')
assert t.scale == 'tt'
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format='cxcsec', scale='tai')
assert t.tt.iso == '1998-01-01 00:00:00.000'
# Create new time object from this one and change scale, format
t2 = Time(t, scale='tt', format='iso')
assert t2.value == '1998-01-01 00:00:00.000'
# Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format='cxcsec', scale='utc')
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == '2010:001:00:00:00.000'
t = Time('2010:001:00:00:00.000', scale='utc')
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Value from:
# d = datetime.datetime(2000, 1, 1)
# matplotlib.pylab.dates.date2num(d)
t = Time('2000-01-01 00:00:00', scale='utc')
assert np.allclose(t.plot_date, 730120.0, atol=1e-5, rtol=0)
# Round trip through epoch time
for scale in ('utc', 'tt'):
t = Time('2000:001', scale=scale)
t2 = Time(t.unix, scale=scale, format='unix')
assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000'
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time('2013-05-20 21:18:46', scale='utc')
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time('2004-09-16T23:59:59', scale='utc')
assert allclose_sec(t.unix, 1095379199.0)
class TestNumericalSubFormat:
def test_explicit_example(self):
t = Time('54321.000000000001', format='mjd')
assert t == Time(54321, 1e-12, format='mjd')
assert t.mjd == 54321. # Lost precision!
assert t.value == 54321. # Lost precision!
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', 'bytes') == b'54321.000000000001'
expected_long = np.longdouble(54321.) + np.longdouble(1e-12)
# Check we're the same to within the double holding jd2
# (which is less precise than longdouble on arm64).
assert np.allclose(t.to_value('mjd', subfmt='long'),
expected_long, rtol=0, atol=np.finfo(float).eps)
t.out_subfmt = 'str'
assert t.value == '54321.000000000001'
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.mjd == '54321.000000000001'
assert t.to_value('mjd', subfmt='bytes') == b'54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
t.out_subfmt = 'long'
assert np.allclose(t.value, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.to_value('mjd', subfmt=None), expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.mjd, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
def test_explicit_longdouble(self):
i = 54321
# Create a different long double (which will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
f = max(2.**(-np.finfo(np.longdouble).nmant) * 65536,
np.finfo(float).eps)
mjd_long = np.longdouble(i) + np.longdouble(f)
assert mjd_long != i, "longdouble failure!"
t = Time(mjd_long, format='mjd')
expected = Time(i, f, format='mjd')
assert abs(t - expected) <= 20.*u.ps
t_float = Time(i+f, format='mjd')
assert t_float == Time(i, format='mjd')
assert t_float != t
assert t.value == 54321. # Lost precision!
assert np.allclose(t.to_value('mjd', subfmt='long'), mjd_long,
rtol=0., atol=np.finfo(float).eps)
t2 = Time(mjd_long, format='mjd', out_subfmt='long')
assert np.allclose(t2.value, mjd_long,
rtol=0., atol=np.finfo(float).eps)
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
@pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
def test_longdouble_for_other_types(self, fmt):
t_fmt = getattr(Time(58000, format="mjd"), fmt) # Get regular float
t_fmt_long = np.longdouble(t_fmt)
# Create a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
atol = np.finfo(float).eps * (1. if fmt == 'mjd' else 24.*3600.)
t_fmt_long2 = t_fmt_long + max(
t_fmt_long * np.finfo(np.longdouble).eps * 2, atol)
assert t_fmt_long != t_fmt_long2, "longdouble weird!"
tm = Time(t_fmt_long, format=fmt)
tm2 = Time(t_fmt_long2, format=fmt)
assert tm != tm2
tm_long2 = tm2.to_value(fmt, subfmt='long')
assert np.allclose(tm_long2, t_fmt_long2, rtol=0., atol=atol)
def test_subformat_input(self):
s = '54321.01234567890123456789'
i, f = s.split('.') # Note, OK only for fraction < 0.5
t = Time(float(i), float('.'+f), format='mjd')
t_str = Time(s, format='mjd')
t_bytes = Time(s.encode('ascii'), format='mjd')
t_decimal = Time(Decimal(s), format='mjd')
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize('out_subfmt', ('str', 'bytes'))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0., 1e-9, 1e-12])
t = Time(i, f, format='mjd', out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(['54321.0',
'54321.000000001',
'54321.000000000001'], dtype=out_subfmt)
assert np.all(t_value == expected)
assert np.all(Time(expected, format='mjd') == t)
# Explicit sub-format.
t = Time(i, f, format='mjd')
t_mjd_subfmt = t.to_value('mjd', subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize('fmt,string,val1,val2', [
('jd', '2451544.5333981', 2451544.5, .0333981),
('decimalyear', '2000.54321', 2000., .54321),
('cxcsec', '100.0123456', 100.0123456, None),
('unix', '100.0123456', 100.0123456, None),
('gps', '100.0123456', 100.0123456, None),
('byear', '1950.1', 1950.1, None),
('jyear', '2000.1', 2000.1, None)])
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt='str') == string
def test_basic_subformat_setting(self):
t = Time('2001', format='jyear', scale='tai')
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time('2001', format='jyear', scale='tai')
t.to_value('mjd', subfmt='str')
assert ('mjd', 'str') in t.cache['format']
t.to_value('mjd', 'str')
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time('2001', format='jyear', scale='tai')
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time('2001', format='jyear', scale='tai')
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
t2_s_40 = t.to_value(fmt, "str")
assert t_s_2 == t2_s_40, "String representation should not depend on Decimal context"
def test_decimal_context_caching(self):
t = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value('mjd', subfmt='decimal')
t2 = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 40
t_s_40 = t.to_value('mjd', subfmt='decimal')
t2_s_40 = t2.to_value('mjd', subfmt='decimal')
assert t_s_2 == t_s_40, "Should be the same but cache might make this automatic"
assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize("f, s, t", [("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str)])
def test_timedelta_basic(self, f, s, t):
dt = (Time("58000", format="mjd", scale="tai")
- Time("58001", format="mjd", scale="tai"))
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time('J2000')
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match='format must be one of'):
t.to_value('julian')
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match='not among selected'):
Time("58000", format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(np.longdouble(58000), format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='str')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='long')
def test_wrong_out_subfmt(self):
t = Time(58000., format='mjd')
with pytest.raises(ValueError, match='must match one'):
t.to_value('mjd', subfmt='parrot')
t.out_subfmt = 'parrot'
with pytest.raises(ValueError):
t.value
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with catch_warnings() as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert 'bad day (JD computed)' in str(w[0].message)
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.])
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format='jd', scale='tai')
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format='mjd', scale='tai')
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(['2000:001'], format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time('2000:001', format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
assert t.location.x == t_loc_x # prove that it did not change
class TestStardate:
"""Sync chronometers with Starfleet Command"""
def test_iso_to_stardate(self):
assert str(Time('2320-01-01', scale='tai').stardate)[:7] == '1368.99'
assert str(Time('2330-01-01', scale='tai').stardate)[:8] == '10552.76'
assert str(Time('2340-01-01', scale='tai').stardate)[:8] == '19734.02'
@pytest.mark.parametrize('dates',
[(10000, '2329-05-26 03:02'),
(20000, '2340-04-15 19:05'),
(30000, '2351-03-07 11:08')])
def test_stardate_to_iso(self, dates):
stardate, iso = dates
t_star = Time(stardate, format='stardate')
t_iso = Time(t_star, format='iso', out_subfmt='date_hm')
assert t_iso.value == iso
def test_python_builtin_copy():
t = Time('2000:001', format='yday', scale='tai')
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == 'datetime'
assert t.scale == 'utc'
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time('2001:001', format='yday')
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format='decimalyear')
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time('2000:001').jd
jd1 = Time('2001:001').jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd,
jd0 + 0.75 * d_jd])
def test_fits_year0():
t = Time(1721425.5, format='jd', scale='tai')
assert t.fits == '0001-01-01T00:00:00.000'
t = Time(1721425.5 - 366., format='jd', scale='tai')
assert t.fits == '+00000-01-01T00:00:00.000'
t = Time(1721425.5 - 366. - 365., format='jd', scale='tai')
assert t.fits == '-00001-01-01T00:00:00.000'
def test_fits_year10000():
t = Time(5373484.5, format='jd', scale='tai')
assert t.fits == '+10000-01-01T00:00:00.000'
t = Time(5373484.5 - 365., format='jd', scale='tai')
assert t.fits == '9999-01-01T00:00:00.000'
t = Time(5373484.5, -1./24./3600., format='jd', scale='tai')
assert t.fits == '9999-12-31T23:59:59.000'
def test_dir():
t = Time('2000:001', format='yday', scale='tai')
assert 'utc' in dir(t)
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc')
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert 'Time' in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time('1900-01-01', scale='ut1')
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion():
Time(Time.now().cxcsec, format='cxcsec', scale='ut1')
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype('>f8')
little_endian = mjd.astype('<f8')
time_mjd = Time(mjd, format='mjd')
time_big = Time(big_endian, format='mjd')
time_little = Time(little_endian, format='mjd')
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = 'longyear'
subfmts = (('date',
r'(?P<year>[+-]\d{5})-%m-%d', # hybrid
'{year:+06d}-{mon:02d}-{day:02d}'),)
t = Time('+02000-02-03', format='longyear')
assert t.value == '+02000-02-03'
assert t.jd == Time('2000-02-03').jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (('jd', 2451577.5),
('mjd', 51577.0),
('cxcsec', 65923264.184), # confirmed with Chandra.Time
('datetime', datetime.datetime(2000, 2, 3, 0, 0)),
('iso', '2000-02-03 00:00:00.000')):
t = Time('+02000-02-03', format='fits')
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5)
tc = t.copy()
t.format = 'isot'
assert t.precision == 5
assert t.out_subfmt == 'date_hms'
assert t.value == '2000-02-03T00:00:00.00000'
t.format = 'fits'
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='longdate')
t.format = 'isot'
assert t.out_subfmt == '*' # longdate_hms not there, goes to default
assert t.value == '2000-02-03T00:00:00.000'
t.format = 'fits'
assert t.out_subfmt == '*'
assert t.value == '2000-02-03T00:00:00.000' # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time('2007:001', scale='tai')
with pytest.raises(ValueError) as err:
t1.replicate(format='definitely_not_a_valid_format')
assert 'format must be one of' in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time('2007:001', scale='tai')
assert 'astropy_time' not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format='astropy_time')
assert 'format must be one of' in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'],
format='iso', scale='utc')
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10*u.hour, tzname='US/Hawaii')
# The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError, match=r'does not support leap seconds'):
Time('2015-06-30 23:59:60.000').to_datetime()
@pytest.mark.skipif('not HAS_PYTZ')
def test_to_datetime_pytz():
tz = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time('2010-09-03 00:00:00')
t2 = Time('2010-09-03 00:00:00')
# Time starts out without a cache
assert 'cache' not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache['format']['iso'] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache['scale']['tai'] == t2.tai
# New Time object after scale transform does not have a cache yet
assert 'cache' not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert 'cache' not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert 'cache' in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [[[f'{y:04d}-{m:02d}-{d:02d}' for d in range(1, 3)]
for m in range(5, 7)] for y in range(2012, 2014)]
cutf32 = Column(times)
cbytes = cutf32.astype('S')
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(['B1950']))
tbytes = Time(Column([b'B1950']))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b'2012-01-01', b'2012-01-01T00:00:00'])
assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00']))
def test_bytes_input():
tstring = '2011-01-02T03:04:05'
tbytes = b'2011-01-02T03:04:05'
assert tbytes.decode('ascii') == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == 'S'
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format='cxcsec')
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is writeable because it gets boxed into a zero-d array
t = Time('2000:001', scale='utc')
t[()] = '2000:002'
assert t.value.startswith('2000:002')
# Transformed attribute is not writeable
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = '2005:001'
assert 'Time object is read-only. Make a copy()' in str(err.value)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format='cxcsec')
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location=None'.format(loc[0])) in str(err.value)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format='cxcsec', location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location={}'.format(loc[0], loc[1])) in str(err.value)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format='cxcsec')
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=None and '
'got location={}'.format(loc[1])) in str(err.value)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
t[0, :] = Time([-3, -4], format='cxcsec', location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format='cxcsec')
assert t.cache == {}
t.iso
assert 'iso' in t.cache['format']
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[3, 4]])
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[200, 200]])
# Array of strings in yday format
t[:, 1] = ['1998:002', '1998:003']
assert allclose_sec(t.value, [[1, 86400 * 1],
[200, 86400 * 2]])
# Incompatible numeric value
t = Time(['2000:001', '2000:002'])
t[0] = '2001:001'
with pytest.raises(ValueError) as err:
t[0] = 100
assert 'cannot convert value to a compatible Time object' in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object.
"""
# Set from time object with different scale
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = Time(['2000:010'], scale='tai')
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(['2000:001', '2000:002'], scale='utc')
t2.format = 'jyear'
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format='cxcsec')
with pytest.raises(IndexError):
t['asdf'] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format='cxcsec')
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, '_delta_tdb_tt')
assert not hasattr(t, '_delta_ut1_utc')
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time('1999-01-01T01:01:01')
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strftime_array():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S').tolist() == tstrings
def test_strftime_array_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime('%Y-%m-%d %H:%M:%S') == tstrings)
assert t.strftime('%Y-%m-%d %H:%M:%S').shape == tstrings.shape
def test_strftime_leapsecond():
time_string = '1995-12-31 23:59:60'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strptime_scalar():
"""Test of Time.strptime
"""
time_string = '2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01', '1998-Jan-01 00:00:02'],
['1998-Jan-01 00:00:03', '1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, '%S')
def test_strptime_input_bytes_scalar():
time_string = b'2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [[b'1998-Jan-01 00:00:01', b'1998-Jan-01 00:00:02'],
[b'1998-Jan-01 00:00:03', b'1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time('1995-12-31T23:59:60', format='isot')
time_obj2 = Time.strptime('1995-Dec-31 23:59:60', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time('0995-12-31T00:00:00', format='isot', scale='tai')
time_obj2 = Time.strptime('0995-Dec-31 00:00:00', '%Y-%b-%d %H:%M:%S',
scale='tai')
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = '2007-May-04 21:08:12.123'
time_object = Time('2007-05-04 21:08:12.123')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S.%f')
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01.123', '1998-Jan-01 00:00:02.000001'],
['1998-Jan-01 00:00:03.000900', '1998-Jan-01 00:00:04.123456']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01.123', '1998-01-01 00:00:02.000001'],
['1998-01-01 00:00:03.000900', '1998-01-01 00:00:04.123456']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S.%f')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00.123'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == time_string
def test_strftime_scalar_fracsec_precision():
time_string = '2010-09-03 06:00:00.123123123'
t = Time(time_string)
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123'
t.precision = 9
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123123123'
def test_strftime_array_fracsec():
tstrings = ['2010-09-03 00:00:00.123000', '2005-09-03 06:00:00.000001',
'1995-12-31 23:59:60.000900']
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f').tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format='unix')
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, '1970-01-01 00:01:00')
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time('1970-01-01 00:01:00'))
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time('1970-01-01 00:01:00')])
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format='unix'))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format='unix'))
def test_insert_exceptions():
tm = Time(1, format='unix')
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert 'cannot insert into scalar' in str(err.value)
tm = Time([1, 2], format='unix')
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert 'axis must be 0' in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert 'obj arg must be an integer' in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert 'index -100 is out of bounds for axis 0 with size 2' in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
t = Time(dt64, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format='cxcsec', location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
t = Time(1, format='cxcsec', location=loc)
t2 = Time(1, format='cxcsec')
assert hash(t) != hash(t2)
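# Equal times expressed in different scales compare equal but hash differently;
# like the location above, the scale presumably enters the hash.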
t = Time('2000:180', scale='utc')
t2 = Time(t, scale='tai')
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format='sec')
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
with pytest.raises(ValueError) as err:
Time(10)
assert "No time format was given, and the input is" in str(err.value)
with pytest.raises(ValueError) as err:
Time('2000:001', format='not-a-format')
assert "Format 'not-a-format' is not one of the allowed" in str(err.value)
with pytest.raises(ValueError) as err:
Time('200')
assert 'Input values did not match any of the formats where' in str(err.value)
with pytest.raises(ValueError) as err:
Time('200', format='iso')
assert ('Input values did not match the format class iso:' + os.linesep +
'ValueError: Time 200 does not match iso format') == str(err.value)
with pytest.raises(ValueError) as err:
Time(200, format='iso')
assert ('Input values did not match the format class iso:' + os.linesep +
'TypeError: Input values for iso class must be strings') == str(err.value)
def test_ymdhms_defaults():
t1 = Time({'year': 2001}, format='ymdhms')
assert t1 == Time('2001-01-01')
times_dict_ns = {
'year': [2001, 2002],
'month': [2, 3],
'day': [4, 5],
'hour': [6, 7],
'minute': [8, 9],
'second': [10, 11]
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)
ymdhms_names = ('year', 'month', 'day', 'hour', 'minute', 'second')
@pytest.mark.parametrize('tm_input', [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
@pytest.mark.parametrize('as_row', [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
time_ns = Time(['2001-02-04 06:08:10', '2002-03-05 07:09:11'])
if as_row:
tm_input = tm_input[0]
time_ns = time_ns[0]
tm = Time(tm_input, **kwargs)
assert np.all(tm == time_ns)
assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
times_dict_shape = {
'year': [[2001, 2002],
[2003, 2004]],
'month': [2, 3],
'day': 4
}
time_shape = Time(
[['2001-02-04', '2002-03-04'],
['2003-02-04', '2004-03-04']]
)
time = Time(times_dict_shape, format='ymdhms')
assert np.all(time == time_shape)
assert time.ymdhms.shape == time_shape.shape
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
def test_ymdhms_init_from_dict_scalar(kwargs):
"""
Test YMDHMS functionality for a dict input. This includes ensuring that
key and attribute access work. For extra fun use a time within a leap
second.
"""
time_dict = {
'year': 2016,
'month': 12,
'day': 31,
'hour': 23,
'minute': 59,
'second': 60.123456789}
tm = Time(time_dict, **kwargs)
assert tm == Time('2016-12-31T23:59:60.123456789')
for attr in time_dict:
for value in (tm.value[attr], getattr(tm.value, attr)):
if attr == 'second':
assert allclose_sec(time_dict[attr], value)
else:
assert time_dict[attr] == value
# Now test initializing from a YMDHMS format time using the object
tm_rt = Time(tm)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
# Test initializing from a YMDHMS value (np.void, i.e. recarray row)
# without specified format.
tm_rt = Time(tm.ymdhms)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
def test_ymdhms_exceptions():
with pytest.raises(ValueError, match='input must be dict or table-like'):
Time(10, format='ymdhms')
match = "'wrong' not allowed as YMDHMS key name(s)"
# NB: pytest.raises(match=...) treats the string as a regex, and the
# parentheses in "name(s)" break the match, so we fall back to old school
# ``match in str(err.value)``.
with pytest.raises(ValueError) as err:
Time({'year': 2019, 'wrong': 1}, format='ymdhms')
assert match in str(err.value)
match = "for 2 input key names you must supply 'year', 'month'"
with pytest.raises(ValueError, match=match):
Time({'year': 2019, 'minute': 1}, format='ymdhms')
def test_ymdhms_masked():
tm = Time({'year': [2000, 2001]}, format='ymdhms')
tm[0] = np.ma.masked
assert isinstance(tm.value[0], np.ma.core.mvoid)
for name in ymdhms_names:
assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
t = Time({'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
scale='utc')
# NOTE: actually comes back as np.void for some reason
# NOTE: not necessarily a python int; might be an int32
assert t.ymdhms.year == 2015
# There are now two stages of validation: one on input into a format, so that
# the format conversion code has tidy, matched arrays to work with, and another
# when object construction does not go through a format object (or, at least,
# the format object is constructed with from_jd=True). In that case the normal
# input validation does not happen, but the new validation does, and it ensures
# that strange broadcasting anomalies cannot occur.
# The construction below uses from_jd=True.
def test_broadcasting_writeable():
t = Time('J2015') + np.linspace(-1, 1, 10)*u.day
t[2] = Time(58000, format="mjd")
| bsd-3-clause |
jakob-skinner/Orbit-Fitting | fitter.py | 2 | 8223 | #import-libraries-and-data---------------------------------------------------------------------------------------#
import os, numpy as np, functions as f
from matplotlib.gridspec import GridSpec
from matplotlib import pyplot as plt, rcParams
#rcParams.update({'figure.autolayout' : True})
# Select the file.
file = 'data/2144+4211/2144+4211.tbl'
# Create the data variable.
data = np.genfromtxt(file, skip_header=1, usecols=(1, 2, 3, 4, 5))
# Extract the shorthand name.
system = file.replace('.tbl', '')[5:14]
#define-variables------------------------------------------------------------------------------------------------#
JD, RVp, RVs = [datum[0] for datum in data], [datum[1] for datum in data], [datum[3] for datum in data]
p_err, s_err = [datum[2] for datum in data], [datum[4] for datum in data]
JDp, JDs = JD, JD
period_samples = 10000
max_period = 3.32
nwalkers, nsteps = 4000, 2000  # minimum nwalkers: 14; minimum nsteps is set by the convergence cutoff
cutoff = 1000
#define-functions------------------------------------------------------------------------------------------------#
periodogram, dataWindow, phases, wilson = f.periodogram, f.dataWindow, f.phases, f.wilson
adjustment, RV, residuals, MCMC, walkers = f.adjustment, f.RV, f.residuals, f.MCMC, f.walkers
corner, massLimit, coverage, transform = f.corner, f.massLimit, f.coverage, f.transform
#now-do-things!--------------------------------------------------------------------------------------------------#
#plot Wilson plot (mass ratio)
mass_ratio, intercept, standard_error = wilson(data)
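# Wilson-plot sketch of the mass ratio: for a double-lined binary, momentum
# conservation gives RVp = -q*RVs + (1 + q)*gamma, so a straight-line fit of
# primary versus secondary velocities yields q = -slope; wilson() is assumed
# to return that slope-derived q, the fitted intercept, and its standard error.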
fig = plt.figure(figsize=(5,5))
x, y = np.array([np.nanmin(RVs), np.nanmax(RVs)]), -mass_ratio*np.array([np.nanmin(RVs),np.nanmax(RVs)])+intercept
plt.errorbar(RVs, RVp, p_err, s_err, 'k.')
plt.plot(x, y)
#ax.set_title('Wilson plot for 2M17204248+4205070')
plt.text(0, 20, 'q = %s $\pm$ %s' %(round(mass_ratio, 3), round(standard_error, 3)))
plt.ylabel('Primary Velocity ($\\frac{km}{s}$)')#, size='15')
plt.xlabel('Secondary Velocity ($\\frac{km}{s}$)')#, size='15')
plt.title('q = %s $\pm$ %s'%(round(mass_ratio, 3), round(standard_error, 3)))
plt.savefig(file + ' mass ratio.pdf', bbox_inches='tight')
#plt.show()
#check for invalid values
JDp, RVp, p_err = adjustment(JD, RVp, p_err)
JDs, RVs, s_err = adjustment(JD, RVs, s_err)
print(coverage(RVp, RVs))
#calculate periodograms
x, y, delta_x = periodogram(JDp, RVp, period_samples, max_period)
y2 = periodogram(JDs, RVs, period_samples, max_period)[1]
y3,y4 = dataWindow(JDp, period_samples, max_period)[1], dataWindow(JDs, period_samples, max_period)[1]
#plot periodogram - data window
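#The quantity plotted below, y*y2 - y3*y4, is the product of the primary and
#secondary RV periodograms minus the product of the corresponding data-window
#periodograms, which is meant to suppress peaks arising purely from the sampling cadence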
fig = plt.figure(figsize=(8,3))
#plt.plot(x, y*y2, 'b', alpha = 0.5)
#plt.plot(x, y3*y4, 'r', alpha = 0.5)
plt.plot(x, (y*y2-y3*y4), 'k')
plt.ylabel('Periodogram Power')#, size='15')
plt.xlabel('Period (days)')#, size='15')
plt.ylim(0,1)
plt.xscale('log')
plt.xlim(1,max_period)
plt.title(system)
plt.savefig(file + ' adjusted periodogram.pdf', bbox_inches='tight')
#plt.show()
plt.close('all')
#-----------------------MCMC------------------------#
import time
start = time.time() #start timer
#constrain parameters
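#assumed parameter order: [K, e, w, T, P, y] -- semi-amplitude, eccentricity,
#argument of periastron, epoch, period, systemic velocity -- matching the labels
#written to the log file further down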
lower_bounds = [0, -0.1, 0, np.median(np.asarray(JD))-0.5*max_period, 3.28, min(min(RVs), min(RVp))]
upper_bounds = [100, 0.2, 4, np.median(np.asarray(JD))+0.5*max_period, 3.32, max(max(RVs), max(RVp))]
#take a walk
print('\nwalking...')
sampler = MCMC(mass_ratio, RVp, p_err, RVs, s_err, JDp, JDs, lower_bounds, upper_bounds, 6, nwalkers, nsteps, 4)
print('Walk complete.\n')
print('Acceptance Fraction: ', np.mean(sampler.acceptance_fraction), '\n')
#save the results of the walk
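#discard the first `cutoff` steps of every walker as burn-in, then flatten and
#transform the remaining chain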
samples = transform(sampler.chain[:, cutoff:, :].reshape((-1, 6)))
results = np.asarray(list(map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84], axis=0)))))
parms = [x for x in np.transpose(results)[0]]
print('RMS error: ', round(residuals([results[0][0], results[1][0], results[2][0],
results[3][0], results[4][0], results[5][0]], mass_ratio, RVp, RVs, JDp, JDs), 3))
print('BIC = %s'%(np.log(len(RVp)+len(RVs))*7 - 2*f.logLikelihood(parms, mass_ratio, RVp, p_err, RVs, s_err, JDp, JDs, lower_bounds, upper_bounds)))
print('Minimum primary mass: ', massLimit(mass_ratio, parms[0], parms[1], parms[-2]), ' Solar masses.\n')
#create walkers plot
print('plotting walk...')
walkers(nsteps, 6, cutoff, sampler).savefig(file + ' 6 dimension walk plot.png', bbox_inches='tight', dpi=300)
plt.close()
print('Walk Plotted\n')
del sampler
#create the corner plot
print('cornering...')
corner(6, samples, parms).savefig(file + ' 6 dimension corner plot.pdf', bbox_inches='tight')
plt.close()
print('Corner plotted.\n')
# Write the samples to disk.
print('writing samples to disk...')
np.savetxt(file + ' %s error samples.gz'%(round(residuals(parms, mass_ratio, RVp, RVs, JDp, JDs), 3)),
samples, delimiter=',')
print('Samples written!\n')
del samples
#write results to console
#print('Results:')
#for i in range(6):
# print(results[i][0], '+',results[i][1], '-',results[i][2])
#write results to log file
table = open('log.txt', 'a+')
labels = ('K', 'e', 'w', 'T', 'P', 'y')
print('\n' , system, " Results:", file = table)
print('RMS error: ', residuals(np.transpose(results)[0], mass_ratio, RVp, RVs, JDp, JDs), file = table)
print('q = ', mass_ratio, ' +/- ', standard_error , file = table)
for i in range(6):
print(labels[i], ' = ', results[i][0], ' +', results[i][1], ' -', results[i][2], file = table)
table.close()
#end timer
end = time.time()
elapsed = end-start
print('Fitting time was ', int(elapsed), ' seconds.\n')
#-------------circular---MCMC---------------#
start = time.time() #start timer
#take a walk
print('walking...')
sampler = MCMC(mass_ratio, RVp, p_err, RVs, s_err, JDp, JDs, lower_bounds, upper_bounds, 4, nwalkers, nsteps, 4)
print('Walk complete.\n')
print('Acceptance Fraction: ', np.mean(sampler.acceptance_fraction), '\n')
#save the results of the walk
samples = sampler.chain[:, cutoff:, :].reshape((-1, 4))
results = np.asarray(list(map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84], axis=0)))))
parms = [x for x in np.transpose(results)[0]]
print('RMS error: ', round(residuals([parms[0], 0, 0,
parms[1],parms[2], parms[3]], mass_ratio, RVp, RVs, JDp, JDs), 3))
print('BIC = %s'%(np.log(len(RVp)+len(RVs))*7 - 2*f.logLikelihood(parms, mass_ratio, RVp, p_err, RVs, s_err, JDp, JDs, lower_bounds, upper_bounds)))
print('Minimum primary mass: ', massLimit(mass_ratio, parms[0], 0, parms[-2]), ' Solar masses.\n')
#create the walkers plot
print('plotting walk...')
walkers(nsteps, 4, cutoff, sampler).savefig(file + ' 4 dimension walk plot.png', bbox_inches='tight', dpi=300)
plt.close()
print('Walk plotted.\n')
del sampler
# Write the samples to disk.
#create the corner plot
print('cornering...')
corner(4, samples, parms).savefig(file + ' 4 dimension corner plot.pdf', bbox_inches='tight')
plt.close()
print('Corner plotted.\n')
print('writing samples to disk...')
np.savetxt(file + ' %s error samples.gz'%(round(residuals([parms[0], 0, 0, parms[1],parms[2],
parms[3]], mass_ratio, RVp, RVs, JDp, JDs), 3)),
samples, delimiter=',')
print('Samples written!\n')
del samples
#write results to console
#print('Results:')
#for i in range(4):
# print(results[i][0], '+',results[i][1], '-',results[i][2])
#write results to log file
table = open('log.txt', 'a+')
labels = ('K', 'T', 'P', 'y')
print('\n' , system, " Results:", file = table)
print('RMS error: ', residuals([parms[0],0, 0,
parms[1],parms[2], parms[3]], mass_ratio, RVp, RVs, JDp, JDs), file = table)
print('q = ', mass_ratio, ' +/- ', standard_error , file = table)
for i in range(4):
print(labels[i], ' = ', results[i][0], ' +', results[i][1], ' -', results[i][2], file = table)
table.close()
#end timer
end = time.time()
elapsed = end-start
print('Fitting time was ', int(elapsed), ' seconds.\n') | gpl-3.0 |
hitszxp/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 33 | 6189 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
"""Test NNDSVD behaviour on negative input"""
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
"""Test that NNDSVD does not return negative values"""
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
"""Test NNDSVD error
Test that _initialize_nmf error is less than the standard deviation of the
entries in the matrix.
"""
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
"""Test NNDSVD variants correctness
Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
the basic version has zeros.
"""
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
"""Test model fit behaviour on negative input"""
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
"""Test that the decomposition does not contain negative values"""
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
"""Test that the fit is not too far away"""
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
"""Test that NLS solver doesn't return negative values"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
"""Test that the NLS results should be close"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
"""Test that NMF.transform returns close values
(transform uses scipy.optimize.nnls for now)
"""
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
"""Smoke test for the case of more components than features."""
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
"""Test sparseness
Test that sparsity constraints actually increase sparseness in the
part where they are applied.
"""
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
"""Test that sparse matrices are accepted as input"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
"""Test that transform works on sparse data. Issue #2124"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
davek44/Basset | src/basset_sick_gain.py | 1 | 10809 | #!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import os
import random
import subprocess
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import pysam
from scipy.stats.mstats import mquantiles
import seaborn as sns
import stats
################################################################################
# basset_sick_gain.py
#
# Shuffle SNPs outside of DNase sites and compare the SAD distributions.
#
# Todo:
# -Properly handle indels.
################################################################################
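# Example invocation (the file names here are illustrative only):
#   basset_sick_gain.py -t targets.txt snps.vcf dnase_sites.bed model_cnn.th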
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <vcf_file> <excl_bed_file> <model_file>'
parser = OptionParser(usage)
parser.add_option('-c', dest='cuda', default=False, action='store_true', help='Run on GPU [Default: %default]')
parser.add_option('-e', dest='add_excl_bed', default='%s/assembly/hg19_gaps.bed'%os.environ['HG19'], help='Additional genomic regions to exclude from the shuffle [Default: %default]')
parser.add_option('-f', dest='genome_fasta', default='%s/assembly/hg19.fa'%os.environ['HG19'], help='Genome FASTA [Default: %default]')
parser.add_option('-g', dest='genome_file', default='%s/assembly/human.hg19.core.genome'%os.environ['HG19'], help='Genome file for shuffling [Default: %default]')
parser.add_option('-l', dest='seq_len', type='int', default=600, help='Sequence length provided to the model [Default: %default]')
parser.add_option('-o', dest='out_dir', default='sad_shuffle', help='Output directory')
parser.add_option('-r', dest='replot', default=False, action='store_true', help='Re-plot only, without re-computing [Default: %default]')
parser.add_option('-s', dest='num_shuffles', default=1, type='int', help='Number of SNP shuffles [Default: %default]')
parser.add_option('-t', dest='targets_file', default=None, help='Target index, sample name table for targets to plot [Default: %default]')
(options,args) = parser.parse_args()
if len(args) != 3:
parser.error('Must provide VCF file, excluded BED file, and model file')
else:
vcf_file = args[0]
excl_bed_file = args[1]
model_file = args[2]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
#########################################
# supplement the excluded sites
#########################################
if options.add_excl_bed is not None:
supp_excl_bed_file = '%s/excl.bed' % options.out_dir
supp_excl_bed_out = open(supp_excl_bed_file, 'w')
# copy exclusion BED file
for line in open(excl_bed_file):
a = line.split()
print('\t'.join(a[:3]), file=supp_excl_bed_out)
# add on additional sites
for line in open(options.add_excl_bed):
a = line.split()
print('\t'.join(a[:3]), file=supp_excl_bed_out)
supp_excl_bed_out.close()
excl_bed_file = supp_excl_bed_file
#########################################
# compute SAD
#########################################
# filter VCF to excluded SNPs
excl_vcf_file = '%s/excl.vcf' % options.out_dir
if not options.replot:
exclude_vcf(vcf_file, excl_bed_file, excl_vcf_file)
# compute SADs
true_sad = compute_sad(excl_vcf_file, model_file, '%s/excl_sad'%options.out_dir, options.seq_len, options.cuda, options.replot)
#########################################
# compute shuffled SAD
#########################################
# open reference genome
genome_open = pysam.Fastafile(options.genome_fasta)
shuffle_sad = np.zeros((true_sad.shape[0],true_sad.shape[1],options.num_shuffles))
for ni in range(options.num_shuffles):
# shuffle the SNPs
shuf_vcf_file = '%s/shuf%d.vcf' % (options.out_dir, ni)
shuffle_snps(excl_vcf_file, shuf_vcf_file, excl_bed_file, options.genome_file, genome_open)
# compute SAD scores for shuffled SNPs
shuffle_sad[:,:,ni] = compute_sad(shuf_vcf_file, model_file, '%s/shuf%d_sad'%(options.out_dir,ni), options.seq_len, options.cuda, options.replot)
# compute shuffle means
shuffle_sad_mean = shuffle_sad.mean(axis=2)
#########################################
# stats and plots
#########################################
targets = {}
if options.targets_file:
for line in open(options.targets_file):
a = line.split()
targets[int(a[0])] = a[1]
else:
for ti in range(true_sad.shape[1]):
targets[ti] = 't%d' % ti
mw_out = open('%s/mannwhitney.txt' % options.out_dir, 'w')
# plot defaults
sns.set(font_scale=1.5, style='ticks')
for ti in targets:
# plot CDFs
sns_colors = sns.color_palette('deep')
plt.figure()
plt.hist(true_sad[:,ti], 1000, normed=1, histtype='step', cumulative=True, color=sns_colors[0], linewidth=1, label='SNPs')
plt.hist(shuffle_sad[:,ti,:].flatten(), 1000, normed=1, histtype='step', cumulative=True, color=sns_colors[2], linewidth=1, label='Shuffle')
ax = plt.gca()
ax.grid(True, linestyle=':')
ax.set_xlim(-.15, .15)
plt.legend()
plt.savefig('%s/%s_cdf.pdf' % (options.out_dir,targets[ti]))
plt.close()
# plot Q-Q
true_q = mquantiles(true_sad[:,ti], np.linspace(0,1,min(10000,true_sad.shape[0])))
shuf_q = mquantiles(shuffle_sad_mean[:,ti], np.linspace(0,1,min(10000,true_sad.shape[0])))
plt.figure()
plt.scatter(true_q, shuf_q, color=sns_colors[0])
pmin = 1.05*min(true_q[0], shuf_q[0])
pmax = 1.05*max(true_q[-1], shuf_q[-1])
plt.plot([pmin,pmax], [pmin,pmax], color='black', linewidth=1)
ax = plt.gca()
ax.set_xlim(pmin,pmax)
ax.set_ylim(pmin,pmax)
ax.set_xlabel('True SAD')
ax.set_ylabel('Shuffled SAD')
ax.grid(True, linestyle=':')
plt.savefig('%s/%s_qq.pdf' % (options.out_dir,targets[ti]))
plt.close()
# compute Mann-Whitney
mw_z, mw_p = stats.mannwhitneyu(true_sad[:,ti], shuffle_sad[:,ti,:].flatten())
cols = (ti, targets[ti], true_sad.shape[0], true_sad[:,ti].mean(), shuffle_sad[:,ti,:].mean(), mw_z, mw_p)
print('%3d %20s %5d %7.4f %7.4f %6.2f %6.1e' % cols, file=mw_out)
mw_out.close()
def compute_sad(vcf_file, model_file, out_dir, seq_len, gpu, replot):
''' Run basset_sad.py to compute scores. '''
cuda_str = ''
if gpu:
cuda_str = '--cudnn'
cmd = 'basset_sad.py %s -l %d -o %s %s %s' % (cuda_str, seq_len, out_dir, model_file, vcf_file)
if not replot:
subprocess.call(cmd, shell=True)
sad_table = []
sad_table_in = open('%s/sad_table.txt' % out_dir)
sad_table_in.readline()
last_snpid = None
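    # sad_table.txt is assumed to hold one row per (SNP, target) pair, with rows for the
    # same SNP contiguous; consecutive rows sharing a SNP id are grouped into one list,
    # so the result is an array of shape (num_snps, num_targets)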
for line in sad_table_in:
a = line.split()
snpid = a[0]
sad = float(a[-1])
if last_snpid == snpid:
sad_table[-1].append(sad)
else:
sad_table.append([sad])
last_snpid = snpid
return np.array(sad_table)
def exclude_vcf(vcf_file, excl_bed_file, excl_vcf_file):
''' Filter for SNPs outside of the excluded regions
and remove indels. '''
# copy header
excl_vcf_out = open(excl_vcf_file, 'w')
for line in open(vcf_file):
if line.startswith('#'):
print(line, file=excl_vcf_out, end='')
else:
break
# intersect
p = subprocess.Popen('bedtools intersect -v -a %s -b %s' % (vcf_file, excl_bed_file), stdout=subprocess.PIPE, shell=True)
for line in p.stdout:
a = line.split()
# filter for SNPs only
if len(a[3]) == len(a[4]) == 1:
print(line, file=excl_vcf_out, end='')
excl_vcf_out.close()
def shuffle_snps(vcf_file, shuf_vcf_file, excl_bed_file, genome_file, genome_open):
''' Shuffle the given SNPs. '''
# extract header
header_lines = []
for line in open(vcf_file):
if line.startswith('#'):
header_lines.append(line)
else:
break
# open shuffled VCF
shuf_vcf_out = open(shuf_vcf_file, 'w')
# unset SNPs
unset_vcf_file = vcf_file
unset = 1 # anything > 0
si = 0
while unset > 0:
print('Shuffle %d, %d remain' % (si, unset))
# shuffle w/ BEDtools
cmd = 'bedtools shuffle -excl %s -i %s -g %s' % (excl_bed_file, unset_vcf_file, genome_file)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
# update and open next unset VCF
unset_vcf_file = '%s.%d' % (shuf_vcf_file,si)
unset_vcf_out = open(unset_vcf_file, 'w')
# print header
for line in header_lines:
print(line, file=unset_vcf_out, end='')
# zero unset counter
unset = 0
# fix alleles before printing
for line in p.stdout:
a = line.split()
chrom = a[0]
pos = int(a[1])
snp_nt = a[3]
# get reference allele
ref_nt = genome_open.fetch(chrom, pos-1, pos)
if ref_nt == snp_nt:
# save to final VCF
print(line, file=shuf_vcf_out, end='')
else:
# write to next unset
print(line, file=unset_vcf_out, end='')
unset += 1
unset_vcf_out.close()
si += 1
shuf_vcf_out.close()
# clean up temp files
for ci in range(si):
os.remove('%s.%d' % (shuf_vcf_file,ci))
def shuffle_snps_old(vcf_file, shuf_vcf_file, excl_bed_file, genome_file, genome_open):
''' Shuffle the given SNPs. '''
# open shuffled VCF
shuf_vcf_out = open(shuf_vcf_file, 'w')
# shuffle w/ BEDtools
cmd = 'bedtools shuffle -excl %s -i %s -g %s' % (excl_bed_file, vcf_file, genome_file)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
# fix alleles before printing
for line in p.stdout:
a = line.split()
chrom = a[0]
pos = int(a[1])
snp_nt = a[3]
# set reference allele
ref_nt = genome_open.fetch(chrom, pos-1, pos)
        # the original alt_nt sampling was accidentally deleted; as a plausible
        # reconstruction, sample a random alternate allele different from the reference
        alt_nt = random.choice([nt for nt in 'ACGT' if nt != ref_nt.upper()])
        # write into columns
a[3] = ref_nt
a[4] = alt_nt
print('\t'.join(a), file=shuf_vcf_out)
shuf_vcf_out.close()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| mit |
shravya-ks/ECN-ns3 | src/flow-monitor/examples/wifi-olsr-flowmon.py | 59 | 7427 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
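# main() below places NUM_NODES_SIDE x NUM_NODES_SIDE ad-hoc Wi-Fi nodes on a grid
# with DISTANCE metres between neighbours, each running OLSR routing.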
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.WifiMacHelper()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
fbagirov/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
jdfekete/progressivis | tests/test_02_table_eval.py | 1 | 3593 | from . import ProgressiveTest, skip
from progressivis import Scheduler
from progressivis.table.table import Table
import numpy as np
import pandas as pd
class TestTableEval(ProgressiveTest):
def setUp(self):
super(TestTableEval, self).setUp()
self.scheduler = Scheduler.default
def test_filtering(self):
t = Table('table_filtering', dshape="{a: int, b: float32}", create=True)
t.resize(20)
ivalues = np.random.randint(100,size=20)
t['a'] = ivalues
fvalues = np.random.rand(20)*100
t['b'] = fvalues
df = pd.DataFrame(t.to_dict())
def small_fun(expr, r):
te = t.eval(expr, result_object=r)
dfe = df.eval(expr)
self.assertTrue(np.array_equal(te['a'].loc[:], df[dfe]['a']))
self.assertTrue(np.allclose(te['b'].loc[:], df[dfe]['b']))
def small_fun_ne(expr):
r = 'raw_numexpr'
te = t.eval(expr, result_object=r)
dfe = df.eval(expr)
self.assertTrue(np.array_equal(te, dfe.values))
small_fun_ne('(a>10) & (a <80)')
small_fun_ne('(b>10) & (b <80)')
small_fun_ne('a>=b')
small_fun('(a>10) & (a <80)', 'table')
small_fun('(b>10) & (b <80)', 'table')
small_fun('a>=b', 'table')
small_fun('(a>10) & (a <80)', 'view')
def test_filtering2(self):
t = Table('table_filtering', dshape="{a: int, b: float32}", create=True)
sz = 1000
sz_del = 100
t.resize(sz)
np.random.seed(42)
ivalues = np.random.randint(100,size=sz)
t['a'] = ivalues
fvalues = np.random.rand(sz)*100
t['b'] = fvalues
df = pd.DataFrame(t.to_dict())
to_del = np.random.randint(len(t)-1, size=sz_del)
del t.loc[to_del]
df = df.drop(to_del)
self.assertListEqual(list(t.index), list(df.index))
def small_fun_index(expr):
ix = t.eval(expr)
dfe = df.eval(expr)
self.assertSetEqual(set(ix), set(df.index[dfe]))
small_fun_index('(a>10) & (a <80)')
def test_assign(self):
t = Table('table_eval_assign', dshape="{a: int, b: float32}", create=True)
t.resize(20)
ivalues = np.random.randint(100,size=20)
t['a'] = ivalues
fvalues = np.random.rand(20)*100
t['b'] = fvalues
df = pd.DataFrame(t.to_dict())
t2 = t.eval('a = a+2*b', inplace=False)
df2 = df.eval('a = a+2*b', inplace=False)
self.assertTrue(np.allclose(t2['a'], df2['a']))
self.assertTrue(np.allclose(t2['b'], df2['b']))
t.eval('b = a+2*b', inplace=True)
df.eval('b = a+2*b', inplace=True)
self.assertTrue(np.allclose(t['a'], df['a']))
self.assertTrue(np.allclose(t['b'], df['b']))
@skip
def test_user_dict(self):
t = Table('table_user_dict', dshape="{a: int, b: float32}", create=True)
t.resize(20)
ivalues = np.random.randint(100,size=20)
t['a'] = ivalues
fvalues = np.random.rand(20)*100
t['b'] = fvalues
df = pd.DataFrame(t.to_dict())
t2 = t.eval('a = a+2*b', inplace=False)
df2 = df.eval('x = a.loc[3]+2*b.loc[3]', inplace=False)
#print(df2.x)
#self.assertTrue(np.allclose(t2['a'], df2['a']))
#self.assertTrue(np.allclose(t2['b'], df2['b']))
#t.eval('b = a+2*b', inplace=True)
#df.eval('b = a+2*b', inplace=True)
#self.assertTrue(np.allclose(t['a'], df['a']))
#self.assertTrue(np.allclose(t['b'], df['b']))
| bsd-2-clause |
tdhopper/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
pramitchoudhary/Experiments | modelinterpretation/random_forest_intepretation_treeinterpreter.py | 2 | 1585 |
# coding: utf-8
# In[1]:
# Reference: https://github.com/andosa/treeinterpreter
# Blog: http://blog.datadive.net/random-forest-interpretation-with-scikit-learn/
from treeinterpreter import treeinterpreter as ti
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import numpy as np
# In[2]:
from sklearn.datasets import load_boston
boston = load_boston()
rf = RandomForestRegressor()
# In[13]:
boston.data[:300,].shape
# In[16]:
rf = RandomForestRegressor()
fit1 = rf.fit(boston.data[:300], boston.target[:300])
# In[17]:
fit1
# In[37]:
instances = boston.data[[300, 309]]
print "Instance 0 prediction:", rf.predict(instances[0].reshape(1,13))
print "Instance 1 prediction:", rf.predict(instances[1].reshape(1,13))
# In[38]:
prediction, bias, contributions = ti.predict(rf, instances)
# In[40]:
for i in range(len(instances)):
print "Instance", i
print "Bias (trainset mean)", bias[i]
print "Feature contributions:"
for c, feature in sorted(zip(contributions[i],
boston.feature_names),
key=lambda x: -abs(x[0])):
print feature, round(c, 2)
print "-"*20
# In[42]:
print prediction
print bias + np.sum(contributions, axis=1)
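# treeinterpreter's decomposition is additive: every prediction equals the bias
# (training-set mean) plus the sum of the per-feature contributions, so the two
# printouts above should agree up to floating-point error.
assert np.allclose(prediction, bias + np.sum(contributions, axis=1))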
# In[43]:
# the basic feature importance feature provided by sklearn
fit1.feature_importances_
# In[44]:
# treeinterpreter uses the apply() function to retrieve the leaf indices,
# from which the decision path of each tree is reconstructed
rf.apply
# In[47]:
rf.apply(instances)
# In[ ]:
| unlicense |
Windy-Ground/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
The script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
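# alpha is the regularization strength passed to both Lasso implementations
# via factory(alpha=alpha) in bench() below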
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
chrsrds/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 127 | 1270 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure('scikit-learn parallel %s benchmark results' % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel('n_samples')
plt.ylabel('Time (s)')
plt.title('Parallel %s' % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
| bsd-3-clause |
nicolas998/Op_Radar | 06_Codigos/Actualiza_MeanStorage_Hist.py | 2 | 4715 | #!/usr/bin/env python
from wmf import wmf
import numpy as np
import pickle
import pandas as pnd
import pylab as pl
import argparse
import textwrap
import netCDF4
from multiprocessing import Pool
import os
#-------------------------------------------------------------------
#LOCAL FUNCTIONS
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#ARGUMENT PARSER
#-------------------------------------------------------------------
#Input parameters
parser=argparse.ArgumentParser(
prog='Actualiza_Caudales_Hist',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
        Updates the mean storage series estimated by the model for the basin
        being simulated. There are as many mean storage files as there are
        calibrations defined for the basin project.
'''))
#Required parameters
parser.add_argument("rutaShist",help="(Obligatorio) Carpeta con la serie historica de almacenamientos .StoHist simulados por el modelo ")
parser.add_argument("rutaSsim",help="(Obligatorio) Carpeta con las series de almacenamientos .StOhdr simulados en el ultimo intervalo")
parser.add_argument("-n", "--newhist", help="(Opcional) Con esta opcion el script genera un nuevo punto de generacion de historicos",
action = 'store_true', default = False)
parser.add_argument("-i", "--fechai", help="(Opcional) Fecha de inicio de nuevo punto de historicos (YYYY-MM-DD HH:MM)")
parser.add_argument("-f", "--fechaf", help="(Opcional) Fecha de fin de nuevo punto de historicos (YYYY-MM-DD HH:MM)")
parser.add_argument("-v","--verbose",help="(Opcional) Hace que el modelo indique en que porcentaje de ejecucion va",
action = 'store_true', default = False)
args=parser.parse_args()
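#Example invocation (the paths and dates are illustrative only):
#  python Actualiza_MeanStorage_Hist.py /path/to/StoHist/ /path/to/StOhdr/ -v
#To start a new history file over a given period:
#  python Actualiza_MeanStorage_Hist.py /path/to/StoHist/ /path/to/StOhdr/ -n -i "2017-01-01 00:00" -f "2017-06-30 00:00"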
#-------------------------------------------------------------------
# LIST OF SIMULATED STORAGE FILES
#-------------------------------------------------------------------
#List the simulated storage files without duplicates
L = os.listdir(args.rutaSsim)
L = [i for i in L if i.endswith('StOhdr')]
Lhist = [i[:-7] + '.StoHist' for i in L]
#-------------------------------------------------------------------
# NEW HISTORY STARTING POINT (only if enabled)
#-------------------------------------------------------------------
if args.newhist:
    #Start and end dates
FechaI = args.fechai
FechaF = args.fechaf
    #Create the empty DataFrame from the start date up to the execution point
DifIndex = pnd.date_range(FechaI, FechaF, freq='5min')
Sh = pnd.DataFrame(np.zeros((DifIndex.size, 5))*np.nan,
index=pnd.date_range(FechaI, FechaF, freq='5min'),
columns = ['Tanque_'+str(i) for i in range(1,6)])
Lold = os.listdir(args.rutaShist)
for i in Lhist:
        #Check whether the file already exists
try:
pos = Lold.index(i)
            flag = raw_input('Warning: the history file '+i+' already exists; overwrite it and lose its history? (S o N): ')
if flag == 'S':
flag = True
else:
flag = False
except:
flag = True
        #Save
if flag:
Sh.to_msgpack(args.rutaShist + i)
#-------------------------------------------------------------------
# STORAGE SERIES UPDATE
#-------------------------------------------------------------------
#check that the base file is present in the folder
for act, hist in zip(L, Lhist):
try:
        #Read the current storage
Sactual = pnd.read_csv(args.rutaSsim+act, header = 4, index_col = 5, parse_dates = True, usecols=(1,2,3,4,5,6))
St = pnd.DataFrame(Sactual[Sactual.index == Sactual.index[0]].values, index=[Sactual.index[0],],
columns = ['Tanque_'+str(i) for i in range(1,6)])
        #Read the history file
Shist = pnd.read_msgpack(args.rutaShist + hist)
        # find the missing chunk between the two
Gap = pnd.date_range(Shist.index[-1], Sactual.index[0], freq='5min')
        #Create the missing chunk filled with NaNs
GapData = pnd.DataFrame(np.zeros((Gap.size - 2, 5))*np.nan,
index= Gap[1:-1],
columns = ['Tanque_'+str(i) for i in range(1,6)])
        #append the information
Shist = Shist.append(GapData)
Shist = Shist.append(St)
        #Save the history file
Shist.to_msgpack(args.rutaShist + hist)
        #Notice
        print 'Notice: the historical states file has been updated: '+hist
except:
        print 'Warning: historical states file not found: '+hist+', so it is not updated'
| gpl-3.0 |
jschuecker/nest-simulator | topology/doc/user_manual_scripts/connections.py | 8 | 18562 | # -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# create connectivity figures for topology manual
import nest
import nest.topology as tp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(7654321)
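# The code between '#{ <name> #}' and '#{ end #}' markers below marks the snippets shown
# in the Topology User Manual; the helper functions around them only render the figures
# saved to ../user_manual_figures/.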
def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
top = nest.GetStatus(l)[0]['topology']
ctr = top['center']
ext = top['extent']
if xticks is None:
if 'rows' in top:
dx = float(ext[0]) / top['columns']
dy = float(ext[1]) / top['rows']
xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
top['columns'])
yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
top['rows'])
if xlim is None:
xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
0] / 2. + dx / 2.] # extra space so extent is visible
ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
else:
ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
def conn_figure(fig, layer, connd, targets=None, showmask=True, showkern=False,
xticks=range(-5, 6), yticks=range(-5, 6),
xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
if targets is None:
targets = ((tp.FindCenterElement(layer), 'red'),)
tp.PlotLayer(layer, fig=fig, nodesize=60)
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
if showkern:
kern = connd['kernel']
else:
kern = None
tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
src_size=250, tgt_color=clr, tgt_size=20,
kernel_color='green')
beautify_layer(layer, fig,
xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks,
xlabel='', ylabel='')
fig.gca().grid(False)
# -----------------------------------------------
# Simple connection
#{ conn1 #}
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha'})
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]}}}
tp.ConnectLayers(l, l, conndict)
#{ end #}
fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
targets=((tp.FindCenterElement(l), 'red'),
(tp.FindNearestElement(l, [4., 5.]), 'yellow')))
# same another time, with periodic bcs
lpbc = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha', 'edge_wrap': True})
tp.ConnectLayers(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
targets=((tp.FindCenterElement(lpbc), 'red'),
(tp.FindNearestElement(lpbc, [4., 5.]), 'yellow')))
plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def free_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2))
fig = plt.figure()
#{ conn2r #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]}}}
#{ end #}
free_mask_fig(fig, 231, conndict)
#{ conn2ro #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]},
'anchor': [-1.5, -1.5]}}
#{ end #}
free_mask_fig(fig, 234, conndict)
#{ conn2c #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0}}}
#{ end #}
free_mask_fig(fig, 232, conndict)
#{ conn2co #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0},
'anchor': [-2.0, 0.0]}}
#{ end #}
free_mask_fig(fig, 235, conndict)
#{ conn2d #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.}}}
#{ end #}
free_mask_fig(fig, 233, conndict)
#{ conn2do #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.},
'anchor': [1.5, 1.5]}}
#{ end #}
free_mask_fig(fig, 236, conndict)
plt.savefig('../user_manual_figures/conn2.png', bbox_inches='tight')
# -----------------------------------------------
# 3d masks
def conn_figure_3d(fig, layer, connd, targets=None, showmask=True,
showkern=False,
xticks=range(-5, 6), yticks=range(-5, 6),
xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
if targets is None:
targets = ((tp.FindCenterElement(layer), 'red'),)
tp.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5, .5, 1.))
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
if showkern:
kern = connd['kernel']
else:
kern = None
tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
src_size=250, tgt_color=clr, tgt_size=60,
kernel_color='green')
ax = fig.gca()
ax.set_aspect('equal', 'box')
plt.draw()
def free_mask_3d_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer(
{'rows': 11, 'columns': 11, 'layers': 11, 'extent': [11., 11., 11.],
'elements': 'iaf_psc_alpha'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc, projection='3d')
conn_figure_3d(fig, l, cdict, xticks=range(-5, 6, 2),
yticks=range(-5, 6, 2))
fig = plt.figure()
#{ conn_3d_a #}
conndict = {'connection_type': 'divergent',
'mask': {'box': {'lower_left': [-2., -1., -1.],
'upper_right': [2., 1., 1.]}}}
#{ end #}
free_mask_3d_fig(fig, 121, conndict)
#{ conn_3d_b #}
conndict = {'connection_type': 'divergent',
'mask': {'spherical': {'radius': 2.5}}}
#{ end #}
free_mask_3d_fig(fig, 122, conndict)
plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')
# -----------------------------------------------
# grid masks
def grid_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
showmask=False)
fig = plt.figure()
#{ conn3 #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5}}}
#{ end #}
grid_mask_fig(fig, 131, conndict)
#{ conn3c #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': 1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 132, conndict)
#{ conn3x #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': -1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 133, conndict)
plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')
# -----------------------------------------------
# kernels (distance-dependent connection probabilities)
def kernel_fig(fig, loc, cdict, showkern=True):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
'elements': 'iaf_psc_alpha'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
showkern=showkern)
fig = plt.figure()
#{ conn4cp #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': 0.5}
#{ end #}
kernel_fig(fig, 231, conndict)
#{ conn4g #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.}}}
#{ end #}
kernel_fig(fig, 232, conndict)
#{ conn4gx #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}, 'anchor': [1.5, 1.5]},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'anchor': [1.5, 1.5]}}}
#{ end #}
kernel_fig(fig, 233, conndict)
plt.draw()
#{ conn4cut #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'cutoff': 0.5}}}
#{ end #}
kernel_fig(fig, 234, conndict)
#{ conn42d #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian2D': {'p_center': 1.0,
'sigma_x': 1., 'sigma_y': 3.}}}
#{ end #}
kernel_fig(fig, 235, conndict, showkern=False)
plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')
# -----------------------------------------------
def wd_fig(fig, loc, ldict, cdict, what, rpos=None,
xlim=[-1, 51], ylim=[0, 1], xticks=range(0, 51, 5),
yticks=np.arange(0., 1.1, 0.2), clr='blue',
label=''):
nest.ResetKernel()
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict)
ax = fig.add_subplot(loc)
if rpos is None:
rn = nest.GetLeaves(l)[0][:1] # first node
else:
rn = tp.FindNearestElement(l, rpos)
conns = nest.GetConnections(rn)
cstat = nest.GetStatus(conns)
vals = np.array([sd[what] for sd in cstat])
tgts = [sd['target'] for sd in cstat]
locs = np.array(tp.GetPosition(tgts))
ax.plot(locs[:, 0], vals, 'o', mec='none', mfc=clr, label=label)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
fig = plt.figure()
#{ conn5lin #}
ldict = {'rows': 1, 'columns': 51,
'extent': [51., 1.], 'center': [25., 0.],
'elements': 'iaf_psc_alpha'}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
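# With these parameters the weight decays linearly with distance from the driver
# node, w(d) = 1.0 - 0.05*d (with a cutoff at 0.0), while the delay grows as
# delay(d) = 0.1 + 0.02*d; the two curves plotted below show exactly these profiles.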
wd_fig(fig, 311, ldict, cdict, 'weight', label='Weight')
wd_fig(fig, 311, ldict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()
lpdict = {'rows': 1, 'columns': 51, 'extent': [51., 1.], 'center': [25., 0.],
'elements': 'iaf_psc_alpha', 'edge_wrap': True}
#{ conn5linpbc #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 312, lpdict, cdict, 'weight', label='Weight')
wd_fig(fig, 312, lpdict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}}}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Linear',
rpos=[25., 0.], clr='orange')
#{ conn5exp #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'exponential': {'a': 1., 'tau': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Exponential',
rpos=[25., 0.])
#{ conn5gauss #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'gaussian': {'p_center': 1., 'sigma': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Gaussian', clr='green',
rpos=[25., 0.])
#{ conn5uniform #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Uniform', clr='red',
rpos=[25., 0.])
fig.gca().legend()
plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')
# --------------------------------
def pn_fig(fig, loc, ldict, cdict,
xlim=[0., .5], ylim=[0, 3.5], xticks=range(0, 51, 5),
yticks=np.arange(0., 1.1, 0.2), clr='blue',
label=''):
nest.ResetKernel()
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict)
ax = fig.add_subplot(loc)
rn = nest.GetLeaves(l)[0]
conns = nest.GetConnections(rn)
cstat = nest.GetStatus(conns)
srcs = [sd['source'] for sd in cstat]
tgts = [sd['target'] for sd in cstat]
dist = np.array(tp.Distance(srcs, tgts))
ax.hist(dist, bins=50, histtype='stepfilled', normed=True)
r = np.arange(0., 0.51, 0.01)
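# Reference curve: with the linear kernel p(d) = 1 - 2d (zero beyond d = 0.5) the
# density of source-target distances is proportional to 2*pi*r*(1 - 2r); normalising
# that expression over [0, 0.5] gives the factor 12/pi used in the plot below.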
plt.plot(r, 2 * np.pi * r * (1 - 2 * r) * 12 / np.pi, 'r-', lw=3,
zorder=-10)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
"""ax.set_xticks(xticks)
ax.set_yticks(yticks)"""
# ax.set_aspect(100, 'box')
ax.set_xlabel('Source-target distance d')
ax.set_ylabel('Connection probability pconn(d)')
fig = plt.figure()
#{ conn6 #}
pos = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)]
for j in range(1000)]
ldict = {'positions': pos, 'extent': [2., 2.],
'elements': 'iaf_psc_alpha', 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.0}},
'kernel': {'linear': {'c': 1., 'a': -2., 'cutoff': 0.0}},
'number_of_connections': 50,
'allow_multapses': True, 'allow_autapses': False}
#{ end #}
pn_fig(fig, 111, ldict, cdict)
plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')
# -----------------------------
#{ conn7 #}
nest.ResetKernel()
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.8,
'sources': {'model': 'pyr'},
'targets': {'model': 'in'}}
cdict_i2p = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
'upper_right': [0.2, 0.2]}},
'sources': {'model': 'in'},
'targets': {'model': 'pyr'}}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn8 #}
nest.ResetKernel()
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.8,
'sources': {'model': 'pyr'},
'targets': {'model': 'in'},
'synapse_model': 'exc'}
cdict_i2p = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
'upper_right': [0.2, 0.2]}},
'sources': {'model': 'in'},
'targets': {'model': 'pyr'},
'synapse_model': 'inh'}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn9 #}
nrn_layer = tp.CreateLayer({'rows': 20,
'columns': 20,
'elements': 'iaf_psc_alpha'})
stim = tp.CreateLayer({'rows': 1,
'columns': 1,
'elements': 'poisson_generator'})
cdict_stim = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.1},
'anchor': [0.2, 0.2]}}
tp.ConnectLayers(stim, nrn_layer, cdict_stim)
#{ end #}
# ----------------------------
#{ conn10 #}
rec = tp.CreateLayer({'rows': 1,
'columns': 1,
'elements': 'spike_detector'})
cdict_rec = {'connection_type': 'convergent',
'mask': {'circular': {'radius': 0.1},
'anchor': [-0.2, 0.2]}}
tp.ConnectLayers(nrn_layer, rec, cdict_rec)
#{ end #}
# ----------------------------
#{ conn11 #}
rec = nest.Create('spike_detector')
nrns = nest.GetLeaves(nrn_layer, local_only=True)[0]
nest.Connect(nrns, rec)
#{ end #}
| gpl-2.0 |
chengjun/iching | iching/iching.py | 2 | 4912 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 22 14:19:41 2015
@author: chengjun
"""
import random
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from collections import defaultdict
def ichingDate(d):
random.seed(d)
# NOTE: a try/except around a bare print statement cannot provide Python 3
# compatibility (the statement form is a SyntaxError, caught at compile time),
# so a plain print() call, which behaves the same on both interpreters, is used.
print('Your birthday & your prediction time: ' + str(d))
def sepSkyEarth(data):
sky = random.randint(1, data-2)
earth = data - sky
earth -= 1
return sky , earth
def getRemainder(num):
rm = num % 4
if rm == 0:
rm = 4
return rm
def getChange(data):
sky, earth = sepSkyEarth(data)
skyRemainder = getRemainder(sky)
earthRemainder = getRemainder(earth)
change = skyRemainder + earthRemainder + 1
data = data - change
return sky, earth, change, data
def getYao(data):
sky, earth, firstChange, data = getChange(data)
sky, earth, secondChange, data = getChange(data)
sky, earth, thirdChange, data = getChange(data)
yao = data/4
return yao, firstChange, secondChange, thirdChange
def sixYao():
yao1 = getYao(data = 50 - 1)[0]
yao2 = getYao(data = 50 - 1)[0]
yao3 = getYao(data = 50 - 1)[0]
yao4 = getYao(data = 50 - 1)[0]
yao5 = getYao(data = 50 - 1)[0]
yao6 = getYao(data = 50 - 1)[0]
return[yao1, yao2, yao3, yao4, yao5, yao6]
def fixYao(num):
if num == 6 or num == 9:
print("there is a changing prediction! Also run changePredict()")
return num % 2
def changeYao(num):
if num == 6:
num = 1
elif num == 9:
num = 2
num = num % 2
return(num)
def fixPredict(pred):
fixprd = [fixYao(i) for i in pred]
fixprd = list2str(fixprd)
return fixprd
def list2str(l):
si = ''
for i in l:
si = si + str(i)
return si
def changePredict(pred):
changeprd = [changeYao(i) for i in pred]
changeprd = list2str(changeprd)
return changeprd
def getPredict():
pred = sixYao()
fixPred = fixPredict(pred)
if 6 in pred or 9 in pred:
changePred = changePredict(pred)
else:
changePred = None
return fixPred, changePred
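# Illustrative usage sketch (not part of the original module; the helper name is
# hypothetical): one full casting. getPredict() returns a six-character string of
# '1' (yang) / '0' (yin) lines, plus a second hexagram string whenever moving
# lines (6 or 9) were drawn, and ichingName() maps the string(s) to hexagram names.
def _example_casting(date_seed=20150222):
    ichingDate(date_seed)          # seed the generator with a date-like integer
    fixed, changed = getPredict()  # e.g. ('101010', None) or two hexagrams
    return ichingName(fixed, changed)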
def ichingName(now, future):
dt = {'111111':u'乾','011111':u'夬','000000':u'坤','010001':u'屯','100010':u'蒙','010111':u'需','111010':u'讼','000010': u'师',
'010000':u'比','110111':u'小畜','111011':u'履','000111':u'泰','111000':u'否','111101':u'同人','101111':u'大有','000100':u'谦',
'001000':u'豫','011001':u'随','100110':u'蛊','000011':u'临','110000':u'观','101001':u'噬嗑','100101':u'贲','100000':u'剥',
'000001':u'复','111001':u'无妄','100111':u'大畜','100001':u'颐','011110':u'大过','010010':u'坎','101101':u'离','011100':u'咸',
'001110':u'恒','111100':u'遁','001111':u'大壮','101000':u'晋','000101':u'明夷','110101':u'家人','101011':u'睽','010100':u'蹇',
'001010':u'解','100011':u'损','110001':u'益','111110':u'姤','011000':u'萃','000110':u'升','011010':u'困','010110':u'井',
'011101':u'革','101110':u'鼎','001001':u'震','100100':u'艮','110100':u'渐','001011':u'归妹','001101':u'丰','101100':u'旅',
'110110':u'巽','011011':u'兑','110010':u'涣','010011':u'节','110011':u'中孚','001100':u'小过','010101':u'既济','101010':u'未济'}
if future:
name = dt[now] + ' & ' + dt[future]
else:
name = dt[now]
return name
def ichingText(k, iching):
path = iching.__file__
path = path.split('iching')[0]
import json
dat = json.load(open(path + 'iching/package_data.dat'), encoding = 'utf-8')
return dat[k]
def plotTransition(N, w):
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from collections import defaultdict
changes = {}
for i in range(N):
sky, earth, firstChange, data = getChange(data = 50 -1)
sky, earth, secondChange, data = getChange(data)
sky, earth, thirdChange, data = getChange(data)
changes[i]=[firstChange, secondChange, thirdChange, data/4]
ichanges = changes.values()
firstTransition = defaultdict(int)
for i in ichanges:
firstTransition[i[0], i[1]]+=1
secondTransition = defaultdict(int)
for i in ichanges:
secondTransition[i[1], i[2]]+=1
thirdTransition = defaultdict(int)
for i in ichanges:
thirdTransition[i[2], i[3]]+=1
cmap = cm.get_cmap('Accent_r', len(ichanges))
for k, v in firstTransition.iteritems():
plt.plot([1, 2], k, linewidth = v*w/N)
for k, v in secondTransition.iteritems():
plt.plot([2, 3], k, linewidth = v*w/N)
for k, v in thirdTransition.iteritems():
plt.plot([3, 4], k, linewidth = v*w/N)
plt.xlabel(u'Time')
plt.ylabel(u'Changes')
| mit |
vybstat/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this, :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
Second, we set alpha and compare the performance of different
feature selection methods, using the area under the precision-recall
curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
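# Illustrative sketch (not part of the original example; the helper name is
# hypothetical): the mutual incoherence of an i.i.d. Gaussian design is typically
# below 1, whereas re-using the relevant columns among the irrelevant ones pushes
# it to 1 or above.
def _example_mutual_incoherence(seed=0):
    rng_demo = np.random.RandomState(seed)
    X_rel = rng_demo.normal(size=(50, 3))
    X_irr = rng_demo.normal(size=(50, 20))
    mi_good = mutual_incoherence(X_rel, X_irr)
    mi_bad = mutual_incoherence(X_rel, np.hstack([X_rel, X_irr]))
    return mi_good, mi_bad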
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and makes it easier to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Suppress the user warning outputs - they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
francesco-mannella/dmp-esn | parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves_angles/data/plot.py | 8 | 2377 | #!/usr/bin/env python
import glob
import numpy as np
import matplotlib
matplotlib.use("cairo")  # select the backend before pyplot is imported; calling use() afterwards may have no effect
import matplotlib.pyplot as plt
import os
import sys
pathname = os.path.dirname(sys.argv[0])
if pathname:
os.chdir(pathname)
coords = None
if len(sys.argv) == 5:
try :
coords = [ float(sys.argv[x]) for x in range(1,5) ]
except ValueError:
pass
n_dim = None
def get_trajectories(pattern):
trs = []
names = glob.glob(pattern)
names.sort()
for fname in names:
t = np.loadtxt(fname)
trs.append(t)
return trs
trains = get_trajectories("trajectories/tl*")
tests = get_trajectories("trajectories/tt*")
train_results = get_trajectories("results/rtl*")
test_results = get_trajectories("results/rtt*")
ltrain = None
ltest = None
lall = []
idcs = np.arange(len(trains))
theo_train = {
'color': [.6,.6,1],
'lw': 5,
'zorder': 2,
'label': "Training"
}
repr_train = {
'color': [0,0,.3],
'lw': 1.5,
'zorder': 3,
'label': "Training repr."
}
theo_test = {
'color': [1,.6,.6],
'lw': 5,
'zorder': 2,
'label': "Test"
}
repr_test = {
'color': [.3,0,0],
'lw': 1.5,
'zorder': 3,
'label': "Test repr"
}
def common_plot(ax, d, label, color, lw, zorder):
h, = ax.plot(d[:,1]+d[:,7]*6, d[:,2]+d[:,8]*6,
color=color, lw=lw, zorder=zorder,
label=label)
return h
def plot_trajectories(ax, ttype, lall, **kargs):
idcs = np.arange(len(ttype))
for d,i in zip(ttype, idcs):
if i == 0:
lplot = common_plot(ax, d, **kargs)
lall.append(lplot)
else:
common_plot(ax, d, **kargs)
fig = plt.figure("DMP Stulp", figsize=(8,8))
ax = fig.add_subplot(111, aspect="equal")
plot_trajectories(ax, trains, lall, **theo_train)
plot_trajectories(ax, train_results, lall, **repr_train)
plot_trajectories(ax, tests, lall, **theo_test)
plot_trajectories(ax, test_results, lall, **repr_test)
print coords
if coords is None:
ax.set_xlim([-0.5,10.2])
ax.set_ylim([-0.5,7.2])
else:
ax.set_xlim([coords[0], coords[1]])
ax.set_ylim([coords[2], coords[3]])
ax.set_xticks([])
ax.set_yticks([])
ax.legend(handles=lall)
plt.tight_layout()
plt.show()
| gpl-2.0 |
daodaoliang/bokeh | bokeh/models/sources.py | 9 | 11157 | from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import HasProps
from ..properties import Any, Int, String, Instance, List, Dict, Either, Bool, Enum
from ..validation.errors import COLUMN_LENGTHS
from .. import validation
from ..util.serialization import transform_column_source_data
from .callbacks import Callback
from bokeh.deprecate import deprecated
class DataSource(PlotObject):
""" A base class for data source types. ``DataSource`` is
not generally useful to instantiate on its own.
"""
column_names = List(String, help="""
An list of names for all the columns in this DataSource.
""")
selected = Dict(String, Dict(String, Any), default={
'0d': {'flag': False, 'indices': []},
'1d': {'indices': []},
'2d': {'indices': []}
}, help="""
A dict to indicate selected indices on different dimensions on this DataSource. Keys are:
- 0d: indicates whether a Line or Patch glyphs have been hit. Value is a
dict with the following keys:
- flag (boolean): true if the glyph was hit, false otherwise
- indices (list): indices hit (if applicable)
- 1d: indicates whether any other glyph (except [multi]line or
patches) was hit:
- indices (list): indices that were hit/selected
- 2d: indicates whether a [multi]line or patches glyph was hit:
- indices (list(list)): indices of the lines/patches that were
hit/selected
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
""")
def columns(self, *columns):
""" Returns a ColumnsRef object for a column or set of columns
on this data source.
Args:
*columns
Returns:
ColumnsRef
"""
return ColumnsRef(source=self, columns=list(columns))
class ColumnsRef(HasProps):
""" A utility object to allow referring to a collection of columns
from a specified data source, all together.
"""
source = Instance(DataSource, help="""
A data source to reference.
""")
columns = List(String, help="""
A list of column names to reference from ``source``.
""")
class ColumnDataSource(DataSource):
""" Maps names of columns to sequences or arrays.
If the ColumnDataSource initializer is called with a single argument that
is a dict or pandas.DataFrame, that argument is used as the value for the
"data" attribute. For example::
ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)
ColumnDataSource(df) # same as ColumnDataSource(data=df)
.. note::
There is an implicit assumption that all the columns in
a given ColumnDataSource have the same length.
"""
data = Dict(String, Any, help="""
Mapping of column names to sequences of data. The data can be, e.g.,
Python lists or tuples, NumPy arrays, etc.
""")
def __init__(self, *args, **kw):
""" If called with a single argument that is a dict or
pandas.DataFrame, treat that implicitly as the "data" attribute.
"""
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
import pandas as pd
if isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
for name, data in raw_data.items():
self.add(data, name)
super(ColumnDataSource, self).__init__(**kw)
@staticmethod
def _data_from_df(df):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict(str, list)
"""
index = df.index
new_data = {}
for colname in df:
new_data[colname] = df[colname].tolist()
if index.name:
new_data[index.name] = index.tolist()
elif index.names and not all([x is None for x in index.names]):
new_data["_".join(index.names)] = index.tolist()
else:
new_data["index"] = index.tolist()
return new_data
@classmethod
@deprecated("Bokeh 0.9.3", "ColumnDataSource initializer")
def from_df(cls, data):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict(str, list)
"""
import warnings
warnings.warn("Method deprecated in Bokeh 0.9.3")
return cls._data_from_df(data)
def to_df(self):
""" Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
"""
import pandas as pd
if self.column_names:
return pd.DataFrame(self.data, columns=self.column_names)
else:
return pd.DataFrame(self.data)
def add(self, data, name=None):
""" Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
"""
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.column_names.append(name)
self.data[name] = data
return name
def vm_serialize(self, changed_only=True):
attrs = super(ColumnDataSource, self).vm_serialize(changed_only=changed_only)
if 'data' in attrs:
attrs['data'] = transform_column_source_data(attrs['data'])
return attrs
def remove(self, name):
""" Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
"""
try:
self.column_names.remove(name)
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def push_notebook(self):
""" Update date for a plot in the IPthon notebook in place.
This function can be used to update data in plot data sources
in the IPython notebook, without having to use the Bokeh server.
Returns:
None
.. warning::
The current implementation leaks memory in the IPython notebook,
due to accumulating JS code. This function typically works well
with light UI interactions, but should not be used for continuously
updating data. See :bokeh-issue:`1732` for more details and to
track progress on potential fixes.
"""
from IPython.core import display
from bokeh.protocol import serialize_json
id = self.ref['id']
model = self.ref['type']
json = serialize_json(self.vm_serialize())
js = """
var ds = Bokeh.Collections('{model}').get('{id}');
var data = {json};
ds.set(data);
""".format(model=model, id=id, json=json)
display.display_javascript(js, raw=True)
@validation.error(COLUMN_LENGTHS)
def _check_column_lengths(self):
lengths = set(len(x) for x in self.data.values())
if len(lengths) > 1:
return str(self)
class RemoteSource(DataSource):
data_url = String(help="""
The URL to the endpoint for the data.
""")
data = Dict(String, Any, help="""
Additional data to include directly in this data source object. The
columns provided here are merged with those from the Bokeh server.
""")
polling_interval = Int(help="""
polling interval for updating data source in milliseconds
""")
class AjaxDataSource(RemoteSource):
method = Enum('POST', 'GET', help="http method - GET or POST")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``),
or to replace existing data entirely.
""")
max_size = Int(help="""
Maximum size of the data array being kept after each pull request.
Beyond that size, the data will be right shifted.
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in AJAX requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
class BlazeDataSource(RemoteSource):
#blaze parts
expr = Dict(String, Any(), help="""
blaze expression graph in json form
""")
namespace = Dict(String, Any(), help="""
namespace in json form for evaluating blaze expression graph
""")
local = Bool(help="""
Whether this data source is hosted by the bokeh server or not.
""")
def from_blaze(self, remote_blaze_obj, local=True):
from blaze.server import to_tree
# only one Client object, can hold many datasets
assert len(remote_blaze_obj._leaves()) == 1
leaf = remote_blaze_obj._leaves()[0]
blaze_client = leaf.data
json_expr = to_tree(remote_blaze_obj, {leaf : ':leaf'})
self.data_url = blaze_client.url + "/compute.json"
self.local = local
self.expr = json_expr
def to_blaze(self):
from blaze.server.client import Client
from blaze.server import from_tree
from blaze import Data
# hacky - blaze urls have `compute.json` in it, but we need to strip it off
# to feed it into the blaze client lib
c = Client(self.data_url.rsplit('compute.json', 1)[0])
d = Data(c)
return from_tree(self.expr, {':leaf' : d})
class ServerDataSource(BlazeDataSource):
""" A data source that referes to data located on a Bokeh server.
The data from the server is loaded on-demand by the client.
"""
# Parameters of data transformation operations
# The 'Any' is used to pass primitives around.
# TODO: (jc) Find/create a property type for 'any primitive/atomic value'
transform = Dict(String,Either(Instance(PlotObject), Any), help="""
Parameters of the data transformation operations.
The associated value is minimally a tag that says which downsample routine
to use. For some downsamplers, parameters are passed this way too.
""")
| bsd-3-clause |
IamJeffG/geopandas | geopandas/tools/util.py | 11 | 1552 | import pandas as pd
import geopandas as gpd
from shapely.geometry import (
Point,
LineString,
Polygon,
MultiPoint,
MultiLineString,
MultiPolygon
)
from shapely.geometry.base import BaseGeometry
_multi_type_map = {
'Point': MultiPoint,
'LineString': MultiLineString,
'Polygon': MultiPolygon
}
def collect(x, multi=False):
"""
Collect single part geometries into their Multi* counterpart
Parameters
----------
x : an iterable or Series of Shapely geometries, a GeoSeries, or
a single Shapely geometry
multi : boolean, default False
if True, force returned geometries to be Multi* even if they
only have one component.
"""
if isinstance(x, BaseGeometry):
x = [x]
elif isinstance(x, pd.Series):
x = list(x)
# We cannot create GeometryCollection here so all types
# must be the same. If there is more than one element,
# they cannot be Multi*, i.e., can't pass in combination of
# Point and MultiPoint... or even just MultiPoint
t = x[0].type
if not all(g.type == t for g in x):
raise ValueError('Geometry type must be homogenous')
if len(x) > 1 and t.startswith('Multi'):
raise ValueError(
'Cannot collect {0}. Must have single geometries'.format(t))
if len(x) == 1 and (t.startswith('Multi') or not multi):
# If there's only one single part geom and we're not forcing to
# multi, then just return it
return x[0]
return _multi_type_map[t](x)
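# Illustrative usage sketch (not part of the original module; the helper name is
# hypothetical): collecting Points into a MultiPoint.
def _example_collect():
    pts = [Point(0, 0), Point(1, 1), Point(2, 2)]
    multi = collect(pts)                          # -> MultiPoint with three parts
    single = collect([Point(0, 0)])               # -> Point, unless multi=True is passed
    forced = collect([Point(0, 0)], multi=True)   # -> MultiPoint with one part
    return multi, single, forced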
| bsd-3-clause |
nickgentoo/scikit-learn-graph | scripts/cross_validation_from_matrix_norm.py | 1 | 3317 | import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '',''))
import numpy as np
#from skgraph import datasets
from sklearn import svm
#from skgraph.ioskgraph import *
from math import sqrt
import sys
#"sys.path.append('..\\..\\Multiple Kernel Learning\\Framework')"
if len(sys.argv)<4:
sys.exit("python cross_validation_from_matrix_norm.py inputMatrix.libsvm C outfile")
c=float(sys.argv[2])
##TODO read from libsvm format
from sklearn.datasets import load_svmlight_file
km, target_array = load_svmlight_file(sys.argv[1])
#print km
# drop the index column
kmgood=km[:,1:].todense()
gram=km[:,1:].todense()
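# Cosine-normalise the kernel matrix: gram[i, j] = K[i, j] / sqrt(K[i, i] * K[j, j]),
# giving every example unit self-similarity; zero diagonal entries are guarded against below.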
for i in xrange(len(target_array)):
for j in xrange(0,len(target_array)):
#print i,j,kmgood[i,j],kmgood[i,i],kmgood[j,j]
if kmgood[i,i]*kmgood[j,j]==0:
print "WARNING: avoided divizion by zero"
gram[i,j]=0
else:
gram[i,j]=kmgood[i,j]/sqrt(kmgood[i,i]*kmgood[j,j])
#print gram
from sklearn import cross_validation
for rs in range(42,53):
f=open(str(sys.argv[3]+".seed"+str(rs)+".c"+str(c)),'w')
kf = cross_validation.StratifiedKFold(target_array, n_folds=10, shuffle=True,random_state=rs)
#print kf
#remove column zero because
#first entry of each line is the index
#gram=km[:,1:].todense()
f.write("Total examples "+str(len(gram))+"\n")
f.write("CV\t test_acc\n")
#print gram
# normalization
from math import sqrt
#for i in range(len(gram)):
# for j in range(len(gram)):
# gram[i,j]=gram[i,j]/sqrt(gram[i,i]+gram[j,j])
sc=[]
for train_index, test_index in kf:
#print("TRAIN:", train_index, "TEST:", test_index)
#generated train and test lists, incuding indices of the examples in training/test
#for the specific fold. Indices starts from 0 now
clf = svm.SVC(C=c, kernel='precomputed')
train_gram = [] #[[] for x in xrange(0,len(train))]
test_gram = []# [[] for x in xrange(0,len(test))]
#generate train matrix and test matrix
index=-1
for row in gram:
index+=1
if index in train_index:
train_gram.append([gram[index,i] for i in train_index])
else:
test_gram.append([gram[index,i] for i in train_index])
#print gram
X_train, X_test, y_train, y_test = np.array(train_gram), np.array(test_gram), target_array[train_index], target_array[test_index]
#COMPUTE INNERKFOLD
kf = cross_validation.StratifiedKFold(y_train, n_folds=10, shuffle=True,random_state=rs)
inner_scores= cross_validation.cross_val_score(
clf, X_train, y_train, cv=kf)
#print "inner scores", inner_scores
print "Inner Accuracy: %0.4f (+/- %0.4f)" % (inner_scores.mean(), inner_scores.std() / 2)
f.write(str(inner_scores.mean())+"\t")
clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
# predict on test examples
y_test_predicted=clf.predict(X_test)
sc.append(accuracy_score(y_test, y_test_predicted))
f.write(str(accuracy_score(y_test, y_test_predicted))+"\n")
f.close()
scores=np.array(sc)
print "Accuracy: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() / 2)
| gpl-3.0 |
legacysurvey/obiwan | py/obiwan/common.py | 1 | 4465 | """
Commonly used functions
"""
import matplotlib
import matplotlib.pyplot as plt
import os
import pandas as pd
import fitsio
# Sphinx build would crash
# try:
from astrometry.util.fits import fits_table, merge_tables
# except ImportError:
# pass
def inJupyter():
return 'inline' in matplotlib.get_backend()
def save_png(outdir,fig_id, tight=True):
path= os.path.join(outdir,fig_id + ".png")
if not os.path.isdir(outdir):
os.makedirs(outdir)
print("Saving figure", path)
if tight:
plt.tight_layout()
plt.savefig(path, format='png', dpi=150)
#plt.savefig(path, format='png',box_extra_artists=[xlab,ylab],
# bbox_inches='tight',dpi=150)
if not inJupyter():
plt.close()
def dobash(cmd):
print('UNIX cmd: %s' % cmd)
if os.system(cmd): raise ValueError
def stack_tables(fn_list,textfile=True,
shuffle=None):
'''concatenates fits tables
Args:
shuffle: set to an integer to randomly read up to the first "shuffle" catalogs only
'''
if shuffle:
assert( isinstance(shuffle, int))
if textfile:
fns=read_lines(fn_list)
else:
fns= fn_list
if len(fns) < 1: raise ValueError('Error: fns=',fns)
if shuffle:
print('shuffling %d' % shuffle)
seed=7
np.random.seed(seed)
inds= np.arange(len(fns))
np.random.shuffle(inds)
fns= fns[inds]
cats= []
for i,fn in enumerate(fns):
print('reading %s %d/%d' % (fn,i+1,len(fns)))
if shuffle and i >= shuffle:
print('shuffle limit reached, stopping read')
break
try:
tab= fits_table(fn)
cats.append( tab )
except IOError:
print('Fits file does not exist: %s' % fn)
return merge_tables(cats, columns='fillzero')
def writelist(lis,fn):
if os.path.exists(fn):
os.remove(fn)
with open(fn,'w') as foo:
for li in lis:
foo.write('%s\n' % li)
print('Wrote %s' % fn)
if len(lis) == 0:
print('Warning: %s is empty list' % fn)
def to_csv(d,fn='test.csv'):
df= pd.DataFrame(d)
#df= df.round({})
df.to_csv(fn,index=False)
print('Wrote %s' % fn)
def fits2pandas(tab,attrs=None):
"""converts a fits_table into a pandas DataFrame
Args:
tab: fits_table()
attrs: attributes or column names want in the DF
"""
d={}
if attrs is None:
attrs= tab.get_columns()
for col in attrs:
d[col]= tab.get(col)
df= pd.DataFrame(d)
# Fix byte ordering from fits
# https://stackoverflow.com/questions/18599579/pulling-multiple-non-consecutive-index-values-from-a-pandas-dataframe
df= df.apply(lambda x: x.values.byteswap().newbyteorder())
return df
def get_brickdir(outdir,obj,brick):
return os.path.join(outdir,obj,brick[:3],brick)
def get_rsdir(rowstart,
do_skipids='no',do_more='no'):
"""Returns string like rs0 or skip_rs0
Args:
rowstart: 0, 300, etc
do_skipids,do_more: yes or no
"""
# Either rs or skip_rs
if do_skipids == 'no':
final_dir= "rs%s" % str(rowstart)
elif do_skipids == 'yes':
final_dir= "skip_rs%s" % str(rowstart)
# if specified minimum id, running more randoms
if do_more == 'yes':
final_dir= "more_"+final_dir
return final_dir
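# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): directory names produced by get_rsdir for a few flag combinations.
def _example_get_rsdir():
    assert get_rsdir(0) == 'rs0'
    assert get_rsdir(300, do_skipids='yes') == 'skip_rs300'
    assert get_rsdir(0, do_more='yes') == 'more_rs0'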
def get_outdir_runbrick(outdir,brick,rowstart,
do_skipids='no',do_more='no'):
"""diretory obiwan/runbrick will write results to
Returns path to like outdir/obj/bri/brick/rs0
"""
return os.path.join(outdir,brick[:3],brick,
get_rsdir(rowstart,
do_skipids=do_skipids,
do_more=do_more))
def get_brickinfo_hack(survey,brickname):
"""when in ipython and reading single row survey-bricks table,
astroometry.net's fits_table() can break, handle this case
Returns:
brickinfo: the single row fits_table
"""
try:
brickinfo = survey.get_brick_by_name(brickname)
except AttributeError:
# can happen inside: ipython %run
hdu=fitsio.FITS(survey.find_file('bricks'))
data= hdu[1].read()
data= data[data['brickname'] == brickname][0]
brickinfo= fits_table()
for col in data.dtype.fields.keys():
brickinfo.set(col,data[col])
return brickinfo
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/qtconsole/rich_jupyter_widget.py | 7 | 17139 | # Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from base64 import decodestring
import os
import re
from warnings import warn
from qtconsole.qt import QtCore, QtGui
from ipython_genutils.path import ensure_dir_exists
from traitlets import Bool
from qtconsole.svg import save_svg, svg_to_clipboard, svg_to_image
from .jupyter_widget import JupyterWidget
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
latex_to_png = None
class LatexError(Exception):
"""Exception for Latex errors"""
class RichIPythonWidget(JupyterWidget):
"""Dummy class for config inheritance. Destroyed below."""
class RichJupyterWidget(RichIPythonWidget):
""" An JupyterWidget that supports rich text, including lists, images, and
tables. Note that raw performance will be reduced compared to the plain
text version.
"""
# RichJupyterWidget protected class variables.
_payload_source_plot = 'ipykernel.pylab.backend_payload.add_plot_payload'
_jpg_supported = Bool(False)
# Used to determine whether a given html export attempt has already
# displayed a warning about being unable to convert a png to svg.
_svg_warning_displayed = False
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
""" Create a RichJupyterWidget.
"""
kw['kind'] = 'rich'
super(RichJupyterWidget, self).__init__(*args, **kw)
# Configure the ConsoleWidget HTML exporter for our formats.
self._html_exporter.image_tag = self._get_image_tag
# Dictionary for resolving document resource names to SVG data.
self._name_to_svg_map = {}
# Do we support jpg ?
# it seems that jpg support is sometimes provided by a Qt plugin, so assume
# it is not always available.
_supported_format = map(str, QtGui.QImageReader.supportedImageFormats())
self._jpg_supported = 'jpeg' in _supported_format
#---------------------------------------------------------------------------
# 'ConsoleWidget' public interface overrides
#---------------------------------------------------------------------------
def export_html(self):
""" Shows a dialog to export HTML/XML in various formats.
Overridden in order to reset the _svg_warning_displayed flag prior
to the export running.
"""
self._svg_warning_displayed = False
super(RichJupyterWidget, self).export_html()
#---------------------------------------------------------------------------
# 'ConsoleWidget' protected interface
#---------------------------------------------------------------------------
def _context_menu_make(self, pos):
""" Reimplemented to return a custom context menu for images.
"""
format = self._control.cursorForPosition(pos).charFormat()
name = format.stringProperty(QtGui.QTextFormat.ImageName)
if name:
menu = QtGui.QMenu()
menu.addAction('Copy Image', lambda: self._copy_image(name))
menu.addAction('Save Image As...', lambda: self._save_image(name))
menu.addSeparator()
svg = self._name_to_svg_map.get(name, None)
if svg is not None:
menu.addSeparator()
menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))
menu.addAction('Save SVG As...',
lambda: save_svg(svg, self._control))
else:
menu = super(RichJupyterWidget, self)._context_menu_make(pos)
return menu
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#---------------------------------------------------------------------------
def _pre_image_append(self, msg, prompt_number):
"""Append the Out[] prompt and make the output nicer
Shared code for some of the following if statements
"""
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
self._append_plain_text('\n', True)
def _handle_execute_result(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("execute_result: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
metadata = msg['content']['metadata']
if 'image/svg+xml' in data:
self._pre_image_append(msg, prompt_number)
self._append_svg(data['image/svg+xml'], True)
self._append_html(self.output_sep2, True)
elif 'image/png' in data:
self._pre_image_append(msg, prompt_number)
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
self._append_html(self.output_sep2, True)
elif 'image/jpeg' in data and self._jpg_supported:
self._pre_image_append(msg, prompt_number)
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
self._append_html(self.output_sep2, True)
elif 'text/latex' in data:
self._pre_image_append(msg, prompt_number)
try:
self._append_latex(data['text/latex'], True)
except LatexError:
return super(RichJupyterWidget, self)._handle_display_data(msg)
self._append_html(self.output_sep2, True)
else:
# Default back to the plain text representation.
return super(RichJupyterWidget, self)._handle_execute_result(msg)
def _handle_display_data(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("display_data: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
data = msg['content']['data']
metadata = msg['content']['metadata']
# Try to use the svg or html representations.
# FIXME: Is this the right ordering of things to try?
self.log.debug("display: %s", msg.get('content', ''))
if 'image/svg+xml' in data:
svg = data['image/svg+xml']
self._append_svg(svg, True)
elif 'image/png' in data:
# PNG data is base64 encoded as it passes over the network
# in a JSON structure so we decode it.
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
elif 'image/jpeg' in data and self._jpg_supported:
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
elif 'text/latex' in data and latex_to_png:
try:
self._append_latex(data['text/latex'], True)
except LatexError:
return super(RichJupyterWidget, self)._handle_display_data(msg)
else:
# Default back to the plain text representation.
return super(RichJupyterWidget, self)._handle_display_data(msg)
#---------------------------------------------------------------------------
# 'RichJupyterWidget' protected interface
#---------------------------------------------------------------------------
def _is_latex_math(self, latex):
"""
Determine if a Latex string is in math mode
This is the only mode supported by qtconsole
"""
basic_envs = ['math', 'displaymath']
starable_envs = ['equation', 'eqnarray', 'multline', 'gather', 'align',
'flalign', 'alignat']
star_envs = [env + '*' for env in starable_envs]
envs = basic_envs + starable_envs + star_envs
env_syntax = [r'\begin{{{0}}} \end{{{0}}}'.format(env).split() for env in envs]
math_syntax = [
(r'\[', r'\]'), (r'\(', r'\)'),
('$$', '$$'), ('$', '$'),
]
for start, end in math_syntax + env_syntax:
inner = latex[len(start):-len(end)]
if start in inner or end in inner:
return False
if latex.startswith(start) and latex.endswith(end):
return True
return False
def _append_latex(self, latex, before_prompt=False, metadata=None):
""" Append latex data to the widget."""
png = None
if self._is_latex_math(latex):
png = latex_to_png(latex, wrap=False, backend='dvipng')
if png is None and latex.startswith('$') and latex.endswith('$'):
# matplotlib only supports strings enclosed in dollar signs
png = latex_to_png(latex, wrap=False, backend='matplotlib')
if png:
self._append_png(png, before_prompt, metadata)
else:
raise LatexError
def _append_jpg(self, jpg, before_prompt=False, metadata=None):
""" Append raw JPG data to the widget."""
self._append_custom(self._insert_jpg, jpg, before_prompt, metadata=metadata)
def _append_png(self, png, before_prompt=False, metadata=None):
""" Append raw PNG data to the widget.
"""
self._append_custom(self._insert_png, png, before_prompt, metadata=metadata)
def _append_svg(self, svg, before_prompt=False):
""" Append raw SVG data to the widget.
"""
self._append_custom(self._insert_svg, svg, before_prompt)
def _add_image(self, image):
""" Adds the specified QImage to the document and returns a
QTextImageFormat that references it.
"""
document = self._control.document()
name = str(image.cacheKey())
document.addResource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name), image)
format = QtGui.QTextImageFormat()
format.setName(name)
return format
def _copy_image(self, name):
""" Copies the ImageResource with 'name' to the clipboard.
"""
image = self._get_image(name)
QtGui.QApplication.clipboard().setImage(image)
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
image = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
return image
def _get_image_tag(self, match, path = None, format = "png"):
""" Return (X)HTML mark-up for the image-tag given by match.
Parameters
----------
match : re.SRE_Match
A match to an HTML image tag as exported by Qt, with
match.group("Name") containing the matched image ID.
path : string|None, optional [default None]
If not None, specifies a path to which supporting files may be
written (e.g., for linked images). If None, all images are to be
included inline.
format : "png"|"svg"|"jpg", optional [default "png"]
Format for returned or referenced images.
"""
if format in ("png","jpg"):
try:
image = self._get_image(match.group("name"))
except KeyError:
return "<b>Couldn't find image %s</b>" % match.group("name")
if path is not None:
ensure_dir_exists(path)
relpath = os.path.basename(path)
if image.save("%s/qt_img%s.%s" % (path, match.group("name"), format),
"PNG"):
return '<img src="%s/qt_img%s.%s">' % (relpath,
match.group("name"),format)
else:
return "<b>Couldn't save image!</b>"
else:
ba = QtCore.QByteArray()
buffer_ = QtCore.QBuffer(ba)
buffer_.open(QtCore.QIODevice.WriteOnly)
image.save(buffer_, format.upper())
buffer_.close()
return '<img src="data:image/%s;base64,\n%s\n" />' % (
format,re.sub(r'(.{60})',r'\1\n',str(ba.toBase64())))
elif format == "svg":
try:
svg = str(self._name_to_svg_map[match.group("name")])
except KeyError:
if not self._svg_warning_displayed:
QtGui.QMessageBox.warning(self, 'Error converting PNG to SVG.',
'Cannot convert PNG images to SVG, export with PNG figures instead. '
'If you want to export matplotlib figures as SVG, add '
'to your ipython config:\n\n'
'\tc.InlineBackend.figure_format = \'svg\'\n\n'
'And regenerate the figures.',
QtGui.QMessageBox.Ok)
self._svg_warning_displayed = True
return ("<b>Cannot convert PNG images to SVG.</b> "
"You must export this session with PNG images. "
"If you want to export matplotlib figures as SVG, add to your config "
"<span>c.InlineBackend.figure_format = 'svg'</span> "
"and regenerate the figures.")
# Not currently checking path, because it's tricky to find a
# cross-browser way to embed external SVG images (e.g., via
# object or embed tags).
# Chop stand-alone header from matplotlib SVG
offset = svg.find("<svg")
assert(offset > -1)
return svg[offset:]
else:
return '<b>Unrecognized image format</b>'
def _insert_jpg(self, cursor, jpg, metadata=None):
""" Insert raw PNG data into the widget."""
self._insert_img(cursor, jpg, 'jpg', metadata=metadata)
def _insert_png(self, cursor, png, metadata=None):
""" Insert raw PNG data into the widget.
"""
self._insert_img(cursor, png, 'png', metadata=metadata)
def _insert_img(self, cursor, img, fmt, metadata=None):
""" insert a raw image, jpg or png """
if metadata:
width = metadata.get('width', None)
height = metadata.get('height', None)
else:
width = height = None
try:
image = QtGui.QImage()
image.loadFromData(img, fmt.upper())
if width and height:
image = image.scaled(width, height, transformMode=QtCore.Qt.SmoothTransformation)
elif width and not height:
image = image.scaledToWidth(width, transformMode=QtCore.Qt.SmoothTransformation)
elif height and not width:
image = image.scaledToHeight(height, transformMode=QtCore.Qt.SmoothTransformation)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid %s data.'%fmt)
else:
format = self._add_image(image)
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _insert_svg(self, cursor, svg):
""" Insert raw SVG data into the widet.
"""
try:
image = svg_to_image(svg)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid SVG data.')
else:
format = self._add_image(image)
self._name_to_svg_map[format.name()] = svg
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _save_image(self, name, format='PNG'):
""" Shows a save dialog for the ImageResource with 'name'.
"""
dialog = QtGui.QFileDialog(self._control, 'Save Image')
dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dialog.setDefaultSuffix(format.lower())
dialog.setNameFilter('%s file (*.%s)' % (format, format.lower()))
if dialog.exec_():
filename = dialog.selectedFiles()[0]
image = self._get_image(name)
image.save(filename, format)
# clobber RichIPythonWidget above:
class RichIPythonWidget(RichJupyterWidget):
"""Deprecated class. Use RichJupyterWidget"""
def __init__(self, *a, **kw):
warn("RichIPythonWidget is deprecated, use RichJupyterWidget")
super(RichIPythonWidget, self).__init__(*a, **kw)
| gpl-3.0 |
adammenges/statsmodels | statsmodels/tsa/filters/filtertools.py | 25 | 12438 | # -*- coding: utf-8 -*-
"""Linear Filters for time series analysis and testing
TODO:
* check common sequence in signature of filter functions (ar,ma,x) or (x,ar,ma)
Created on Sat Oct 23 17:18:03 2010
Author: Josef-pktd
"""
#not original; copied from various experimental scripts
#version control history is there
from statsmodels.compat.python import range
import numpy as np
import scipy.fftpack as fft
from scipy import signal
from scipy.signal.signaltools import _centered as trim_centered
from ._utils import _maybe_get_pandas_wrapper
def _pad_nans(x, head=None, tail=None):
if np.ndim(x) == 1:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[np.nan] * head, x, [np.nan] * tail]
elif tail is None:
return np.r_[[np.nan] * head, x]
elif head is None:
return np.r_[x, [np.nan] * tail]
elif np.ndim(x) == 2:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[[np.nan] * x.shape[1]] * head, x,
[[np.nan] * x.shape[1]] * tail]
elif tail is None:
return np.r_[[[np.nan] * x.shape[1]] * head, x]
elif head is None:
return np.r_[x, [[np.nan] * x.shape[1]] * tail]
else:
raise ValueError("Nan-padding for ndim > 2 not implemented")
#original changes and examples in sandbox.tsa.try_var_convolve
# don't do these imports, here just for copied fftconvolve
#get rid of these imports
#from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
# ifftn, fftfreq
#from numpy import product,array
def fftconvolveinv(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
copied from scipy.signal.signaltools, but used here to try out an inverse
filter; it doesn't work, or I can't get it to work
2010-10-23:
looks ok to me for 1d,
from results below with padded data array (fftp)
but it doesn't work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
IN1 = fft.fftn(in1,fsize)
#IN1 *= fftn(in2,fsize) #JP: this looks like the only change I made
IN1 /= fft.fftn(in2,fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO doesn't seem to work for VARMA
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if np.product(s1,axis=0) > np.product(s2,axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
#code duplication with fftconvolveinv
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
for use with arma (old version: in1=num in2=den in3=data)
* better for consistency with other functions in1=data in2=num in3=den
* note in2 and in3 need to have consistent dimension/shape
since I'm using max of in2, in3 shapes and not the sum
copied from scipy.signal.signaltools, but here used to try out an inverse
filter; doesn't work or I can't get it to work
2010-10-23
looks ok to me for 1d,
from results below with padded data array (fftp)
but it doesn't work for multidimensional inverse filter (fftn)
original signal.fftconvolve also uses fftn
"""
if (in2 is None) and (in3 is None):
raise ValueError('at least one of in2 and in3 needs to be given')
s1 = np.array(in1.shape)
if not in2 is None:
s2 = np.array(in2.shape)
else:
s2 = 0
if not in3 is None:
s3 = np.array(in3.shape)
s2 = max(s2, s3) # try this looks reasonable for ARMA
#s2 = s3
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1+s2-1
# Always use 2**n-sized FFT
fsize = 2**np.ceil(np.log2(size))
#convolve shorter ones first, not sure if it matters
if not in2 is None:
IN1 = fft.fftn(in2, fsize)
if not in3 is None:
IN1 /= fft.fftn(in3, fsize) # use inverse filter
# note the inverse is elementwise not matrix inverse
# is this correct, NO doesn't seem to work for VARMA
IN1 *= fft.fftn(in1, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = fft.ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if np.product(s1,axis=0) > np.product(s2,axis=0):
osize = s1
else:
osize = s2
return trim_centered(ret,osize)
elif mode == "valid":
return trim_centered(ret,abs(s2-s1)+1)
#original changes and examples in sandbox.tsa.try_var_convolve
#examples and tests are there
def recursive_filter(x, ar_coeff, init=None):
'''
Autoregressive, or recursive, filtering.
Parameters
----------
x : array-like
Time-series data. Should be 1d or n x 1.
ar_coeff : array-like
AR coefficients in reverse time order. See Notes
init : array-like
Initial values of the time-series prior to the first value of y.
The default is zero.
Returns
-------
y : array
Filtered array, number of columns determined by x and ar_coeff. If a
pandas object is given, a pandas object is returned.
Notes
-----
Computes the recursive filter ::
y[n] = ar_coeff[0] * y[n-1] + ...
+ ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]
where n_coeff = len(ar_coeff).
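Examples
--------
A minimal, illustrative sketch (made-up values; assumes numpy is available)::

    >>> import numpy as np
    >>> x = np.array([1., 2., 3., 4., 5.])
    >>> y = recursive_filter(x, np.array([0.5]))
    >>> # y[n] = x[n] + 0.5 * y[n-1]  ->  [1., 2.5, 4.25, 6.125, 8.0625]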
'''
_pandas_wrapper = _maybe_get_pandas_wrapper(x)
x = np.asarray(x).squeeze()
ar_coeff = np.asarray(ar_coeff).squeeze()
if x.ndim > 1 or ar_coeff.ndim > 1:
raise ValueError('x and ar_coeff have to be 1d')
if init is not None: # integer init are treated differently in lfiltic
if len(init) != len(ar_coeff):
raise ValueError("ar_coeff must be the same length as init")
init = np.asarray(init, dtype=float)
if init is not None:
zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)
else:
zi = None
y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)
if init is not None:
result = y[0]
else:
result = y
if _pandas_wrapper:
return _pandas_wrapper(result)
return result
def convolution_filter(x, filt, nsides=2):
'''
Linear filtering via convolution. Centered and backward displaced moving
weighted average.
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
filt : array_like
Linear filter coefficients in reverse time-order. Should have the
same number of dimensions as x though if 1d and ``x`` is 2d will be
coerced to 2d.
nsides : int, optional
If 2, a centered moving average is computed using the filter
coefficients. If 1, the filter coefficients are for past values only.
Both methods use scipy.signal.convolve.
Returns
-------
y : ndarray, 2d
Filtered array, number of columns determined by x and filt. If a
pandas object is given, a pandas object is returned. The index of
the return is the exact same as the time period in ``x``
Notes
-----
If nsides == 1, x is filtered ::
y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]
where n_filt is len(filt).
If nsides == 2, x is filtered around lag 0 ::
y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
+ ... + filt[n_filt - 1] * x[n + n_filt/2]
where n_filt is len(filt). If n_filt is even, then more of the filter
is forward in time than backward.
If filt is 1d or (nlags,1) one lag polynomial is applied to all
variables (columns of x). If filt is 2d, (nlags, nvars) each series is
independently filtered with its own lag polynomial, uses loop over nvar.
This is different than the usual 2d vs 2d convolution.
Filtering is done with scipy.signal.convolve, so it will be reasonably
fast for medium sized data. For large data fft convolution would be
faster.
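Examples
--------
A minimal, illustrative sketch (made-up values): a centered three-point
moving average with nsides=2::

    >>> import numpy as np
    >>> x = np.arange(10, dtype=float)
    >>> w = np.ones(3) / 3.
    >>> y = convolution_filter(x, w, nsides=2)
    >>> # y[0] and y[-1] are nan-padded; the interior values are the centered
    >>> # three-point means, e.g. y[1] ~ 1.0, y[2] ~ 2.0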
'''
# for nsides shift the index instead of using 0 for 0 lag this
# allows correct handling of NaNs
if nsides == 1:
trim_head = len(filt) - 1
trim_tail = None
elif nsides == 2:
trim_head = int(np.ceil(len(filt)/2.) - 1) or None
trim_tail = int(np.ceil(len(filt)/2.) - len(filt) % 2) or None
else: # pragma : no cover
raise ValueError("nsides must be 1 or 2")
_pandas_wrapper = _maybe_get_pandas_wrapper(x)
x = np.asarray(x)
filt = np.asarray(filt)
if x.ndim > 1 and filt.ndim == 1:
filt = filt[:, None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
if filt.ndim == 1 or min(filt.shape) == 1:
result = signal.convolve(x, filt, mode='valid')
elif filt.ndim == 2:
nlags = filt.shape[0]
nvar = x.shape[1]
result = np.zeros((x.shape[0] - nlags + 1, nvar))
if nsides == 2:
for i in range(nvar):
# could also use np.convolve, but easier for switching to fft
result[:, i] = signal.convolve(x[:, i], filt[:, i],
mode='valid')
elif nsides == 1:
for i in range(nvar):
result[:, i] = signal.convolve(x[:, i], np.r_[0, filt[:, i]],
mode='valid')
result = _pad_nans(result, trim_head, trim_tail)
if _pandas_wrapper:
return _pandas_wrapper(result)
return result
#copied from sandbox.tsa.garch
def miso_lfilter(ar, ma, x, useic=False): #[0.1,0.1]):
'''
Use nd convolution to merge the multiple inputs, then use lfilter to
produce the single output.
Arguments are for column variables.
The return is currently 1d.
Parameters
----------
ar : array_like, 1d, float
autoregressive lag polynomial including lag zero, ar(L)y_t
ma : array_like, same ndim as x, currently 2d
moving average lag polynomial ma(L)x_t
x : array_like, 2d
input data series, time in rows, variables in columns
Returns
-------
y : array, 1d
filtered output series
inp : array, 1d
combined input series
Notes
-----
currently for 2d inputs only, no choice of axis
Use of signal.lfilter requires that ar lag polynomial contains
floating point numbers
does not cut off invalid starting and final values
miso_lfilter finds the array y such that::
ar(L)y_t = ma(L)x_t
with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)
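Examples
--------
A minimal, illustrative sketch (made-up values): with ar = [1., 0.] (no
autoregression) and ma = [[1., 1.]], the combined input and the output are
simply the sum of the two input columns at lag zero::

    >>> import numpy as np
    >>> x = np.column_stack([np.arange(5.), np.ones(5)])
    >>> y, inp = miso_lfilter(np.array([1., 0.]), np.array([[1., 1.]]), x)
    >>> # y == inp == x.sum(axis=1) -> [1., 2., 3., 4., 5.]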
'''
ma = np.asarray(ma)
ar = np.asarray(ar)
#inp = signal.convolve(x, ma, mode='valid')
#inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
#Note: convolve mixes up the variable left-right flip
#I only want the flip in time direction
#this might also be a mistake or problem in other code where I
#switched from correlate to convolve
# correct convolve version, for use with fftconvolve in other cases
#inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
#for testing 2d equivalence between convolve and correlate
#np.testing.assert_almost_equal(inp2, inp)
nobs = x.shape[0]
# cut off extra values at end
#todo initialize also x for correlate
if useic:
return signal.lfilter([1], ar, inp,
#zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
else:
return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
#return signal.lfilter([1], ar, inp), inp
| bsd-3-clause |
lo-co/atm-py | atmPy/aerosols/size_distr/sizedistribution.py | 6 | 80435 | import datetime
import warnings
from copy import deepcopy
import numpy as np
import pandas as pd
import pylab as plt
import scipy.optimize as optimization
from matplotlib.colors import LogNorm
from scipy import integrate
from scipy import stats
from atmPy.atmos import vertical_profile, timeseries
from atmPy.aerosols import hygroscopic_growth as hg
from atmPy.for_removal.mie import bhmie
from atmPy.tools import pandas_tools
from atmPy.tools import plt_tools, math_functions, array_tools
# Todo: rotate the plots of the layerseries (e.g. plot_particle_concentration) to have the altitude as the y-axes
# TODO: Fix distrTypes so they are consistent with our understanding.
distTypes = {'log normal': ['dNdlogDp', 'dSdlogDp', 'dVdlogDp'],
'natural': ['dNdDp', 'dSdDp', 'dVdDp'],
'number': ['dNdlogDp', 'dNdDp'],
'surface': ['dSdlogDp', 'dSdDp'],
'volume': ['dVdlogDp', 'dVdDp']}
axes_types = ('AxesSubplot', 'AxesHostAxes')
def fit_normal_dist(x, y, log=True, p0=[10, 180, 0.2]):
"""Fits a normal distribution to a """
param = p0[:]
x = x[~ np.isnan(y)]
y = y[~ np.isnan(y)]
if log:
x = np.log10(x)
param[1] = np.log10(param[1])
# todo: write a bug report for the fact that I have to call the y.max() function to make the fit to work!!!!!
y.max()
############
para = optimization.curve_fit(math_functions.gauss, x, y, p0=param)
amp = para[0][0]
sigma = para[0][2]
if log:
pos = 10 ** para[0][1]
sigma_high = 10 ** (para[0][1] + para[0][2])
sigma_low = 10 ** (para[0][1] - para[0][2])
else:
pos = para[0][1]
sigma_high = (para[0][1] + para[0][2])
sigma_low = (para[0][1] - para[0][2])
return [amp, pos, sigma, sigma_high, sigma_low]
def read_csv(fname, fixGaps=True):
headerNo = 50
rein = open(fname, 'r')
nol = ['distributionType', 'objectType']
outDict = {}
for i in range(headerNo):
split = rein.readline().split('=')
variable = split[0].strip()
if split[0][0] == '#':
break
value = split[1].strip()
if variable in nol:
outDict[variable] = value
else:
outDict[variable] = np.array(eval(value))
if i == headerNo - 1:
raise TypeError('Sure this is a size distribution?')
rein.close()
data = pd.read_csv(fname, header=i + 1, index_col=0)
data.index = pd.to_datetime(data.index)
if outDict['objectType'] == 'SizeDist_TS':
distRein = SizeDist_TS(data, outDict['bins'], outDict['distributionType'], fixGaps=fixGaps)
elif outDict['objectType'] == 'SizeDist':
distRein = SizeDist(data, outDict['bins'], outDict['distributionType'], fixGaps=fixGaps)
elif outDict['objectType'] == 'SizeDist_LS':
distRein = SizeDist_LS(data, outDict['bins'], outDict['distributionType'], fixGaps=fixGaps)
else:
raise TypeError('not a valid object type')
return distRein
def read_hdf(f_name, keep_open = False, populate_namespace = False):
hdf = pd.HDFStore(f_name)
content = hdf.keys()
out = []
for i in content:
# print(i)
storer = hdf.get_storer(i)
attrs = storer.attrs.atmPy_attrs
if not attrs:
continue
elif attrs['type'].__name__ == 'SizeDist_TS':
dist_new = SizeDist_TS(hdf[i], attrs['bins'], attrs['distributionType'])
elif attrs['type'].__name__ == 'SizeDist':
dist_new = SizeDist(hdf[i], attrs['bins'], attrs['distributionType'])
elif attrs['type'].__name__ == 'SizeDist_LS':
dist_new = SizeDist_LS(hdf[i], attrs['bins'], attrs['distributionType'], attrs['layerbounderies'])
else:
txt = 'Unknown data type: %s'%attrs['type'].__name__
raise TypeError(txt)
fit_res = i+'/data_fit_normal'
if fit_res in content:
dist_new.data_fit_normal = hdf[fit_res]
if populate_namespace:
if attrs['variable_name']:
populate_namespace[attrs['variable_name']] = dist_new
out.append(dist_new)
if keep_open:
return hdf,out
else:
hdf.close()
return out
def get_label(distType):
""" Return the appropriate label for a particular distribution type
"""
if distType == 'dNdDp':
label = '$\mathrm{d}N\,/\,\mathrm{d}D_{P}$ (nm$^{-1}\,$cm$^{-3}$)'
elif distType == 'dNdlogDp':
label = '$\mathrm{d}N\,/\,\mathrm{d}log(D_{P})$ (cm$^{-3}$)'
elif distType == 'dSdDp':
label = '$\mathrm{d}S\,/\,\mathrm{d}D_{P}$ (nm$\,$cm$^{-3}$)'
elif distType == 'dSdlogDp':
label = '$\mathrm{d}S\,/\,\mathrm{d}log(D_{P})$ (nm$^2\,$cm$^{-3}$)'
elif distType == 'dVdDp':
label = '$\mathrm{d}V\,/\,\mathrm{d}D_{P}$ (nm$^2\,$cm$^{-3}$)'
elif distType == 'dVdlogDp':
label = '$\mathrm{d}V\,/\,\mathrm{d}log(D_{P})$ (nm$^3\,$cm$^{-3}$)'
elif distType == 'calibration':
label = '$\mathrm{d}N\,/\,\mathrm{d}Amp$ (bin$^{-1}\,$cm$^{-3}$)'
elif distType == 'numberConcentration':
label = 'Particle number in bin'
else:
raise ValueError('%s is not really an option!?!' % distType)
return label
# Todo: Docstring is wrong
# Todo: implement into the Layer Series
def _calculate_optical_properties(sd, wavelength, n, aod=False, noOfAngles=100):
"""
Calculates the extinction cross section, AOD, phase function, and asymmetry parameter for each layer.
Plotting the layer- and diameter-dependent extinction coefficient gives you an idea of what dominates the overall AOD.
Parameters
----------
wavelength: float.
wavelength of the scattered light, unit: nm
n: float.
Index of refraction of the scattering particles
noOfAngles: int, optional.
Number of scattering angles to be calculated. This mostly affects calculations which depend on the phase
function.
Returns
-------
OpticalProperty instance
"""
out = {}
out['n'] = n
out['wavelength'] = wavelength
sdls = sd.convert2numberconcentration()
index = sdls.data.index
if isinstance(n, pd.DataFrame):
n_multi = True
else:
n_multi = False
if not n_multi:
mie, angular_scatt_func = _perform_Miecalculations(np.array(sdls.bincenters / 1000.), wavelength / 1000., n,
noOfAngles=noOfAngles)
if aod:
AOD_layer = np.zeros((len(sdls.layercenters)))
extCoeffPerLayer = np.zeros((len(sdls.data.index.values), len(sdls.bincenters)))
angular_scatt_func_effective = pd.DataFrame()
asymmetry_parameter_LS = np.zeros((len(sdls.data.index.values)))
# print('\n oben mie.extinction_crossection: %s \n'%(mie.extinction_crossection))
for i, lc in enumerate(sdls.data.index.values):
laydata = sdls.data.iloc[i].values
# print('laydata: ',laydata.shape)
# print(laydata)
if n_multi:
mie, angular_scatt_func = _perform_Miecalculations(np.array(sdls.bincenters / 1000.), wavelength / 1000., n.iloc[i].values[0],
noOfAngles=noOfAngles)
extinction_coefficient = _get_coefficients(mie.extinction_crossection, laydata)
# print('\n oben ext_coef %s \n'%extinction_coefficient)
# print('mie.extinction_crossection ', mie.extinction_crossection.shape)
# print('extinction_coefficient: ', extinction_coefficient.shape)
# scattering_coefficient = _get_coefficients(mie.scattering_crossection, laydata)
if aod:
layerThickness = sdls.layerbounderies[i][1] - sdls.layerbounderies[i][0]
AOD_perBin = extinction_coefficient * layerThickness
AOD_layer[i] = AOD_perBin.values.sum()
extCoeffPerLayer[i] = extinction_coefficient
# return laydata, mie.scattering_crossection
scattering_cross_eff = laydata * mie.scattering_crossection
pfe = (laydata * angular_scatt_func).sum(axis=1) # sum of all angular_scattering_intensities
# pfe2 = pfe.copy()
# angular_scatt_func_effective[lc] = pfe
# asymmetry_parameter_LS[i] = (pfe.values*np.cos(pfe.index.values)).sum()/pfe.values.sum()
x_2p = pfe.index.values
y_2p = pfe.values
# limit to [0,pi]
y_1p = y_2p[x_2p < np.pi]
x_1p = x_2p[x_2p < np.pi]
# integ = integrate.simps(y_1p*np.sin(x_1p),x_1p)
# y_phase_func = y_1p/integ
y_phase_func = y_1p * 4 * np.pi / scattering_cross_eff.sum()
asymmetry_parameter_LS[i] = .5 * integrate.simps(np.cos(x_1p) * y_phase_func * np.sin(x_1p), x_1p)
# return mie,phase_fct, laydata, scattering_cross_eff, phase_fct_effective[lc], y_phase_func, asymmetry_parameter_LS[i]
angular_scatt_func_effective[
lc] = pfe * 1e-12 * 1e6 # equivalent to extCoeffPerLayer # similar to _get_coefficients (converts everthing to meter)
# return mie.extinction_crossection, angular_scatt_func, laydata, layerThickness # correct integrales match
# return extinction_coefficient, angular_scatt_func_effective
# return AOD_layer, pfe, angular_scatt_func_effective[lc]
# print(mie.extinction_crossection)
if aod:
out['AOD'] = AOD_layer[~ np.isnan(AOD_layer)].sum()
out['AOD_layer'] = pd.DataFrame(AOD_layer, index=sdls.layercenters, columns=['AOD per Layer'])
out['AOD_cum'] = out['AOD_layer'].iloc[::-1].cumsum().iloc[::-1]
extCoeff_perrow_perbin = pd.DataFrame(extCoeffPerLayer, index=index, columns=sdls.data.columns)
out['extCoeff_perrow_perbin'] = extCoeff_perrow_perbin
extCoeff_perrow = pd.DataFrame(extCoeff_perrow_perbin.sum(axis=1), columns=['ext_coeff'])
if index.dtype == '<M8[ns]':
out['extCoeff_perrow'] = timeseries.TimeSeries(extCoeff_perrow)
else:
out['extCoeff_perrow'] = extCoeff_perrow
out['asymmetry_param'] = pd.DataFrame(asymmetry_parameter_LS, index=index,
columns=['asymmetry_param'])
# out['asymmetry_param_alt'] = pd.DataFrame(asymmetry_parameter_LS_alt, index=sdls.layercenters, columns = ['asymmetry_param_alt'])
# out['OptPropInstance']= OpticalProperties(out, self.bins)
out['wavelength'] = wavelength
out['index_of_refraction'] = n
out['bin_centers'] = sdls.bincenters
out['angular_scatt_func'] = angular_scatt_func_effective
# opt_properties = OpticalProperties(out, self.bins)
# opt_properties.wavelength = wavelength
# opt_properties.index_of_refractio = n
# opt_properties.angular_scatt_func = angular_scatt_func_effective # This is the formaer phase_fct, but since it is the angular scattering intensity, i changed the name
# opt_properties.parent_dist_LS = self
return out
class SizeDist(object):
"""
Object defining a log normal aerosol size distribution
Arguments
----------
bincenters: NumPy array, optional
this is if you actually want to pass the bincenters, if False they will be calculated
distributionType:
log normal: 'dNdlogDp','dSdlogDp','dVdlogDp'
natural: 'dNdDp','dSdDp','dVdDp'
number: 'dNdlogDp', 'dNdDp', 'numberConcentration'
surface: 'dSdlogDp','dSdDp'
volume: 'dVdlogDp','dVdDp'
data: pandas dataFrame, optional
None, will generate an empty pandas data frame with columns defined by bins
- pandas dataFrame with
- column names (each name is something like this: '150-200')
- index is time (at some point this should be arbitrary, convertible to altitude for example?)
unit conventions:
- diameters: nanometers
- flowrates: cc (otherwise, axis labels need to be adjusted and caution needs to be taken when dealing with AOD)
Notes
------
* Diameters are specified in nanometers
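Example
-------
A minimal sketch with made-up numbers (two bins, one row of data)::

    import numpy as np
    import pandas as pd
    bins = np.array([100., 200., 300.])       # bin edges in nm
    data = pd.DataFrame([[50., 30.]])         # one value per bin
    sd = SizeDist(data, bins, 'dNdlogDp', fixGaps=False)
    # sd.bincenters -> array([150., 250.])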
"""
# todo: write setters and getters for bins and bincenter, so when one is changed the otherone is automatically
# changed too
def __init__(self, data, bins, distrType,
# bincenters=False,
fixGaps=True):
if type(data).__name__ == 'NoneType':
self.data = pd.DataFrame()
else:
self.data = data
self.bins = bins
self.__index_of_refraction = None
self.__growth_factor = None
# if type(bincenters) == np.ndarray:
# self.bincenters = bincenters
# else:
# self.bincenters = (bins[1:] + bins[:-1]) / 2.
# self.binwidth = (bins[1:] - bins[:-1])
self.distributionType = distrType
if fixGaps:
self.fillGaps()
@property
def bins(self):
return self.__bins
@bins.setter
def bins(self,array):
bins_st = array.astype(int).astype(str)
col_names = []
for e,i in enumerate(bins_st):
if e == len(bins_st) - 1:
break
col_names.append(bins_st[e] + '-' + bins_st[e+1])
self.data.columns = col_names
self.__bins = array
self.__bincenters = (array[1:] + array[:-1]) / 2.
self.__binwidth = (array[1:] - array[:-1])
@property
def bincenters(self):
return self.__bincenters
@property
def binwidth(self):
return self.__binwidth
@property
def index_of_refraction(self):
return self.__index_of_refraction
@index_of_refraction.setter
def index_of_refraction(self,n):
# if not self.__index_of_refraction:
self.__index_of_refraction = n
# elif self.__index_of_refraction:
# txt = """Security stop. This is to prevent you from unintentionally changing this value.
# The index of refraction is already set to %.2f, either by you or by another function, e.g. apply_hygro_growth.
# If you really want to change the value do it by setting the __index_of_refraction attribute."""%self.index_of_refraction
# raise ValueError(txt)
@property
def growth_factor(self):
return self.__growth_factor
def apply_hygro_growth(self, kappa, RH, how = 'shift_bins'):
"""
how: string ['shift_bins', 'shift_data']
If 'shift_bins', the growth factor has to be the same for all lines in
data (important for timeseries and vertical profiles).
If the growth factor changes from line to line (as is probably the case for TS and LS) you want to use
'shift_data'.
"""
if not self.index_of_refraction:
txt = '''The index_of_refraction attribute of this sizedistribution has not been set yet, please do so first!'''
raise ValueError(txt)
# out_I = {}
dist_g = self.copy()
dist_g.convert2numberconcentration()
gf,n_mix = hg.kappa_simple(kappa, RH, n = dist_g.index_of_refraction)
# out_I['growth_factor'] = gf
nat = ['int', 'float']
if type(kappa).__name__ in nat or type(RH).__name__ in nat:
if how != 'shift_bins':
txt = "When kappa or RH ar not arrays 'how' has to be equal to 'shift_bins'"
raise ValueError(txt)
if how == 'shift_bins':
if not isinstance(gf, (float,int)):
txt = '''If how is equal to 'shift_bins' RH has to be of type int or float.
It is %s'''%(type(RH).__name__)
raise TypeError(txt)
dist_g.bins = dist_g.bins * gf
dist_g.__index_of_refraction = n_mix
elif how == 'shift_data':
test = dist_g._hygro_growht_shift_data(dist_g.data.values[0],dist_g.bins,gf.max())
bin_num = test['data'].shape[0]
data_new = np.zeros((dist_g.data.shape[0],bin_num))
for e,i in enumerate(dist_g.data.values):
out = dist_g._hygro_growht_shift_data(i,dist_g.bins,gf[e])
dt = out['data']
diff = bin_num - dt.shape[0]
dt = np.append(dt, np.zeros(diff))
data_new[e] = dt
df = pd.DataFrame(data_new)
df.index = dist_g.data.index
# return df
dist_g = SizeDist(df, test['bins'], dist_g.distributionType)
df = pd.DataFrame(n_mix, columns = ['index_of_refraction'])
df.index = dist_g.data.index
dist_g.index_of_refraction = df
else:
txt = '''How has to be either 'shift_bins' or 'shift_data'.'''
raise ValueError(txt)
dist_g.__growth_factor = pd.DataFrame(gf, index = dist_g.data.index, columns = ['Growth_factor'])
# out_I['size_distribution'] = dist_g
return dist_g
def _hygro_growht_shift_data(self, data, bins, gf):
"""data: 1D array
bins: 1D array
gf: float"""
bins = bins.copy()
if np.any(gf < 1):
txt = 'Growth factor must be equal or larger than 1. No shrinking!!'
raise ValueError(txt)
shifted = bins*gf
ml = array_tools.find_closest(bins, shifted, how='closest_low')
mh = array_tools.find_closest(bins, shifted, how='closest_high')
if np.any((mh - ml) > 1):
raise ValueError('shifted bins spans over more than two of the original bins, programming required ;-)')
no_extra_bins = bins[ml].shape[0] - np.unique(bins[ml]).shape[0] + 1
######### Ad bins to shift data into
last_two = np.log10(bins[- (no_extra_bins + 1):])
step_width = last_two[-1] - last_two[-2]
new_bins = np.zeros(no_extra_bins)
for i in range(no_extra_bins):
new_bins[i] = np.log10(bins[-1]) + ((i + 1) * step_width)
newbins = 10**new_bins
bins = np.append(bins,newbins)
shifted = (bins * gf)[:-no_extra_bins]
######## and again ########################
ml = array_tools.find_closest(bins, shifted, how='closest_low')
mh = array_tools.find_closest(bins, shifted, how='closest_high')
if np.any((mh - ml) > 1):
raise ValueError('shifted bins spans over more than two of the original bins, programming required ;-)')
##### percentage of particles moved to next bin ...')
shifted_w = shifted[1:] - shifted[:-1]
fract_first = (bins[mh] - shifted)[:-1]/shifted_w
fract_last = (shifted - bins[ml])[1:]/shifted_w
data_new = np.zeros(data.shape[0]+ no_extra_bins)
data_new[no_extra_bins - 1:-1] += fract_first * data
data_new[no_extra_bins:] += fract_last * data
# data = np.append(data, np.zeros(no_extra_bins))
out = {}
out['bins'] = bins
out['data'] = data_new
out['num_extr_bins'] = no_extra_bins
return out
# def grow_particles(self, shift=1):
# """This function shifts the data by "shift" columns to the right
# Argurments
# ----------
# shift: int.
# number of columns to shift.
#
# Returns
# -------
# New dist_LS instance
# Growth ratio (mean,std) """
#
# dist_grow = self.copy()
# gf = dist_grow.bincenters[shift:] / dist_grow.bincenters[:-shift]
# gf_mean = gf.mean()
# gf_std = gf.std()
#
# shape = dist_grow.data.shape[1]
# dist_grow.data[:] = 0
# dist_grow.data.iloc[:, shift:] = self.data.values[:, :shape - shift]
#
# return dist_grow, (gf_mean, gf_std)
def calculate_optical_properties(self, wavelength, n):
out = _calculate_optical_properties(self, wavelength, n)
return out
def fillGaps(self, scale=1.1):
"""
Finds gaps in dataset (e.g. when instrument was shut of) and fills them with zeros.
It adds one line of zeros to the beginning and one to the end of the gap.
Therefore the gap is visible as zeros instead of the interpolated values
Parameters
----------
scale: float, optional
Threshold factor. A gap is assumed wherever the spacing between consecutive index values exceeds `scale` times the median spacing.
"""
diff = self.data.index[1:].values - self.data.index[0:-1].values
threshold = np.median(diff) * scale
where = np.where(diff > threshold)[0]
if len(where) != 0:
warnings.warn('The dataset provided had %s gaps' % len(where))
gap_start = self.data.index[where]
gap_end = self.data.index[where + 1]
for gap_s in gap_start:
self.data.loc[gap_s + threshold] = np.zeros(self.bincenters.shape)
for gap_e in gap_end:
self.data.loc[gap_e - threshold] = np.zeros(self.bincenters.shape)
self.data = self.data.sort_index()
return
def fit_normal(self, log=True, p0=[10, 180, 0.2]):
""" Fits a single normal distribution to each line in the data frame.
Returns
-------
pandas DataFrame instance (also added to namespace as data_fit_normal)
"""
sd = self.copy()
if sd.distributionType != 'dNdlogDp':
if sd.distributionType == 'calibration':
pass
else:
warnings.warn(
"Size distribution is not in 'dNdlogDp'. I temporarily converted the distribution to conduct the fitting. If that is not what you want, change the code!")
sd = sd.convert2dNdlogDp()
n_lines = sd.data.shape[0]
amp = np.zeros(n_lines)
pos = np.zeros(n_lines)
sigma = np.zeros(n_lines)
sigma_high = np.zeros(n_lines)
sigma_low = np.zeros(n_lines)
for e, lay in enumerate(sd.data.values):
try:
fit_res = fit_normal_dist(sd.bincenters, lay, log=log, p0=p0)
except (ValueError, RuntimeError):
fit_res = [np.nan, np.nan, np.nan, np.nan, np.nan]
amp[e] = fit_res[0]
pos[e] = fit_res[1]
sigma[e] = fit_res[2]
sigma_high[e] = fit_res[3]
sigma_low[e] = fit_res[4]
df = pd.DataFrame()
df['Amp'] = pd.Series(amp)
df['Pos'] = pd.Series(pos)
df['Sigma'] = pd.Series(sigma)
df['Sigma_high'] = pd.Series(sigma_high)
df['Sigma_low'] = pd.Series(sigma_low)
# df.index = self.layercenters
self.data_fit_normal = df
return self.data_fit_normal
def get_particle_concentration(self):
""" Returns the sum of particles per line in data
Returns
-------
int: if data has only one line
pandas.DataFrame: else """
sd = self.convert2numberconcentration()
particles = np.zeros(sd.data.shape[0])
for e, line in enumerate(sd.data.values):
particles[e] = line.sum()
if sd.data.shape[0] == 1:
return particles[0]
else:
df = pd.DataFrame(particles, index=sd.data.index, columns=['Count_rate'])
return df
def plot(self,
showMinorTickLabels=True,
removeTickLabels=["700", "900"],
fit_res=True,
fit_res_scale = 'log',
ax=None,
):
"""
Plots and returns f,a (figure, axis).
Arguments
---------
showMinorTickLabels: bool [True], optional
whether minor tick labels are labeled
removeTickLabels: list of string ["700", "900"], optional
list of tick labels that ought to be removed (in case they are overlapping)
fit_res: bool [True], optional
allows plotting of fitresults if fit_normal was previously executed
fit_res_scale: string ['log'], optional
If fit_normal was executed with log=False, set this to 'linear'.
ax: axis object [None], optional
option to provide axis to plot on
Returns
-------
Handles to the figure and axes of the figure.
"""
if type(ax).__name__ in axes_types:
a = ax
f = a.get_figure()
else:
f, a = plt.subplots()
g, = a.plot(self.bincenters, self.data.loc[0], color=plt_tools.color_cycle[0], linewidth=2, label='exp.')
g.set_drawstyle('steps-mid')
a.set_xlabel('Particle diameter (nm)')
label = get_label(self.distributionType)
a.set_ylabel(label)
a.set_xscale('log')
if fit_res:
if 'data_fit_normal' in dir(self):
amp, pos, sigma = self.data_fit_normal.values[0, :3]
if fit_res_scale == 'log':
normal_dist = math_functions.gauss(np.log10(self.bincenters), amp, np.log10(pos), sigma)
elif fit_res_scale =='linear':
normal_dist = math_functions.gauss(self.bincenters, amp, pos, sigma)
else:
txt = '"fit_res_scale has to be either log or linear'
raise ValueError(txt)
a.plot(self.bincenters, normal_dist, color=plt_tools.color_cycle[1], linewidth=2,
label='fit with norm. dist.')
a.legend()
return f, a
def convert2dNdDp(self):
return self._convert2otherDistribution('dNdDp')
def convert2dNdlogDp(self):
return self._convert2otherDistribution('dNdlogDp')
def convert2dSdDp(self):
return self._convert2otherDistribution('dSdDp')
def convert2dSdlogDp(self):
return self._convert2otherDistribution('dSdlogDp')
def convert2dVdDp(self):
return self._convert2otherDistribution('dVdDp')
def convert2dVdlogDp(self):
return self._convert2otherDistribution('dVdlogDp')
def convert2numberconcentration(self):
return self._convert2otherDistribution('numberConcentration')
def copy(self):
return deepcopy(self)
def save_csv(self, fname, header=True):
if header:
raus = open(fname, 'w')
raus.write('bins = %s\n' % self.bins.tolist())
raus.write('distributionType = %s\n' % self.distributionType)
raus.write('objectType = %s\n' % (type(self).__name__))
raus.write('#\n')
raus.close()
self.data.to_csv(fname, mode='a')
return
def save_hdf(self, hdf, variable_name = None, info = '', force = False):
if variable_name:
table_name = '/atmPy/aerosols/sizedistribution/'+variable_name
if table_name in hdf.keys():
if not force:
txt = 'Table name (variable_name) exists. If you want to overwrite it set force to True.'
raise KeyError(txt)
else:
e = 0
while 1:
table_name = '/atmPy/aerosols/sizedistribution/'+ type(self).__name__ + '_%.3i'%e
if table_name in hdf.keys():
e+=1
else:
break
hdf.put(table_name, self.data)
storer = hdf.get_storer(table_name)
attrs = {}
attrs['variable_name'] = variable_name
attrs['info'] = info
attrs['type'] = type(self)
attrs['bins'] = self.bins
attrs['index_of_refraction'] = self.index_of_refraction
attrs['distributionType'] = self.distributionType
if 'layerbounderies' in dir(self):
attrs['layerbounderies'] = self.layerbounderies
storer.attrs.atmPy_attrs = attrs
if 'data_fit_normal' in dir(self):
table_name = table_name + '/data_fit_normal'
hdf.put(table_name, self.data_fit_normal)
storer = hdf.get_storer(table_name)
storer.attrs.atmPy_attrs = None
return hdf
def zoom_diameter(self, start=None, end=None):
sd = self.copy()
if start:
startIdx = array_tools.find_closest(sd.bins, start)
else:
startIdx = 0
if end:
endIdx = array_tools.find_closest(sd.bins, end)
else:
endIdx = len(self.bincenters)
# size_distr.binwidth = self.binwidth[startIdx:endIdx]
sd.data = self.data.iloc[:, startIdx:endIdx]
sd.bins = self.bins[startIdx:endIdx + 1]
# size_distr.bincenters = self.bincenters[startIdx:endIdx]
return sd
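# Conversion helpers used by _convert2otherDistribution:
# dN/dlogDp = dN/dDp * Dp * ln(10); per-particle surface is pi * Dp**2 and
# per-particle volume is (pi/6) * Dp**3.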
def _normal2log(self):
trans = (self.bincenters * np.log(10.))
return trans
def _2Surface(self):
trans = 4. * np.pi * (self.bincenters / 2.) ** 2
return trans
def _2Volume(self):
trans = 4. / 3. * np.pi * (self.bincenters / 2.) ** 3
return trans
def _convert2otherDistribution(self, distType, verbose=False):
dist = self.copy()
if dist.distributionType == distType:
if verbose:
warnings.warn(
'Distribution type is already %s. Output is an unchanged copy of the distribution' % distType)
return dist
if dist.distributionType == 'numberConcentration':
pass
elif distType == 'numberConcentration':
pass
elif dist.distributionType in distTypes['log normal']:
if distType in distTypes['log normal']:
if verbose:
print('both log normal')
else:
dist.data = dist.data / self._normal2log()
elif dist.distributionType in distTypes['natural']:
if distType in distTypes['natural']:
if verbose:
print('both natural')
else:
dist.data = dist.data * self._normal2log()
else:
raise ValueError('%s is not an option' % distType)
if dist.distributionType == 'numberConcentration':
pass
elif distType == 'numberConcentration':
pass
elif dist.distributionType in distTypes['number']:
if distType in distTypes['number']:
if verbose:
print('both number')
else:
if distType in distTypes['surface']:
dist.data *= self._2Surface()
elif distType in distTypes['volume']:
dist.data *= self._2Volume()
else:
raise ValueError('%s is not an option' % distType)
elif dist.distributionType in distTypes['surface']:
if distType in distTypes['surface']:
if verbose:
print('both surface')
else:
if distType in distTypes['number']:
dist.data /= self._2Surface()
elif distType in distTypes['volume']:
dist.data *= self._2Volume() / self._2Surface()
else:
raise ValueError('%s is not an option' % distType)
elif dist.distributionType in distTypes['volume']:
if distType in distTypes['volume']:
if verbose:
print('both volume')
else:
if distType in distTypes['number']:
dist.data /= self._2Volume()
elif distType in distTypes['surface']:
dist.data *= self._2Surface() / self._2Volume()
else:
raise ValueError('%s is not an option' % distType)
else:
raise ValueError('%s is not an option' % distType)
if distType == 'numberConcentration':
dist = dist.convert2dNdDp()
dist.data *= self.binwidth
elif dist.distributionType == 'numberConcentration':
dist.data = dist.data / self.binwidth
dist.distributionType = 'dNdDp'
dist = dist._convert2otherDistribution(distType)
dist.distributionType = distType
if verbose:
print('converted from %s to %s' % (self.distributionType, dist.distributionType))
return dist
class SizeDist_TS(SizeDist):
"""Returns a SizeDistribution_TS instance.
Parameters:
-----------
data: pandas dataFrame with
- column names (each name is something like this: '150-200')
- index is time (at some point this should be arbitrary, convertible to altitude for example?)
unit conventions:
- diameters: nanometers
- flowrates: cc (otherwise, axis labels need to be adjusted and caution needs to be taken when dealing with AOD)
distributionType:
log normal: 'dNdlogDp','dSdlogDp','dVdlogDp'
natural: 'dNdDp','dSdDp','dVdDp'
number: 'dNdlogDp', 'dNdDp', 'numberConcentration'
surface: 'dSdlogDp','dSdDp'
volume: 'dVdlogDp','dVdDp'
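Example
-------
A minimal sketch with made-up numbers (two time stamps, two bins)::

    import numpy as np
    import pandas as pd
    idx = pd.to_datetime(['2015-01-01 00:00:00', '2015-01-01 00:00:10'])
    data = pd.DataFrame([[50., 30.], [40., 35.]], index=idx)
    ts = SizeDist_TS(data, np.array([100., 200., 300.]), 'dNdlogDp', fixGaps=False)
    # ts.get_particle_concentration() returns a DataFrame indexed by time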
"""
def fit_normal(self, log=True, p0=[10, 180, 0.2]):
""" Fits a single normal distribution to each line in the data frame.
Returns
-------
pandas DataFrame instance (also added to namespace as data_fit_normal)
"""
super(SizeDist_TS, self).fit_normal(log=log, p0=p0)
self.data_fit_normal.index = self.data.index
return self.data_fit_normal
def _getXYZ(self):
"""
This will create three arrays, so when plotted with pcolor each pixel will represent the exact bin width
"""
binArray = np.repeat(np.array([self.bins]), self.data.index.shape[0], axis=0)
timeArray = np.repeat(np.array([self.data.index.values]), self.bins.shape[0], axis=0).transpose()
ext = np.array([np.zeros(self.data.index.values.shape)]).transpose()
Z = np.append(self.data.values, ext, axis=1)
return timeArray, binArray, Z
def get_timespan(self):
return self.data.index.min(), self.data.index.max()
# TODO: Fix plot options such as showMinorTickLabels
def plot(self,
vmax=None,
vmin=None,
norm='linear',
showMinorTickLabels=True,
# removeTickLabels=["700", "900"],
ax=None,
fit_pos=True,
cmap=plt_tools.get_colorMap_intensity(),
colorbar=True):
""" plots an intensity plot of all data
Arguments
---------
scale (optional): ('log',['linear']) - defines how the z-direction is scaled
vmax
vmin
show_minor_tickLabels:
cma:
fit_pos: bool[True]. Optional
plots the position of a fitted normal distribution onto the plot.
in order for this to work execute fit_normal
ax (optional): axes instance [None] - option to plot on existing axes
Returns
-------
f,a,pc,cb (figure, axis, pcolormeshInstance, colorbar)
"""
X, Y, Z = self._getXYZ()
Z = np.ma.masked_invalid(Z)
if type(ax).__name__ in axes_types:
a = ax
f = a.get_figure()
else:
f, a = plt.subplots()
f.autofmt_xdate()
if norm == 'log':
norm = LogNorm()
elif norm == 'linear':
norm = None
pc = a.pcolormesh(X, Y, Z, vmin=vmin, vmax=vmax, norm=norm, cmap=cmap)
a.set_yscale('log')
a.set_ylim((self.bins[0], self.bins[-1]))
a.set_xlabel('Time (UTC)')
a.get_yaxis().set_tick_params(direction='out', which='both')
a.get_xaxis().set_tick_params(direction='out', which='both')
if self.distributionType == 'calibration':
a.set_ylabel('Amplitude (digitizer bins)')
else:
a.set_ylabel('Diameter (nm)')
if colorbar:
cb = f.colorbar(pc)
label = get_label(self.distributionType)
cb.set_label(label)
else:
cb = get_label(self.distributionType)
# if self.distributionType != 'calibration':
# a.yaxis.set_major_formatter(plt.FormatStrFormatter("%i"))
# f.canvas.draw() # this is important, otherwise the ticks (at least in case of minor ticks) are not created yet
if showMinorTickLabels:
minf = plt_tools.get_formatter_minor_log()
a.yaxis.set_minor_formatter(minf)
# a.yaxis.set_minor_formatter(plt.FormatStrFormatter("%i"))
# ticks = a.yaxis.get_minor_ticks()
# for i in ticks:
# if i.label.get_text() in removeTickLabels:
# i.label.set_visible(False)
if fit_pos:
if 'data_fit_normal' in dir(self):
a.plot(self.data.index, self.data_fit_normal.Pos, color='m', linewidth=2, label='normal dist. center')
leg = a.legend(fancybox=True, framealpha=0.5)
leg.draw_frame(True)
return f, a, pc, cb
def plot_fitres(self):
""" Plots the results from fit_normal"""
f, a = plt.subplots()
data = self.data_fit_normal.dropna()
a.fill_between(data.index, data.Sigma_high, data.Sigma_low,
color=plt_tools.color_cycle[0],
alpha=0.5,
)
a.plot(data.index.values, data.Pos.values, color=plt_tools.color_cycle[0], linewidth=2, label='center')
# data.Pos.plot(ax=a, color=plt_tools.color_cycle[0], linewidth=2, label='center')
a.legend(loc=2)
a.set_ylabel('Particle diameter (nm)')
a.set_xlabel('Altitude (m)')
a2 = a.twinx()
# data.Amp.plot(ax=a2, color=plt_tools.color_cycle[1], linewidth=2, label='amplitude')
a2.plot(data.index.values, data.Amp.values, color=plt_tools.color_cycle[1], linewidth=2, label='amplitude')
a2.legend()
a2.set_ylabel('Amplitude - %s' % (get_label(self.distributionType)))
f.autofmt_xdate()
return f, a, a2
def plot_particle_concentration(self, ax=None, label=None):
"""Plots the particle rate as a function of time.
Parameters
----------
ax: matplotlib.axes instance, optional
perform plot on these axes.
Returns
-------
matplotlib.axes instance
"""
if type(ax).__name__ in axes_types:
color = plt_tools.color_cycle[len(ax.get_lines())]
f = ax.get_figure()
else:
f, ax = plt.subplots()
color = plt_tools.color_cycle[0]
# layers = self.convert2numberconcentration()
particles = self.get_particle_concentration().dropna()
ax.plot(particles.index.values, particles.Count_rate.values, color=color, linewidth=2)
if label:
ax.get_lines()[-1].set_label(label)
ax.legend()
ax.set_xlabel('Time (UTC)')
ax.set_ylabel('Particle number concentration (cm$^{-3})$')
if particles.index.dtype.type.__name__ == 'datetime64':
f.autofmt_xdate()
return ax
def zoom_time(self, start=None, end=None):
"""
Returns a copy of the size distribution truncated to the time interval between `start` and `end`.
"""
dist = self.copy()
dist.data = dist.data.truncate(before=start, after=end)
return dist
def average_overTime(self, window='1S'):
"""returns a copy of the sizedistribution_TS with reduced size by averaging over a given window
Arguments
---------
window: str ['1S']. Optional
window over which to average. For aliases see
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
Returns
-------
SizeDistribution_TS instance
copy of current instance with resampled data frame
"""
dist = self.copy()
window = window
dist.data = dist.data.resample(window, closed='right', label='right')
if dist.distributionType == 'calibration':
dist.data.values[np.where(np.isnan(self.data.values))] = 0
return dist
def average_overAllTime(self):
"""
averages over the entire dataFrame and returns a single sizedistribution (numpy.ndarray)
"""
singleHist = np.zeros(self.data.shape[1])
for i in range(self.data.shape[1]):
line = self.data.values[:, i]
singleHist[i] = np.average(line[~np.isnan(line)])
data = pd.DataFrame(np.array([singleHist]), columns=self.data.columns)
avgDist = SizeDist(data, self.bins, self.distributionType)
return avgDist
def convert2layerseries(self, hk, layer_thickness=10, force=False):
"""convertes the time series to a layer series.
Note
----
nan values are excluded when an average is taken over the time that corresponds to the particular layer
(altitude). If there are only nan values, nan is returned and there is a gap in the layer series.
The housekeeping instance has to have a column called "Altitude" which is monotonically increasing or decreasing.
Arguments
---------
hk: housekeeping instance
layer_thickness (optional): [10] thickness of each generated layer in meter"""
if any(np.isnan(hk.data.Altitude)):
txt = """The Altitude contains nan values. Either fix this first, eg. with pandas interpolate function"""
raise ValueError(txt)
if ((hk.data.Altitude.values[1:] - hk.data.Altitude.values[:-1]).min() < 0) and (
(hk.data.Altitude.values[1:] - hk.data.Altitude.values[:-1]).max() > 0):
if force:
hk.data = hk.data.sort(columns='Altitude')
else:
txt = '''Given altitude data is not monotonic. This is not possible (yet). Use force if you
know what you are doing'''
raise ValueError(txt)
start_h = round(hk.data.Altitude.values.min() / layer_thickness) * layer_thickness
end_h = round(hk.data.Altitude.values.max() / layer_thickness) * layer_thickness
layer_edges = np.arange(start_h, end_h, layer_thickness)
empty_frame = pd.DataFrame(columns=self.data.columns)
lays = SizeDist_LS(empty_frame, self.bins, self.distributionType, None)
for e, end_h_l in enumerate(layer_edges[1:]):
start_h_l = layer_edges[e]
layer = hk.data.Altitude.iloc[
np.where(np.logical_and(start_h_l < hk.data.Altitude.values, hk.data.Altitude.values < end_h_l))]
start_t = layer.index.min()
end_t = layer.index.max()
dist_tmp = self.zoom_time(start=start_t, end=end_t)
avrg = dist_tmp.average_overAllTime()
# return avrg,lays
lays.add_layer(avrg, (start_h_l, end_h_l))
lays.parent_dist_TS = self
lays.parent_timeseries = hk
data = hk.data.copy()
data['Time_UTC'] = data.index
data.index = data.Altitude
data = data.sort_index()
if not data.index.is_unique: #this is needed in case there are duplicate indeces
grouped = data.groupby(level = 0)
data = grouped.last()
lays.housekeeping = data
data = data.reindex(lays.layercenters,method = 'nearest')
lays.housekeeping = vertical_profile.VerticalProfile(data)
return lays
class SizeDist_LS(SizeDist):
"""
Parameters
----------
data: pandas DataFrame ...
bins: array
distributionType: str
layerbounderies: array shape(n_layers,2)
OLD
---
data: pandas dataFrame with
- column names (each name is something like this: '150-200')
- index is altitude (at some point this should be arbitrary)
unit conventions:
- diameters: nanometers
- flowrates: cc (otherwise, axis labels need to be adjusted and caution needs to be taken when dealing with AOD)
distributionType:
log normal: 'dNdlogDp','dSdlogDp','dVdlogDp'
natural: 'dNdDp','dSdDp','dVdDp'
number: 'dNdlogDp', 'dNdDp', 'numberConcentration'
surface: 'dSdlogDp','dSdDp'
volume: 'dVdlogDp','dVdDp'
"""
def __init__(self, data, bins, distributionType, layerbounderies, fixGaps=True):
super(SizeDist_LS, self).__init__(data, bins, distributionType, fixGaps=fixGaps)
if type(layerbounderies).__name__ == 'NoneType':
self.layerbounderies = np.empty((0, 2))
# self.layercenters = np.array([])
else:
self.layerbounderies = layerbounderies
@property
def layercenters(self):
return self.__layercenters
@property
def layerbounderies(self):
return self.__layerbouderies
@layerbounderies.setter
def layerbounderies(self,lb):
self.__layerbouderies = lb
# newlb = np.unique(self.layerbounderies.flatten()) # the unique is sorting the data, which is not reallyt what we want!
# self.__layercenters = (newlb[1:] + newlb[:-1]) / 2.
self.__layercenters = (self.layerbounderies[:,0] + self.layerbounderies[:,1]) / 2.
self.data.index = self.layercenters
def apply_hygro_growth(self, kappa, RH = None, how='shift_data'):
""" see docstring of atmPy.sizedistribution.SizeDist for more information
Parameters
----------
kappa: float
RH: bool, float, or array.
If None, RH from self.housekeeping will be taken"""
if not np.any(RH):
pandas_tools.ensure_column_exists(self.housekeeping.data, 'Relative_humidity')
RH = self.housekeeping.data.Relative_humidity.values
# return kappa,RH,how
sd = super(SizeDist_LS,self).apply_hygro_growth(kappa,RH,how = how)
# size_distr = out['size_distribution']
# gf = out['growth_factor']
sd_LS = SizeDist_LS(sd.data, sd.bins, sd.distributionType, self.layerbounderies, fixGaps=False)
sd_LS.index_of_refraction = sd.index_of_refraction
sd_LS._SizeDist__growth_factor = sd.growth_factor
# out['size_distribution'] = sd_LS
return sd_LS
def calculate_angstromex(self, wavelengths=[460.3, 550.4, 671.2, 860.7], n=1.455):
"""Calculates the Anstrome coefficience (overall, layerdependent)
Parameters
----------
wavelengths: array-like, optional.
the angstrom coefficient will be calculated based on the AOD of these wavelength values (in nm)
n: float, optional.
index of refraction used in the underlying mie calculation.
Returns
-------
Angstrom exponent, float
List containing the OpticalProperties instances for the different wavelengths
New Attributes
--------------
angstromexp: float
the resulting angstrom exponent
angstromexp_fit: pandas instance.
AOD and fit result as a function of wavelength
angstromexp_LS: pandas instance.
angstrom exponent as a function of altitude
"""
AOD_list = []
AOD_dict = {}
for w in wavelengths:
AOD = self.calculate_optical_properties(w, n) # calculate_AOD(wavelength=w, n=n)
# opt= sizedistribution.OpticalProperties(AOD, dist_LS.bins)
AOD_list.append({'wavelength': w, 'opt_inst': AOD})
AOD_dict['%.1f' % w] = AOD
eg = AOD_dict[list(AOD_dict.keys())[0]]
wls = AOD_dict.keys()
wls_a = np.array(list(AOD_dict.keys())).astype(float)
ang_exp = []
ang_exp_std = []
ang_exp_r_value = []
for e, el in enumerate(eg.layercenters):
AODs = np.array([AOD_dict[wl].data_orig['AOD_layer'].values[e][0] for wl in wls])
slope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(wls_a), np.log10(AODs))
ang_exp.append(-slope)
ang_exp_std.append(std_err)
ang_exp_r_value.append(r_value)
# break
ang_exp = np.array(ang_exp)
ang_exp_std = np.array(ang_exp_std)
ang_exp_r_value = np.array(ang_exp_r_value)
tmp = np.array([[float(i), AOD_dict[i].AOD] for i in AOD_dict.keys()])
wavelength, AOD = tmp[np.argsort(tmp[:, 0])].transpose()
slope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(wavelength), np.log10(AOD))
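# The Angstrom relation AOD(lambda) ~ lambda**(-alpha) is linear in log-log space,
# so the negative slope of the log10(AOD) vs. log10(wavelength) regression is the
# Angstrom exponent. Illustrative numbers: AOD(450 nm) = 0.20 and AOD(700 nm) = 0.12
# give alpha = -ln(0.12/0.20) / ln(700/450) ~ 1.16.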
self.angstromexp = -slope
aod_fit = np.log10(wavelengths) * slope + intercept
self.angstromexp_fit = pd.DataFrame(np.array([AOD, 10 ** aod_fit]).transpose(), index=wavelength,
columns=['data', 'fit'])
self.angstromexp_LS = pd.DataFrame(np.array([ang_exp, ang_exp_std, ang_exp_r_value]).transpose(),
index=self.layercenters,
columns=['ang_exp', 'standard_dif', 'correlation_coef'])
self.angstromexp_LS.index.name = 'layercenter'
return -slope, AOD_dict
def calculate_optical_properties(self, wavelength, n = None, noOfAngles=100):
if not n:
n = self.index_of_refraction
if not n:
txt = 'Refractive index is not specified. Either set self.index_of_refraction or set optional parameter n.'
raise ValueError(txt)
out = _calculate_optical_properties(self, wavelength, n, aod = True, noOfAngles=noOfAngles)
opt_properties = OpticalProperties(out, self.bins)
opt_properties.wavelength = wavelength
opt_properties.index_of_refractio = n
opt_properties.angular_scatt_func = out['angular_scatt_func'] # this is the former phase_fct; since it is the angular scattering intensity, the name was changed
opt_properties.parent_dist_LS = self
return opt_properties
def add_layer(self, sd, layerboundery):
"""
Adds a size distribution instance to the layer series.
Parameters
----------
sd: SizeDist instance
The size distribution of the layer to be added.
layerboundery: tuple or list of length 2
(bottom, top) altitude of the layer in meters.
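Example
-------
A minimal sketch (assumes `sd` is an existing SizeDist instance whose bins and
distribution type match this layer series)::

    lays = SizeDist_LS(pd.DataFrame(columns=sd.data.columns), sd.bins,
                       sd.distributionType, None)
    lays.add_layer(sd, (0., 100.))
    lays.add_layer(sd, (100., 200.))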
"""
if len(layerboundery) != 2:
raise ValueError('layerboundery has to be of length 2')
sd = sd._convert2otherDistribution(self.distributionType)
layerbounderies = np.append(self.layerbounderies, np.array([layerboundery]), axis=0)
layerbounderiesU = np.unique(layerbounderies)
if (np.where(layerbounderiesU == layerboundery[1])[0] - np.where(layerbounderiesU == layerboundery[0])[0])[
0] != 1:
raise ValueError('The new layer is overlapping with an existing layer!')
self.data = self.data.append(sd.data)
self.layerbounderies = layerbounderies
# self.layerbounderies.sort(axis=0)
#
# layercenter = np.array(layerboundery).sum() / 2.
# self.layercenters = np.append(self.layercenters, layercenter)
# self.layercenters.sort()
# size_distr.data.index = np.array([layercenter])
# self.data = self.data.append(size_distr.data)
return
def _getXYZ(self):
"""
This will create three arrays, so when plotted with pcolor each pixel will represent the exact bin width
"""
binArray = np.repeat(np.array([self.bins]), self.data.index.shape[0], axis=0)
layerArray = np.repeat(np.array([self.data.index.values]), self.bins.shape[0], axis=0).transpose()
ext = np.array([np.zeros(self.data.index.values.shape)]).transpose()
Z = np.append(self.data.values, ext, axis=1)
return layerArray, binArray, Z
def plot_eachLayer(self, a=None, normalize=False):
"""
Plots the distribution of each layer in one plot.
Returns
-------
Handles to the figure and axes of the plot
"""
if not a:
f, a = plt.subplots()
else:
f = None
pass
for iv in self.data.index.values:
if normalize:
a.plot(self.bincenters, self.data.loc[iv, :] / self.data.loc[iv, :].max(), label='%i' % iv)
else:
a.plot(self.bincenters, self.data.loc[iv, :], label='%i' % iv)
a.set_xlabel('Particle diameter (nm)')
a.set_ylabel(get_label(self.distributionType))
a.legend()
a.semilogx()
return f, a
def plot(self, vmax=None, vmin=None, scale='linear', show_minor_tickLabels=True,
removeTickLabels=["500", "700", "800", "900"],
plotOnTheseAxes=False,
cmap=plt_tools.get_colorMap_intensity(),
fit_pos=True,
ax=None,
colorbar = True):
""" plots and returns f,a,pc,cb (figure, axis, pcolormeshInstance, colorbar)
Arguments
---------
scale (optional): ('log',['linear']) - defines how the z-direction is scaled
vmax
vmin
show_minor_tickLabels:
cma:
fit_pos (optional): bool [True] - plots the position of a fitted normal distribution onto the plot.
in order for this to work execute fit_normal
ax (optional): axes instance [None] - option to plot on existing axes
"""
X, Y, Z = self._getXYZ()
Z = np.ma.masked_invalid(Z)
if type(ax).__name__ in axes_types:
a = ax
f = a.get_figure()
else:
f, a = plt.subplots()
# f.autofmt_xdate()
if scale == 'log':
scale = LogNorm()
elif scale == 'linear':
scale = None
pc = a.pcolormesh(Y, X, Z, vmin=vmin, vmax=vmax, norm=scale, cmap=cmap)
a.set_yscale('linear')
a.set_xscale('log')
a.set_xlim((self.bins[0], self.bins[-1]))
a.set_ylabel('Altitude (m)')
a.set_ylim((self.layercenters[0], self.layercenters[-1]))
a.set_xlabel('Diameter (nm)')
a.get_yaxis().set_tick_params(direction='out', which='both')
a.get_xaxis().set_tick_params(direction='out', which='both')
if colorbar:
cb = f.colorbar(pc)
label = get_label(self.distributionType)
cb.set_label(label)
else:
cb = None
if self.distributionType != 'calibration':
a.xaxis.set_minor_formatter(plt.FormatStrFormatter("%i"))
a.xaxis.set_major_formatter(plt.FormatStrFormatter("%i"))
f.canvas.draw() # this is important, otherwise the ticks (at least in case of minor ticks) are not created yet
ticks = a.xaxis.get_minor_ticks()
for i in ticks:
if i.label.get_text() in removeTickLabels:
i.label.set_visible(False)
if fit_pos:
if 'data_fit_normal' in dir(self):
a.plot(self.data_fit_normal.Pos, self.layercenters, color='m', linewidth=2, label='normal dist. center')
leg = a.legend(fancybox=True, framealpha=0.5)
leg.draw_frame(True)
return f, a, pc, cb
#todo: when you want to plot one plot on existing one it will rotated it twice!
def plot_particle_concentration(self, ax=None, label=None):
"""Plots the particle concentration as a function of altitude.
Parameters
----------
ax: matplotlib.axes instance, optional
perform plot on these axes.
rotate: bool.
When True the y-axes is the Altitude.
Returns
-------
matplotlib.axes instance
"""
# ax = SizeDist_TS.plot_particle_concetration(self, ax=ax, label=label)
# ax.set_xlabel('Altitude (m)')
#
# if rotate:
# g = ax.get_lines()[-1]
# x, y = g.get_xydata().transpose()
# xlim = ax.get_xlim()
# ylim = ax.get_ylim()
# ax.set_xlim(ylim)
# ax.set_ylim(xlim)
# g.set_xdata(y)
# g.set_ydata(x)
# xlabel = ax.get_xlabel()
# ylabel = ax.get_ylabel()
# ax.set_xlabel(ylabel)
# ax.set_ylabel(xlabel)
if type(ax).__name__ in axes_types:
color = plt_tools.color_cycle[len(ax.get_lines())]
f = ax.get_figure()
else:
f, ax = plt.subplots()
color = plt_tools.color_cycle[0]
# layers = self.convert2numberconcentration()
particles = self.get_particle_concentration().dropna()
ax.plot(particles.Count_rate.values, particles.index.values, color=color, linewidth=2)
if label:
ax.get_lines()[-1].set_label(label)
ax.legend()
ax.set_ylabel('Altitude (m)')
ax.set_xlabel('Particle number concentration (cm$^{-3})$')
return ax
def plot_fitres(self, amp=True, rotate=True):
""" Plots the results from fit_normal
Arguments
---------
amp: bool.
if the amplitude is to be plotted
"""
f, a = plt.subplots()
a.fill_between(self.layercenters, self.data_fit_normal.Sigma_high, self.data_fit_normal.Sigma_low,
color=plt_tools.color_cycle[0],
alpha=0.5,
)
self.data_fit_normal.Pos.plot(ax=a, color=plt_tools.color_cycle[0], linewidth=2)
g = a.get_lines()[-1]
g.set_label('Center of norm. dist.')
a.legend(loc=2)
a.set_ylabel('Particle diameter (nm)')
a.set_xlabel('Altitude (m)')
if amp:
a2 = a.twinx()
self.data_fit_normal.Amp.plot(ax=a2, color=plt_tools.color_cycle[1], linewidth=2)
g = a2.get_lines()[-1]
g.set_label('Amplitude of norm. dist.')
a2.legend()
a2.set_ylabel('Amplitude - %s' % (get_label(self.distributionType)))
else:
a2 = False
return f, a, a2
def plot_angstromex_fit(self):
if 'angstromexp_fit' not in dir(self):
raise ValueError('Execute function calculate_angstromex first!')
f, a = plt.subplots()
a.plot(self.angstromexp_fit.index, self.angstromexp_fit.data, 'o', color=plt_tools.color_cycle[0],
label='exp. data')
a.plot(self.angstromexp_fit.index, self.angstromexp_fit.fit, color=plt_tools.color_cycle[1], label='fit',
linewidth=2)
a.set_xlim((self.angstromexp_fit.index.min() * 0.95, self.angstromexp_fit.index.max() * 1.05))
a.set_ylim((self.angstromexp_fit.data.min() * 0.95, self.angstromexp_fit.data.max() * 1.05))
a.set_xlabel('Wavelength (nm)')
a.set_ylabel('AOD')
a.loglog()
a.xaxis.set_minor_formatter(plt.FormatStrFormatter("%i"))
a.yaxis.set_minor_formatter(plt.FormatStrFormatter("%.2f"))
return a
def plot_angstromex_LS(self, corr_coeff=False, std=False):
if 'angstromexp_fit' not in dir(self):
raise ValueError('Execute function calculate_angstromex first!')
f, a = plt.subplots()
a.plot(self.angstromexp_LS.index, self.angstromexp_LS.ang_exp, color=plt_tools.color_cycle[0], linewidth=2,
label='Angstrom exponent')
a.set_xlabel('Altitude (m)')
a.set_ylabel('Angstrom exponent')
if corr_coeff:
a.legend(loc=2)
a2 = a.twinx()
a2.plot(self.angstromexp_LS.index, self.angstromexp_LS.correlation_coef, color=plt_tools.color_cycle[1],
linewidth=2, label='corr_coeff')
a2.set_ylabel('Correlation coefficiant')
a2.legend(loc=1)
if std:
a.legend(loc=2)
a2 = a.twinx()
a2.plot(self.angstromexp_LS.index, self.angstromexp_LS.standard_dif, color=plt_tools.color_cycle[1],
linewidth=2, label='corr_coeff')
a2.set_ylabel('Standard deviation')
a2.legend(loc=1)
tmp = (self.angstromexp_LS.index.max() - self.angstromexp_LS.index.min()) * 0.05
a.set_xlim((self.angstromexp_LS.index.min() - tmp, self.angstromexp_LS.index.max() + tmp))
return a
def zoom_altitude(self, bottom, top):
"""'2014-11-24 16:02:30'"""
dist = self.copy()
dist.data = dist.data.truncate(before=bottom, after=top)
where = np.where(np.logical_and(dist.layercenters < top, dist.layercenters > bottom))
# dist.layercenters = dist.layercenters[where]
dist.layerbounderies = dist.layerbounderies[where]
if 'data_fit_normal' in dir(dist):
dist.data_fit_normal = dist.data_fit_normal.iloc[where]
return dist
# dist = self.copy()
# dist.data = dist.data.truncate(before=start, after = end)
# return dist
#
def average_overAltitude(self, window='1S'):
print('need fixn')
return False
# window = window
# self.data = self.data.resample(window, closed='right',label='right')
# if self.distributionType == 'calibration':
# self.data.values[np.where(np.isnan(self.data.values))] = 0
# return
def average_overAllAltitudes(self):
dataII = self.data.mean(axis=0)
out = pd.DataFrame(dataII).T
return SizeDist(out, self.bins, self.distributionType)
def fit_normal(self):
""" Fits a single normal distribution to each line in the data frame.
Returns
-------
pandas DataFrame instance (also added to namespace as data_fit_normal)
"""
super(SizeDist_LS, self).fit_normal()
self.data_fit_normal.index = self.layercenters
return self.data_fit_normal
# singleHist = np.zeros(self.data.shape[1])
# for i in xrange(self.data.shape[1]):
# line = self.data.values[:,i]
# singleHist[i] = np.average(line[~np.isnan(line)])
# return singleHist
#Todo: bins are redundand
# Todo: some functions should be switched of
class OpticalProperties(object):
def __init__(self, data, bins):
# self.data = data['extCoeffPerLayer']
self.data = data['extCoeff_perrow_perbin']
self.data_orig = data
self.AOD = data['AOD']
self.bins = bins
self.layercenters = self.data.index.values
self.asymmetry_parameter_LS = data['asymmetry_param']
# self.asymmetry_parameter_LS_alt = data['asymmetry_param_alt']
        # ToDo: to define a distribution type does not really make sense ... just to make the stolen plot function happy
self.distributionType = 'dNdlogDp'
def get_extinction_coeff_verticle_profile(self):
"""
        Creates a vertical profile of the extinction coefficient.
"""
ext = self.data.sum(axis=1)
ext = pd.DataFrame(ext, columns=['ext. coeff.'])
ext.index.name = 'Altitude'
out = ExtinctionCoeffVerticlProfile(ext, self, self.wavelength, self.index_of_refractio)
# out.wavelength = self.wavelength
# out.n = self.index_of_refractio
# out.parent = self
return out
def plot_AOD_cum(self, color=plt_tools.color_cycle[0], linewidth=2, ax=None, label='cumulative AOD',
extra_info=True):
if not ax:
f,a = plt.subplots()
else:
a = ax
# a = self.data_orig['AOD_cum'].plot(color=color, linewidth=linewidth, ax=ax, label=label)
g, = a.plot(self.data_orig['AOD_cum']['AOD per Layer'], self.data_orig['AOD_cum'].index, color=color, linewidth=linewidth, label=label)
# g = a.get_lines()[-1]
g.set_label(label)
a.legend()
# a.set_xlim(0, 3000)
a.set_ylabel('Altitude (m)')
a.set_xlabel('AOD')
txt = '''$\lambda = %s$ nm
n = %s
AOD = %.4f''' % (self.data_orig['wavelength'], self.data_orig['n'], self.data_orig['AOD'])
if extra_info:
a.text(0.7, 0.7, txt, transform=a.transAxes)
return a
def _getXYZ(self):
out = SizeDist_LS._getXYZ(self)
return out
def plot_extCoeffPerLayer(self,
vmax=None,
vmin=None,
scale='linear',
show_minor_tickLabels=True,
removeTickLabels=['500', '700', '800', '900'],
plotOnTheseAxes=False, cmap=plt_tools.get_colorMap_intensity(),
fit_pos=True,
ax=None):
f, a, pc, cb = SizeDist_LS.plot(self,
vmax=vmax,
vmin=vmin,
scale=scale,
show_minor_tickLabels=show_minor_tickLabels,
removeTickLabels=removeTickLabels,
plotOnTheseAxes=plotOnTheseAxes,
cmap=cmap,
fit_pos=fit_pos,
ax=ax)
cb.set_label('Extinction coefficient ($m^{-1}$)')
return f, a, pc, cb
class ExtinctionCoeffVerticlProfile(vertical_profile.VerticalProfile):
def __init__(self, ext, parent, wavelength, index_of_refraction):
super(ExtinctionCoeffVerticlProfile, self).__init__(ext)
self.parent = parent
self.wavelength = wavelength
self.index_of_refraction = index_of_refraction
def plot(self, *args, **kwargs):
a = super(ExtinctionCoeffVerticlProfile, self).plot(*args, **kwargs)
a.set_xlabel('Extinction coefficient (m$^{-1}$)')
return a
def simulate_sizedistribution(diameter=[10, 2500], numberOfDiameters=100, centerOfAerosolMode=200,
widthOfAerosolMode=0.2, numberOfParticsInMode=1000):
"""generates a numberconcentration of an aerosol layer which has a gaussian shape when plottet in dN/log(Dp).
However, returned is a numberconcentrations (simply the number of particles in each bin, no normalization)
Returns
Number concentration (#)
bin edges (nm)"""
start = diameter[0]
end = diameter[1]
noOfD = numberOfDiameters
centerDiameter = centerOfAerosolMode
width = widthOfAerosolMode
bins = np.linspace(np.log10(start), np.log10(end), noOfD)
binwidth = bins[1:] - bins[:-1]
bincenters = (bins[1:] + bins[:-1]) / 2.
dNDlogDp = plt.mlab.normpdf(bincenters, np.log10(centerDiameter), width)
extraScale = 1
scale = 1
while 1:
NumberConcent = dNDlogDp * binwidth * scale * extraScale
if scale != 1:
break
else:
scale = float(numberOfParticsInMode) / NumberConcent.sum()
binEdges = 10 ** bins
diameterBinwidth = binEdges[1:] - binEdges[:-1]
cols = []
for e, i in enumerate(binEdges[:-1]):
cols.append(str(i) + '-' + str(binEdges[e + 1]))
data = pd.DataFrame(np.array([NumberConcent / diameterBinwidth]), columns=cols)
return SizeDist(data, binEdges, 'dNdDp')
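# Illustrative sketch (not part of the original module): generate a single mode and check
# that integrating dN/dDp over the bin widths recovers roughly the requested
# numberOfParticsInMode. Assumes the module-level imports (np, pd) are available.
def _example_simulate_sizedistribution():
    sd = simulate_sizedistribution(diameter=[10, 2500], numberOfDiameters=100,
                                   centerOfAerosolMode=200, widthOfAerosolMode=0.2,
                                   numberOfParticsInMode=1000)
    diameter_binwidth = sd.bins[1:] - sd.bins[:-1]
    total_number = (sd.data.values[0] * diameter_binwidth).sum()  # should be close to 1000
    return sd, total_number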
def simulate_sizedistribution_timeseries(diameter=[10, 2500], numberOfDiameters=100, centerOfAerosolMode=200,
widthOfAerosolMode=0.2, numberOfParticsInMode=1000,
startDate='2014-11-24 17:00:00',
endDate='2014-11-24 18:00:00', frequency=10):
delta = datetime.datetime.strptime(endDate, '%Y-%m-%d %H:%M:%S') - datetime.datetime.strptime(startDate,
'%Y-%m-%d %H:%M:%S')
    periods = int(delta.total_seconds() / float(frequency))
rng = pd.date_range(startDate, periods=periods, freq='%ss' % frequency)
noOfOsz = 5
ampOfOsz = 100
oszi = np.linspace(0, noOfOsz * 2 * np.pi, periods)
sdArray = np.zeros((periods, numberOfDiameters - 1))
for e, i in enumerate(rng):
sdtmp = simulate_sizedistribution(diameter=diameter,
numberOfDiameters=numberOfDiameters,
centerOfAerosolMode=centerOfAerosolMode + (ampOfOsz * np.sin(oszi[e])))
sdArray[e] = sdtmp.data
sdts = pd.DataFrame(sdArray, index=rng, columns=sdtmp.data.columns)
return SizeDist_TS(sdts, sdtmp.bins, sdtmp.distributionType)
def simulate_sizedistribution_layerseries(diameter=[10, 2500], numberOfDiameters=100, heightlimits=[0, 6000],
noOflayers=100, layerHeight=[500., 4000.], layerThickness=[100., 300.],
layerDensity=[1000., 5000.], layerModecenter=[200., 800.], widthOfAerosolMode = 0.2 ):
gaussian = lambda x, mu, sig: np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
lbt = np.linspace(heightlimits[0], heightlimits[1], noOflayers + 1)
layerbounderies = np.array([lbt[:-1], lbt[1:]]).transpose()
layercenter = (lbt[1:] + lbt[:-1]) / 2.
# strata = np.linspace(heightlimits[0],heightlimits[1],noOflayers+1)
layerArray = np.zeros((noOflayers, numberOfDiameters - 1))
for e, stra in enumerate(layercenter):
for i, lay in enumerate(layerHeight):
sdtmp = simulate_sizedistribution(diameter=diameter, numberOfDiameters=numberOfDiameters,
widthOfAerosolMode=widthOfAerosolMode, centerOfAerosolMode=layerModecenter[i],
numberOfParticsInMode=layerDensity[i])
layerArray[e] += sdtmp.data.values[0] * gaussian(stra, layerHeight[i], layerThickness[i])
sdls = pd.DataFrame(layerArray, index=layercenter, columns=sdtmp.data.columns)
return SizeDist_LS(sdls, sdtmp.bins, sdtmp.distributionType, layerbounderies)
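# Illustrative sketch (not part of the original module): build a two-layer vertical
# distribution; the resulting SizeDist_LS is the same kind of object that
# test_ext_coeff_vertical_profile below feeds into calculate_optical_properties.
def _example_simulate_layerseries():
    dist = simulate_sizedistribution_layerseries(layerHeight=[500., 4000.],
                                                 layerThickness=[100., 300.],
                                                 layerDensity=[1000., 5000.],
                                                 layerModecenter=[200., 800.],
                                                 noOflayers=50,
                                                 numberOfDiameters=100)
    return dist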
def generate_aerosolLayer(diameter=[.01, 2.5], numberOfDiameters=30, centerOfAerosolMode=0.6,
widthOfAerosolMode=0.2, numberOfParticsInMode=10000, layerBoundery=[0., 10000], ):
"""Probably deprecated!?! generates a numberconcentration of an aerosol layer which has a gaussian shape when plottet in dN/log(Dp).
However, returned is a numberconcentrations (simply the number of particles in each bin, no normalization)
Returns
Number concentration (#)
bin edges (nm)"""
layerBoundery = np.array(layerBoundery)
start = diameter[0]
end = diameter[1]
noOfD = numberOfDiameters
centerDiameter = centerOfAerosolMode
width = widthOfAerosolMode
bins = np.linspace(np.log10(start), np.log10(end), noOfD)
binwidth = bins[1:] - bins[:-1]
bincenters = (bins[1:] + bins[:-1]) / 2.
dNDlogDp = plt.mlab.normpdf(bincenters, np.log10(centerDiameter), width)
extraScale = 1
scale = 1
while 1:
NumberConcent = dNDlogDp * binwidth * scale * extraScale
if scale != 1:
break
else:
scale = float(numberOfParticsInMode) / NumberConcent.sum()
binEdges = 10 ** bins
# diameterBinCenters = (binEdges[1:] + binEdges[:-1])/2.
diameterBinwidth = binEdges[1:] - binEdges[:-1]
cols = []
for e, i in enumerate(binEdges[:-1]):
cols.append(str(i) + '-' + str(binEdges[e + 1]))
layerBoundery = np.array([0., 10000.])
# layerThickness = layerBoundery[1:] - layerBoundery[:-1]
layerCenter = [5000.]
data = pd.DataFrame(np.array([NumberConcent / diameterBinwidth]), index=layerCenter, columns=cols)
# return data
# atmosAerosolNumberConcentration = pd.DataFrame()
# atmosAerosolNumberConcentration['bin_center'] = pd.Series(diameterBinCenters)
# atmosAerosolNumberConcentration['bin_start'] = pd.Series(binEdges[:-1])
# atmosAerosolNumberConcentration['bin_end'] = pd.Series(binEdges[1:])
# atmosAerosolNumberConcentration['numberConcentration'] = pd.Series(NumberConcent)
# return atmosAerosolNumberConcentration
return SizeDist_LS(data, binEdges, 'dNdDp', layerBoundery)
def test_generate_numberConcentration():
"""result should look identical to Atmospheric Chemistry and Physis page 422"""
nc = generate_aerosolLayer(diameter=[0.01, 10], centerOfAerosolMode=0.8, widthOfAerosolMode=0.3,
numberOfDiameters=100, numberOfParticsInMode=1000, layerBoundery=[0.0, 10000])
plt.plot(nc.bincenters, nc.data.values[0].transpose() * nc.binwidth, label='numberConc')
plt.plot(nc.bincenters, nc.data.values[0].transpose(), label='numberDist')
ncLN = nc.convert2dNdlogDp()
plt.plot(ncLN.bincenters, ncLN.data.values[0].transpose(), label='LogNormal')
plt.legend()
plt.semilogx()
def _perform_Miecalculations(diam, wavelength, n, noOfAngles=100.):
"""
Performs Mie calculations
Parameters
----------
diam: NumPy array of floats
Array of diameters over which to perform Mie calculations; units are um
wavelength: float
Wavelength of light in um for which to perform calculations
n: complex
Ensemble complex index of refraction
Returns
    pandas DataFrame with the diameters as the index and the Mie results in the different columns
    total_extinction_coefficient: this takes the sum of all particle cross sections of the particular diameter in a cubic
    meter. This is in principle the AOD of an L
"""
diam = np.asarray(diam)
extinction_efficiency = np.zeros(diam.shape)
scattering_efficiency = np.zeros(diam.shape)
absorption_efficiency = np.zeros(diam.shape)
extinction_crossection = np.zeros(diam.shape)
scattering_crossection = np.zeros(diam.shape)
absorption_crossection = np.zeros(diam.shape)
# phase_function_natural = pd.DataFrame()
angular_scattering_natural = pd.DataFrame()
# extinction_coefficient = np.zeros(diam.shape)
# scattering_coefficient = np.zeros(diam.shape)
# absorption_coefficient = np.zeros(diam.shape)
# Function for calculating the size parameter for wavelength l and radius r
sp = lambda r, l: 2. * np.pi * r / l
for e, d in enumerate(diam):
radius = d / 2.
# print('sp(radius, wavelength)', sp(radius, wavelength))
# print('n', n)
# print('d', d)
mie = bhmie.bhmie_hagen(sp(radius, wavelength), n, noOfAngles, diameter=d)
values = mie.return_Values_as_dict()
extinction_efficiency[e] = values['extinction_efficiency']
# print("values['extinction_crosssection']",values['extinction_crosssection'])
scattering_efficiency[e] = values['scattering_efficiency']
absorption_efficiency[e] = values['extinction_efficiency'] - values['scattering_efficiency']
extinction_crossection[e] = values['extinction_crosssection']
scattering_crossection[e] = values['scattering_crosssection']
absorption_crossection[e] = values['extinction_crosssection'] - values['scattering_crosssection']
# phase_function_natural[d] = values['phaseFct_natural']['Phase_function_natural'].values
angular_scattering_natural[d] = mie.get_angular_scatt_func().natural.values
# print('\n')
# phase_function_natural.index = values['phaseFct_natural'].index
angular_scattering_natural.index = mie.get_angular_scatt_func().index
out = pd.DataFrame(index=diam)
out['extinction_efficiency'] = pd.Series(extinction_efficiency, index=diam)
out['scattering_efficiency'] = pd.Series(scattering_efficiency, index=diam)
out['absorption_efficiency'] = pd.Series(absorption_efficiency, index=diam)
out['extinction_crossection'] = pd.Series(extinction_crossection, index=diam)
out['scattering_crossection'] = pd.Series(scattering_crossection, index=diam)
out['absorption_crossection'] = pd.Series(absorption_crossection, index=diam)
return out, angular_scattering_natural
def _get_coefficients(crossection, cn):
"""
    Calculates the extinction, scattering or absorption coefficient
Parameters
----------
    crossection: float
Units are um^2
cn: float
Particle concentration in cc^-1
Returns
--------
coefficient in m^-1. This is the differential AOD.
"""
crossection = crossection.copy()
cn = cn.copy()
crossection *= 1e-12 # conversion from um^2 to m^2
cn *= 1e6 # conversion from cm^-3 to m^-3
coefficient = cn * crossection
# print('cn',cn)
# print('crossection', crossection)
# print('coeff',coefficient)
# print('\n')
return coefficient
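# Illustrative sketch (not part of the original module): a 0.3 um^2 cross section at a
# number concentration of 1000 cm^-3 gives a coefficient of 0.3e-12 * 1000e6 = 3e-4 m^-1.
def _example_get_coefficients():
    crossection = pd.Series([0.3])  # um^2
    cn = pd.Series([1000.])         # cm^-3
    return _get_coefficients(crossection, cn)  # -> ~3e-4 m^-1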
def test_ext_coeff_vertical_profile():
#todo: make this a real test
dist = simulate_sizedistribution_layerseries(layerHeight=[3000.0, 3000.0],
layerDensity=[1000.0, 100.0],
layerModecenter=[100.0, 100.0],
layerThickness=[6000, 6000],
widthOfAerosolMode = 0.01,
noOflayers=3,
numberOfDiameters=1000)
dist.plot()
dist = dist.zoom_diameter(99,101)
avg = dist.average_overAllAltitudes()
f,a = avg.plot()
a.set_xscale('linear')
opt = dist.calculate_optical_properties(550, n = 1.455)
opt_II = dist.calculate_optical_properties(550, n = 1.1)
opt_III = dist.calculate_optical_properties(550, n = 4.)
ext = opt.get_extinction_coeff_verticle_profile()
ext_II = opt_II.get_extinction_coeff_verticle_profile()
ext_III = opt_III.get_extinction_coeff_verticle_profile()
tvI_is = (ext_III.data/ext.data).values[0][0]
tvI_want = 14.3980239083
tvII_is = (ext_II.data/ext.data).values[0][0]
tvII_want = 0.05272993413
print('small deviations could come from averaging over multiple bins with slightly different diameter')
print('test values 1 is/should_be: %s/%s'%(tvI_is,tvI_want))
print('test values 2 is/should_be: %s/%s'%(tvII_is,tvII_want))
return False | mit |
EntilZha/PyFunctional | setup.py | 1 | 1887 | import sys
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert(
"README.md", "rst", extra_args=["--columns=300"]
)
except (IOError, ImportError):
long_description = open("README.md").read()
common_install_requires = ["dill>=0.2.5", "tabulate<=1.0.0"]
if "__pypy__" in sys.builtin_module_names:
compression_requires = ["bz2file==0.98", "backports.lzma==0.0.6"]
install_requires = common_install_requires
else:
compression_requires = []
install_requires = common_install_requires
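# Note (illustrative, not part of the original setup script): with the extras defined
# below, typical installs look like
#   pip install "PyFunctional[all]"          # also pulls in pandas
#   pip install "PyFunctional[compression]"  # compression backports (populated on PyPy only)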
setup(
name="PyFunctional",
description="Package for creating data pipelines with chain functional programming",
long_description=long_description,
url="https://github.com/EntilZha/PyFunctional",
author="Pedro Rodriguez",
author_email="[email protected]",
maintainer="Pedro Rodriguez",
maintainer_email="[email protected]",
license="MIT",
keywords="functional pipeline data collection chain rdd linq parallel",
packages=find_packages(exclude=["contrib", "docs", "tests*", "test"]),
version="1.4.3",
install_requires=install_requires,
    extras_require={
"all": ["pandas"] + compression_requires,
"compression": compression_requires,
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Natural Language :: English",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| mit |
jch1/models | cognitive_mapping_and_planning/tfcode/cmp_utils.py | 14 | 6936 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for setting up the CMP graph.
"""
import os, numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim import arg_scope
import logging
from src import utils
import src.file_utils as fu
from tfcode import tf_utils
resnet_v2 = tf_utils.resnet_v2
custom_residual_block = tf_utils.custom_residual_block
def value_iteration_network(
fr, num_iters, val_neurons, action_neurons, kernel_size, share_wts=False,
name='vin', wt_decay=0.0001, activation_fn=None, shape_aware=False):
"""
  Constructs a Value Iteration Network: convolutions and max pooling across
  channels.
Input:
fr: NxWxHxC
val_neurons: Number of channels for maintaining the value.
action_neurons: Computes action_neurons * val_neurons at each iteration to
max pool over.
Output:
value image: NxHxWx(val_neurons)
"""
init_var = np.sqrt(2.0/(kernel_size**2)/(val_neurons*action_neurons))
vals = []
with tf.variable_scope(name) as varscope:
if shape_aware == False:
fr_shape = tf.unstack(tf.shape(fr))
val_shape = tf.stack(fr_shape[:-1] + [val_neurons])
val = tf.zeros(val_shape, name='val_init')
else:
val = tf.expand_dims(tf.zeros_like(fr[:,:,:,0]), dim=-1) * \
tf.constant(0., dtype=tf.float32, shape=[1,1,1,val_neurons])
val_shape = tf.shape(val)
vals.append(val)
for i in range(num_iters):
if share_wts:
        # The first Value Iteration may be special, so it can have its own
        # parameters.
scope = 'conv'
if i == 0: scope = 'conv_0'
if i > 1: varscope.reuse_variables()
else:
scope = 'conv_{:d}'.format(i)
val = slim.conv2d(tf.concat([val, fr], 3, name='concat_{:d}'.format(i)),
num_outputs=action_neurons*val_neurons,
kernel_size=kernel_size, stride=1, activation_fn=activation_fn,
scope=scope, normalizer_fn=None,
weights_regularizer=slim.l2_regularizer(wt_decay),
weights_initializer=tf.random_normal_initializer(stddev=init_var),
biases_initializer=tf.zeros_initializer())
val = tf.reshape(val, [-1, action_neurons*val_neurons, 1, 1],
name='re_{:d}'.format(i))
val = slim.max_pool2d(val, kernel_size=[action_neurons,1],
stride=[action_neurons,1], padding='VALID',
scope='val_{:d}'.format(i))
val = tf.reshape(val, val_shape, name='unre_{:d}'.format(i))
vals.append(val)
return val, vals
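# Illustrative sketch (not part of the original module): wiring the VIN op onto a
# placeholder feature map, assuming a TF 1.x graph-mode setup with tf.contrib.slim,
# matching the imports above. Shapes and hyper-parameters are arbitrary.
def _example_build_vin():
  g = tf.Graph()
  with g.as_default():
    fr = tf.placeholder(tf.float32, [None, 32, 32, 8], name='fr')
    val, vals = value_iteration_network(
        fr, num_iters=8, val_neurons=4, action_neurons=2, kernel_size=3,
        share_wts=False, name='vin_example', wt_decay=0.0001,
        activation_fn=tf.nn.relu)
  return val, vals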
def rotate_preds(loc_on_map, relative_theta, map_size, preds,
output_valid_mask):
with tf.name_scope('rotate'):
flow_op = tf_utils.get_flow(loc_on_map, relative_theta, map_size=map_size)
if type(preds) != list:
rotated_preds, valid_mask_warps = tf_utils.dense_resample(preds, flow_op,
output_valid_mask)
else:
rotated_preds = [] ;valid_mask_warps = []
for pred in preds:
rotated_pred, valid_mask_warp = tf_utils.dense_resample(pred, flow_op,
output_valid_mask)
rotated_preds.append(rotated_pred)
valid_mask_warps.append(valid_mask_warp)
return rotated_preds, valid_mask_warps
def get_visual_frustum(map_size, shape_like, expand_dims=[0,0]):
with tf.name_scope('visual_frustum'):
l = np.tril(np.ones(map_size)) ;l = l + l[:,::-1]
l = (l == 2).astype(np.float32)
for e in expand_dims:
l = np.expand_dims(l, axis=e)
confs_probs = tf.constant(l, dtype=tf.float32)
confs_probs = tf.ones_like(shape_like, dtype=tf.float32) * confs_probs
return confs_probs
def deconv(x, is_training, wt_decay, neurons, strides, layers_per_block,
kernel_size, conv_fn, name, offset=0):
"""Generates a up sampling network with residual connections.
"""
batch_norm_param = {'center': True, 'scale': True,
'activation_fn': tf.nn.relu,
'is_training': is_training}
outs = []
for i, (neuron, stride) in enumerate(zip(neurons, strides)):
for s in range(layers_per_block):
scope = '{:s}_{:d}_{:d}'.format(name, i+1+offset,s+1)
x = custom_residual_block(x, neuron, kernel_size, stride, scope,
is_training, wt_decay, use_residual=True,
residual_stride_conv=True, conv_fn=conv_fn,
batch_norm_param=batch_norm_param)
stride = 1
outs.append((x,True))
return x, outs
def fr_v2(x, output_neurons, inside_neurons, is_training, name='fr',
wt_decay=0.0001, stride=1, updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Performs fusion of information between the map and the reward map.
Inputs
x: NxHxWxC1
Outputs
fr map: NxHxWx(output_neurons)
"""
if type(stride) != list:
stride = [stride]
with slim.arg_scope(resnet_v2.resnet_utils.resnet_arg_scope(
is_training=is_training, weight_decay=wt_decay)):
with slim.arg_scope([slim.batch_norm], updates_collections=updates_collections) as arg_sc:
# Change the updates_collections for the conv normalizer_params to None
for i in range(len(arg_sc.keys())):
if 'convolution' in arg_sc.keys()[i]:
arg_sc.values()[i]['normalizer_params']['updates_collections'] = updates_collections
with slim.arg_scope(arg_sc):
bottleneck = resnet_v2.bottleneck
blocks = []
for i, s in enumerate(stride):
b = resnet_v2.resnet_utils.Block(
'block{:d}'.format(i + 1), bottleneck, [{
'depth': output_neurons,
'depth_bottleneck': inside_neurons,
'stride': stride[i]
}])
blocks.append(b)
x, outs = resnet_v2.resnet_v2(x, blocks, num_classes=None, global_pool=False,
output_stride=None, include_root_block=False,
reuse=False, scope=name)
return x, outs
| apache-2.0 |
OpenNetworkingFoundation/Snowmass-ONFOpenTransport | RI/python_client/tapi_app.py | 4 | 2803 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import json
import requests
from requests.auth import HTTPBasicAuth
import matplotlib.pyplot as plt
import networkx as nx
NODE_COLOR='#ff9966'
NEP_COLOR='#cc00ff'
SIP_COLOR='#00ffff'
def main ():
sips = []
topologies = []
for port in sys.argv:
if not port.isnumeric(): continue
retrieve_context(sips, topologies, port)
draw_network_topology(sips, topologies)
def retrieve_context(sips, topologies, port, user='', password=''):
url = 'http://0.0.0.0' + ':' + port + '/data/context/'
print ("Retrieving TAPI Context from " + url)
response = requests.get(url, auth=HTTPBasicAuth(user, password))
context = response.json()
print ("Retrieved TAPI Context: " + context['uuid'])
for sip in context['service-interface-point']:
sips.append(sip)
for topo in context['topology-context']['topology']:
topologies.append(topo)
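# Illustrative sketch (not part of the original script): fetch a single TAPI context from
# a server assumed to listen on the given local port and return the SIP uuids it exposes.
def example_list_sip_uuids(port='1234'):
    sips, topologies = [], []
    retrieve_context(sips, topologies, port)
    return [sip['uuid'] for sip in sips]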
def draw_network_topology (sips, topologies) :
plt.axis('off')
nwk_graph = nx.Graph()
for sip in sips:
uuid = sip['uuid']
nwk_graph.add_node(uuid, col=SIP_COLOR, size=300)
for topo in topologies:
for node in topo['node']:
uuid = node['uuid']
nwk_graph.add_node(uuid, col=NODE_COLOR, size=1500)
for nep in node['owned-node-edge-point']:
nep_uuid = nep['uuid']
sip = nep['mapped-service-interface-point']
nwk_graph.add_node(nep_uuid, col=NEP_COLOR, size=300)
nwk_graph.add_edge(uuid, nep_uuid, col=NODE_COLOR)
if sip:
nwk_graph.add_edge(sip[0]['service-interface-point-uuid'], nep_uuid, col=SIP_COLOR)
for link in topo['link']:
nep1_uuid = link['node-edge-point'][0]['node-edge-point-uuid']
nep2_uuid = link['node-edge-point'][1]['node-edge-point-uuid']
nwk_graph.add_edge(nep1_uuid, nep2_uuid, col=NEP_COLOR)
node_list = list(nx.get_node_attributes(nwk_graph, 'col').keys())
node_col = list(nx.get_node_attributes(nwk_graph, 'col').values())
node_size = list(nx.get_node_attributes(nwk_graph, 'size').values())
edge_list = list(nx.get_edge_attributes(nwk_graph, 'col').keys())
edge_col = list(nx.get_edge_attributes(nwk_graph, 'col').values())
nx.draw_networkx(nwk_graph,
pos=nx.kamada_kawai_layout(nwk_graph),
nodelist=node_list,
edgelist=edge_list,
node_size=node_size,
node_color=node_col,
edge_color=edge_col,
font_size='6',
width=2.0)
plt.show()
if __name__ == "__main__":
main()
| apache-2.0 |
alexeyum/scikit-learn | sklearn/metrics/base.py | 46 | 4627 | """
Common code for all metrics
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
from ..exceptions import UndefinedMetricWarning as _UndefinedMetricWarning
from ..utils import deprecated
@deprecated("UndefinedMetricWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class UndefinedMetricWarning(_UndefinedMetricWarning):
pass
def _average_binary_score(binary_metric, y_true, y_score, average,
sample_weight=None):
"""Average a binary metric for multilabel classification
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
If not ``None``, average the score, else return the score for each
classes.
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options:
raise ValueError('average has to be one of {0}'
''.format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None:
score_weight = np.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == 'weighted':
if score_weight is not None:
average_weight = np.sum(np.multiply(
y_true, np.reshape(score_weight, (-1, 1))), axis=0)
else:
average_weight = np.sum(y_true, axis=0)
if average_weight.sum() == 0:
return 0
elif average == 'samples':
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c,
sample_weight=score_weight)
# Average the results
if average is not None:
return np.average(score, weights=average_weight)
else:
return score
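# Illustrative sketch (not part of scikit-learn): macro-averaging a toy per-column binary
# metric over a multilabel indicator matrix with _average_binary_score.
def _example_average_binary_score():
    y_true = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])
    y_score = np.array([[.9, .2], [.1, .8], [.8, .7], [.3, .4]])
    def toy_metric(yt, ys, sample_weight=None):
        # per-class accuracy at a 0.5 threshold, used here only as a stand-in metric
        return np.mean((ys >= 0.5) == yt)
    return _average_binary_score(toy_metric, y_true, y_score, average='macro')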
| bsd-3-clause |
pompiduskus/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, tresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
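# Illustrative sketch (not part of scikit-learn): cumulative false/true positive counts
# per distinct decreasing threshold for a small score vector.
def _example_binary_clf_curve():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
    # thresholds -> [0.8, 0.4, 0.35, 0.1], tps -> [1, 1, 2, 2], fps -> [0, 1, 1, 2]
    return fps, tps, thresholds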
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
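# Illustrative sketch (not part of scikit-learn): a sample-weighted ROC AUC composed from
# roc_curve and auc as defined above.
def _example_weighted_roc_auc():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    weights = np.array([1., 2., 2., 1.])
    fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=weights)
    return auc(fpr, tpr, reorder=True)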
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
# When there is no positive or no negative labels, those values should
# be consider as correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
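# Illustrative sketch (not part of scikit-learn): in the first sample the relevant label
# is outranked by one of its two irrelevant labels (loss 0.5), in the second by both
# (loss 1.0), so the average ranking loss is 0.75.
def _example_label_ranking_loss():
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    return label_ranking_loss(y_true, y_score)  # -> 0.75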
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/path.py | 4 | 37214 | """
A module for dealing with the polylines used throughout matplotlib.
The primary class for polyline handling in matplotlib is :class:`Path`.
Almost all vector drawing makes use of Paths somewhere in the drawing
pipeline.
Whilst a :class:`Path` instance itself cannot be drawn, there exist
:class:`~matplotlib.artist.Artist` subclasses which can be used for
convenient Path visualisation - the two most frequently used of these are
:class:`~matplotlib.patches.PathPatch` and
:class:`~matplotlib.collections.PathCollection`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import math
from weakref import WeakValueDictionary
import numpy as np
from numpy import ma
from matplotlib import _path
from matplotlib.cbook import simple_linear_interpolation, maxdict
from matplotlib import rcParams
class Path(object):
"""
:class:`Path` represents a series of possibly disconnected,
possibly closed, line and curve segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of vertex types
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
provide three vertices as well as three codes ``CURVE3``.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not
required and ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bezier curve from the current position,
with the given control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bezier curve from the current position, with
the given control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current
polyline.
Users of Path objects should not access the vertices and codes
arrays directly. Instead, they should use :meth:`iter_segments`
or :meth:`cleaned` to get the vertex/code pairs. This is important,
since many :class:`Path` objects, as an optimization, do not store a
*codes* at all, but have a default one provided for them by
:meth:`iter_segments`.
.. note::
The vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
# Path codes
STOP = 0 # 1 vertex
MOVETO = 1 # 1 vertex
LINETO = 2 # 1 vertex
CURVE3 = 3 # 2 vertices
CURVE4 = 4 # 3 vertices
CLOSEPOLY = 79 # 1 vertex
#: A dictionary mapping Path codes to the number of vertices that the
#: code expects.
NUM_VERTICES_FOR_CODE = {STOP: 1,
MOVETO: 1,
LINETO: 1,
CURVE3: 2,
CURVE4: 3,
CLOSEPOLY: 1}
code_type = np.uint8
def __init__(self, vertices, codes=None, _interpolation_steps=1,
closed=False, readonly=False):
"""
Create a new path with the given vertices and codes.
Parameters
----------
vertices : array_like
The ``(n, 2)`` float array, masked array or sequence of pairs
representing the vertices of the path.
If *vertices* contains masked values, they will be converted
to NaNs which are then handled correctly by the Agg
PathIterator and other consumers of path data, such as
:meth:`iter_segments`.
codes : {None, array_like}, optional
n-length array integers representing the codes of the path.
If not None, codes must be the same length as vertices.
If None, *vertices* will be treated as a series of line segments.
_interpolation_steps : int, optional
Used as a hint to certain projections, such as Polar, that this
path should be linearly interpolated immediately before drawing.
This attribute is primarily an implementation detail and is not
intended for public use.
closed : bool, optional
If *codes* is None and closed is True, vertices will be treated as
line segments of a closed polygon.
readonly : bool, optional
Makes the path behave in an immutable way and sets the vertices
and codes as read-only arrays.
"""
if ma.isMaskedArray(vertices):
vertices = vertices.astype(np.float_).filled(np.nan)
else:
vertices = np.asarray(vertices, np.float_)
if (vertices.ndim != 2) or (vertices.shape[1] != 2):
msg = "'vertices' must be a 2D list or array with shape Nx2"
raise ValueError(msg)
if codes is not None:
codes = np.asarray(codes, self.code_type)
if (codes.ndim != 1) or len(codes) != len(vertices):
msg = ("'codes' must be a 1D list or array with the same"
" length of 'vertices'")
raise ValueError(msg)
if len(codes) and codes[0] != self.MOVETO:
msg = ("The first element of 'code' must be equal to 'MOVETO':"
" {0}")
raise ValueError(msg.format(self.MOVETO))
elif closed:
codes = np.empty(len(vertices), dtype=self.code_type)
codes[0] = self.MOVETO
codes[1:-1] = self.LINETO
codes[-1] = self.CLOSEPOLY
self._vertices = vertices
self._codes = codes
self._interpolation_steps = _interpolation_steps
self._update_values()
if readonly:
self._vertices.flags.writeable = False
if self._codes is not None:
self._codes.flags.writeable = False
self._readonly = True
else:
self._readonly = False
@classmethod
def _fast_from_codes_and_verts(cls, verts, codes, internals=None):
"""
Creates a Path instance without the expense of calling the constructor
Parameters
----------
verts : numpy array
codes : numpy array
internals : dict or None
The attributes that the resulting path should have.
Allowed keys are ``readonly``, ``should_simplify``,
``simplify_threshold``, ``has_nonfinite`` and
``interpolation_steps``.
"""
internals = internals or {}
pth = cls.__new__(cls)
if ma.isMaskedArray(verts):
verts = verts.astype(np.float_).filled(np.nan)
else:
verts = np.asarray(verts, np.float_)
pth._vertices = verts
pth._codes = codes
pth._readonly = internals.pop('readonly', False)
pth.should_simplify = internals.pop('should_simplify', True)
pth.simplify_threshold = (
internals.pop('simplify_threshold',
rcParams['path.simplify_threshold'])
)
pth._has_nonfinite = internals.pop('has_nonfinite', False)
pth._interpolation_steps = internals.pop('interpolation_steps', 1)
if internals:
raise ValueError('Unexpected internals provided to '
'_fast_from_codes_and_verts: '
'{0}'.format('\n *'.join(six.iterkeys(
internals
))))
return pth
def _update_values(self):
self._should_simplify = (
rcParams['path.simplify'] and
(len(self._vertices) >= 128 and
(self._codes is None or np.all(self._codes <= Path.LINETO)))
)
self._simplify_threshold = rcParams['path.simplify_threshold']
self._has_nonfinite = not np.isfinite(self._vertices).all()
@property
def vertices(self):
"""
The list of vertices in the `Path` as an Nx2 numpy array.
"""
return self._vertices
@vertices.setter
def vertices(self, vertices):
if self._readonly:
raise AttributeError("Can't set vertices on a readonly Path")
self._vertices = vertices
self._update_values()
@property
def codes(self):
"""
The list of codes in the `Path` as a 1-D numpy array. Each
code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4`
or `CLOSEPOLY`. For codes that correspond to more than one
vertex (`CURVE3` and `CURVE4`), that code will be repeated so
that the length of `self.vertices` and `self.codes` is always
the same.
"""
return self._codes
@codes.setter
def codes(self, codes):
if self._readonly:
raise AttributeError("Can't set codes on a readonly Path")
self._codes = codes
self._update_values()
@property
def simplify_threshold(self):
"""
The fraction of a pixel difference below which vertices will
be simplified out.
"""
return self._simplify_threshold
@simplify_threshold.setter
def simplify_threshold(self, threshold):
self._simplify_threshold = threshold
@property
def has_nonfinite(self):
"""
`True` if the vertices array has nonfinite values.
"""
return self._has_nonfinite
@property
def should_simplify(self):
"""
`True` if the vertices array should be simplified.
"""
return self._should_simplify
@should_simplify.setter
def should_simplify(self, should_simplify):
self._should_simplify = should_simplify
@property
def readonly(self):
"""
`True` if the `Path` is read-only.
"""
return self._readonly
def __copy__(self):
"""
Returns a shallow copy of the `Path`, which will share the
vertices and codes with the source `Path`.
"""
import copy
return copy.copy(self)
copy = __copy__
    def __deepcopy__(self, memo=None):
        """
        Returns a deepcopy of the `Path`.  The `Path` will not be
        readonly, even if the source `Path` is.
        """
        # Accept the memo argument passed by copy.deepcopy and handle
        # paths that have no codes.
        codes = self.codes.copy() if self.codes is not None else None
        return self.__class__(
            self.vertices.copy(), codes,
            _interpolation_steps=self._interpolation_steps)
    deepcopy = __deepcopy__
@classmethod
def make_compound_path_from_polys(cls, XY):
"""
        Make a compound path object to draw a number of polygons with
        equal numbers of sides.
        *XY* is a (numpolys x numsides x 2) numpy array of vertices.  The
        returned object is a :class:`Path`.
.. plot:: mpl_examples/api/histogram_path_demo.py
"""
# for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for
# the CLOSEPOLY; the vert for the closepoly is ignored but we still
# need it to keep the codes aligned with the vertices
numpolys, numsides, two = XY.shape
if two != 2:
raise ValueError("The third dimension of 'XY' must be 2")
stride = numsides + 1
nverts = numpolys * stride
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * cls.LINETO
codes[0::stride] = cls.MOVETO
codes[numsides::stride] = cls.CLOSEPOLY
for i in range(numsides):
verts[i::stride] = XY[:, i]
return cls(verts, codes)
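    # Illustrative usage, an assumption rather than original code: pack three
    # quadrilaterals into a single compound path.
    #
    #     XY = np.random.rand(3, 4, 2)          # (numpolys, numsides, 2)
    #     compound = Path.make_compound_path_from_polys(XY)
    #     # one MOVETO + 3 LINETOs + 1 CLOSEPOLY per polygon -> 15 vertices
    #     assert len(compound.vertices) == 3 * (4 + 1)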
@classmethod
def make_compound_path(cls, *args):
"""Make a compound path from a list of Path objects."""
# Handle an empty list in args (i.e. no args).
if not args:
return Path(np.empty([0, 2], dtype=np.float32))
lengths = [len(x) for x in args]
total_length = sum(lengths)
vertices = np.vstack([x.vertices for x in args])
        vertices = vertices.reshape((total_length, 2))
codes = np.empty(total_length, dtype=cls.code_type)
i = 0
for path in args:
if path.codes is None:
codes[i] = cls.MOVETO
codes[i + 1:i + len(path.vertices)] = cls.LINETO
else:
codes[i:i + len(path.codes)] = path.codes
i += len(path.vertices)
return cls(vertices, codes)
def __repr__(self):
return "Path(%r, %r)" % (self.vertices, self.codes)
def __len__(self):
return len(self.vertices)
def iter_segments(self, transform=None, remove_nans=True, clip=None,
snap=False, stroke_width=1.0, simplify=None,
curves=True, sketch=None):
"""
Iterates over all of the curve segments in the path. Each
iteration returns a 2-tuple (*vertices*, *code*), where
*vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is
one of the :class:`Path` codes.
Additionally, this method can provide a number of standard
cleanups and conversions to the path.
Parameters
----------
transform : None or :class:`~matplotlib.transforms.Transform` instance
If not None, the given affine transformation will
be applied to the path.
remove_nans : {False, True}, optional
If True, will remove all NaNs from the path and
insert MOVETO commands to skip over them.
clip : None or sequence, optional
If not None, must be a four-tuple (x1, y1, x2, y2)
defining a rectangle in which to clip the path.
snap : None or bool, optional
If None, auto-snap to pixels, to reduce
fuzziness of rectilinear lines. If True, force snapping, and
if False, don't snap.
stroke_width : float, optional
The width of the stroke being drawn. Needed
as a hint for the snapping algorithm.
simplify : None or bool, optional
If True, perform simplification, to remove
vertices that do not affect the appearance of the path. If
False, perform no simplification. If None, use the
should_simplify member variable.
curves : {True, False}, optional
If True, curve segments will be returned as curve
segments. If False, all curves will be converted to line
segments.
sketch : None or sequence, optional
If not None, must be a 3-tuple of the form
(scale, length, randomness), representing the sketch
parameters.
"""
if not len(self):
return
cleaned = self.cleaned(transform=transform,
remove_nans=remove_nans, clip=clip,
snap=snap, stroke_width=stroke_width,
simplify=simplify, curves=curves,
sketch=sketch)
vertices = cleaned.vertices
codes = cleaned.codes
len_vertices = vertices.shape[0]
# Cache these object lookups for performance in the loop.
NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
STOP = self.STOP
i = 0
while i < len_vertices:
code = codes[i]
if code == STOP:
return
else:
num_vertices = NUM_VERTICES_FOR_CODE[code]
curr_vertices = vertices[i:i+num_vertices].flatten()
yield curr_vertices, code
i += num_vertices
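    # Illustrative usage (assumption; ``some_path`` is a hypothetical Path
    # instance): iterate a path as straight segments.
    #
    #     for verts, code in some_path.iter_segments(curves=False):
    #         # with curves=False every code is MOVETO, LINETO or CLOSEPOLY,
    #         # so ``verts`` is a flattened (x, y) pair
    #         print(code, verts)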
def cleaned(self, transform=None, remove_nans=False, clip=None,
quantize=False, simplify=False, curves=False,
stroke_width=1.0, snap=False, sketch=None):
"""
Cleans up the path according to the parameters returning a new
Path instance.
.. seealso::
See :meth:`iter_segments` for details of the keyword arguments.
Returns
-------
Path instance with cleaned up vertices and codes.
"""
vertices, codes = _path.cleanup_path(self, transform,
remove_nans, clip,
snap, stroke_width,
simplify, curves, sketch)
internals = {'should_simplify': self.should_simplify and not simplify,
'has_nonfinite': self.has_nonfinite and not remove_nans,
'simplify_threshold': self.simplify_threshold,
'interpolation_steps': self._interpolation_steps}
return Path._fast_from_codes_and_verts(vertices, codes, internals)
def transformed(self, transform):
"""
Return a transformed copy of the path.
.. seealso::
:class:`matplotlib.transforms.TransformedPath`
A specialized path class that will cache the
transformed result and automatically update when the
transform changes.
"""
return Path(transform.transform(self.vertices), self.codes,
self._interpolation_steps)
def contains_point(self, point, transform=None, radius=0.0):
"""
Returns *True* if the path contains the given point.
If *transform* is not *None*, the path will be transformed
before performing the test.
*radius* allows the path to be made slightly larger or
smaller.
"""
if transform is not None:
transform = transform.frozen()
result = _path.point_in_path(point[0], point[1], radius, self,
transform)
return result
def contains_points(self, points, transform=None, radius=0.0):
"""
Returns a bool array which is *True* if the path contains the
corresponding point.
If *transform* is not *None*, the path will be transformed
before performing the test.
*radius* allows the path to be made slightly larger or
smaller.
"""
if transform is not None:
transform = transform.frozen()
result = _path.points_in_path(points, radius, self, transform)
return result
def contains_path(self, path, transform=None):
"""
Returns *True* if this path completely contains the given path.
If *transform* is not *None*, the path will be transformed
before performing the test.
"""
if transform is not None:
transform = transform.frozen()
return _path.path_in_path(self, None, path, transform)
def get_extents(self, transform=None):
"""
Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the
path.
Unlike computing the extents on the *vertices* alone, this
algorithm will take into account the curves and deal with
control points appropriately.
"""
from .transforms import Bbox
path = self
if transform is not None:
transform = transform.frozen()
if not transform.is_affine:
path = self.transformed(transform)
transform = None
return Bbox(_path.get_path_extents(path, transform))
def intersects_path(self, other, filled=True):
"""
Returns *True* if this path intersects another given path.
*filled*, when True, treats the paths as if they were filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
return _path.path_intersects_path(self, other, filled)
def intersects_bbox(self, bbox, filled=True):
"""
Returns *True* if this path intersects a given
:class:`~matplotlib.transforms.Bbox`.
*filled*, when True, treats the path as if it was filled.
That is, if one path completely encloses the other,
:meth:`intersects_path` will return True.
"""
from .transforms import BboxTransformTo
rectangle = self.unit_rectangle().transformed(
BboxTransformTo(bbox))
result = self.intersects_path(rectangle, filled)
return result
def interpolated(self, steps):
"""
Returns a new path resampled to length N x steps. Does not
currently handle interpolating curves.
"""
if steps == 1:
return self
vertices = simple_linear_interpolation(self.vertices, steps)
codes = self.codes
if codes is not None:
new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, ))
new_codes[0::steps] = codes
else:
new_codes = None
return Path(vertices, new_codes)
def to_polygons(self, transform=None, width=0, height=0):
"""
Convert this path to a list of polygons. Each polygon is an
Nx2 array of vertices. In other words, each polygon has no
``MOVETO`` instructions or curves. This is useful for
displaying in backends that do not support compound paths or
Bezier curves, such as GDK.
If *width* and *height* are both non-zero then the lines will
be simplified so that vertices outside of (0, 0), (width,
height) will be clipped.
"""
if len(self.vertices) == 0:
return []
if transform is not None:
transform = transform.frozen()
if self.codes is None and (width == 0 or height == 0):
if transform is None:
return [self.vertices]
else:
return [transform.transform(self.vertices)]
# Deal with the case where there are curves and/or multiple
# subpaths (using extension code)
return _path.convert_path_to_polygons(self, transform, width, height)
_unit_rectangle = None
@classmethod
def unit_rectangle(cls):
"""
Return a :class:`Path` instance of the unit rectangle
from (0, 0) to (1, 1).
"""
if cls._unit_rectangle is None:
cls._unit_rectangle = \
cls([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0],
[0.0, 0.0]],
[cls.MOVETO, cls.LINETO, cls.LINETO, cls.LINETO,
cls.CLOSEPOLY],
readonly=True)
return cls._unit_rectangle
_unit_regular_polygons = WeakValueDictionary()
@classmethod
def unit_regular_polygon(cls, numVertices):
"""
Return a :class:`Path` instance for a unit regular
polygon with the given *numVertices* and radius of 1.0,
centered at (0, 0).
"""
if numVertices <= 16:
path = cls._unit_regular_polygons.get(numVertices)
else:
path = None
if path is None:
theta = (2*np.pi/numVertices *
np.arange(numVertices + 1).reshape((numVertices + 1, 1)))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
verts = np.concatenate((np.cos(theta), np.sin(theta)), 1)
codes = np.empty((numVertices + 1,))
codes[0] = cls.MOVETO
codes[1:-1] = cls.LINETO
codes[-1] = cls.CLOSEPOLY
path = cls(verts, codes, readonly=True)
if numVertices <= 16:
cls._unit_regular_polygons[numVertices] = path
return path
_unit_regular_stars = WeakValueDictionary()
@classmethod
def unit_regular_star(cls, numVertices, innerCircle=0.5):
"""
Return a :class:`Path` for a unit regular star
with the given numVertices and radius of 1.0, centered at (0,
0).
"""
if numVertices <= 16:
path = cls._unit_regular_stars.get((numVertices, innerCircle))
else:
path = None
if path is None:
ns2 = numVertices * 2
theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
# This initial rotation is to make sure the polygon always
# "points-up"
theta += np.pi / 2.0
r = np.ones(ns2 + 1)
r[1::2] = innerCircle
verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose()
codes = np.empty((ns2 + 1,))
codes[0] = cls.MOVETO
codes[1:-1] = cls.LINETO
codes[-1] = cls.CLOSEPOLY
path = cls(verts, codes, readonly=True)
if numVertices <= 16:
                cls._unit_regular_stars[(numVertices, innerCircle)] = path
return path
@classmethod
def unit_regular_asterisk(cls, numVertices):
"""
Return a :class:`Path` for a unit regular
asterisk with the given numVertices and radius of 1.0,
centered at (0, 0).
"""
return cls.unit_regular_star(numVertices, 0.0)
_unit_circle = None
@classmethod
def unit_circle(cls):
"""
Return the readonly :class:`Path` of the unit circle.
For most cases, :func:`Path.circle` will be what you want.
"""
if cls._unit_circle is None:
cls._unit_circle = cls.circle(center=(0, 0), radius=1,
readonly=True)
return cls._unit_circle
@classmethod
def circle(cls, center=(0., 0.), radius=1., readonly=False):
"""
Return a Path representing a circle of a given radius and center.
Parameters
----------
center : pair of floats
The center of the circle. Default ``(0, 0)``.
radius : float
The radius of the circle. Default is 1.
readonly : bool
Whether the created path should have the "readonly" argument
set when creating the Path instance.
Notes
-----
The circle is approximated using cubic Bezier curves. This
uses 8 splines around the circle using the approach presented
here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)
vertices = np.array([[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[-MAGIC, 1.0],
[-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
[-SQRTHALF, SQRTHALF],
[-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
[-1.0, MAGIC],
[-1.0, 0.0],
[-1.0, -MAGIC],
[-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
[-SQRTHALF, -SQRTHALF],
[-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
[-MAGIC, -1.0],
[0.0, -1.0],
[0.0, -1.0]],
dtype=np.float_)
codes = [cls.CURVE4] * 26
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
return Path(vertices * radius + center, codes, readonly=readonly)
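    # Illustrative usage (assumption): the 26 vertices above encode eight
    # cubic Bezier splines plus the MOVETO/CLOSEPOLY bookkeeping, scaled and
    # shifted by ``radius`` and ``center``.
    #
    #     ring = Path.circle(center=(1.0, 1.0), radius=2.0)
    #     assert ring.vertices.shape == (26, 2)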
_unit_circle_righthalf = None
@classmethod
def unit_circle_righthalf(cls):
"""
Return a :class:`Path` of the right half
of a unit circle. The circle is approximated using cubic Bezier
curves. This uses 4 splines around the circle using the approach
presented here:
Lancaster, Don. `Approximating a Circle or an Ellipse Using Four
Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.
"""
if cls._unit_circle_righthalf is None:
MAGIC = 0.2652031
SQRTHALF = np.sqrt(0.5)
MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0)
vertices = np.array(
[[0.0, -1.0],
[MAGIC, -1.0],
[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
[SQRTHALF, -SQRTHALF],
[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
[1.0, -MAGIC],
[1.0, 0.0],
[1.0, MAGIC],
[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
[SQRTHALF, SQRTHALF],
[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
[MAGIC, 1.0],
[0.0, 1.0],
[0.0, -1.0]],
np.float_)
codes = cls.CURVE4 * np.ones(14)
codes[0] = cls.MOVETO
codes[-1] = cls.CLOSEPOLY
cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
return cls._unit_circle_righthalf
@classmethod
def arc(cls, theta1, theta2, n=None, is_wedge=False):
"""
Return an arc on the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
Masionobe, L. 2003. `Drawing an elliptical arc using
polylines, quadratic or cubic Bezier curves
<http://www.spaceroots.org/documents/ellipse/index.html>`_.
"""
# degrees to radians
theta1 *= np.pi / 180.0
theta2 *= np.pi / 180.0
twopi = np.pi * 2.0
halfpi = np.pi * 0.5
eta1 = np.arctan2(np.sin(theta1), np.cos(theta1))
eta2 = np.arctan2(np.sin(theta2), np.cos(theta2))
eta2 -= twopi * np.floor((eta2 - eta1) / twopi)
# number of curve segments to make
if n is None:
n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
if n < 1:
raise ValueError("n must be >= 1 or None")
deta = (eta2 - eta1) / n
t = np.tan(0.5 * deta)
alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
steps = np.linspace(eta1, eta2, n + 1, True)
cos_eta = np.cos(steps)
sin_eta = np.sin(steps)
xA = cos_eta[:-1]
yA = sin_eta[:-1]
xA_dot = -yA
yA_dot = xA
xB = cos_eta[1:]
yB = sin_eta[1:]
xB_dot = -yB
yB_dot = xB
if is_wedge:
length = n * 3 + 4
vertices = np.zeros((length, 2), np.float_)
codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
vertices[1] = [xA[0], yA[0]]
codes[0:2] = [cls.MOVETO, cls.LINETO]
codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
vertex_offset = 2
end = length - 2
else:
length = n * 3 + 1
vertices = np.empty((length, 2), np.float_)
codes = cls.CURVE4 * np.ones((length, ), cls.code_type)
vertices[0] = [xA[0], yA[0]]
codes[0] = cls.MOVETO
vertex_offset = 1
end = length
vertices[vertex_offset:end:3, 0] = xA + alpha * xA_dot
vertices[vertex_offset:end:3, 1] = yA + alpha * yA_dot
vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
vertices[vertex_offset+2:end:3, 0] = xB
vertices[vertex_offset+2:end:3, 1] = yB
return cls(vertices, codes, readonly=True)
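    # Illustrative usage (assumption): the spline count defaults to a power
    # of two derived from the angular span, but can be forced with ``n``.
    #
    #     quarter = Path.arc(0.0, 90.0)       # automatic number of splines
    #     half = Path.arc(0.0, 180.0, n=4)    # force four cubic splines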
@classmethod
def wedge(cls, theta1, theta2, n=None):
"""
Return a wedge of the unit circle from angle
*theta1* to angle *theta2* (in degrees).
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
"""
return cls.arc(theta1, theta2, n, True)
_hatch_dict = maxdict(8)
@classmethod
def hatch(cls, hatchpattern, density=6):
"""
Given a hatch specifier, *hatchpattern*, generates a Path that
can be used in a repeated hatching pattern. *density* is the
number of lines per unit square.
"""
from matplotlib.hatch import get_path
if hatchpattern is None:
return None
hatch_path = cls._hatch_dict.get((hatchpattern, density))
if hatch_path is not None:
return hatch_path
hatch_path = get_path(hatchpattern, density)
cls._hatch_dict[(hatchpattern, density)] = hatch_path
return hatch_path
def clip_to_bbox(self, bbox, inside=True):
"""
Clip the path to the given bounding box.
The path must be made up of one or more closed polygons. This
algorithm will not behave correctly for unclosed paths.
If *inside* is `True`, clip to the inside of the box, otherwise
to the outside of the box.
"""
# Use make_compound_path_from_polys
verts = _path.clip_path_to_rect(self, bbox, inside)
paths = [Path(poly) for poly in verts]
return self.make_compound_path(*paths)
def get_path_collection_extents(
master_transform, paths, transforms, offsets, offset_transform):
"""
Given a sequence of :class:`Path` objects,
:class:`~matplotlib.transforms.Transform` objects and offsets, as
found in a :class:`~matplotlib.collections.PathCollection`,
returns the bounding box that encapsulates all of them.
*master_transform* is a global transformation to apply to all paths
*paths* is a sequence of :class:`Path` instances.
*transforms* is a sequence of
:class:`~matplotlib.transforms.Affine2D` instances.
*offsets* is a sequence of (x, y) offsets (or an Nx2 array)
*offset_transform* is a :class:`~matplotlib.transforms.Affine2D`
to apply to the offsets before applying the offset to the path.
The way that *paths*, *transforms* and *offsets* are combined
follows the same method as for collections. Each is iterated over
independently, so if you have 3 paths, 2 transforms and 1 offset,
their combinations are as follows:
(A, A, A), (B, B, A), (C, A, A)
"""
from .transforms import Bbox
if len(paths) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_path.get_path_collection_extents(
master_transform, paths, np.atleast_3d(transforms),
offsets, offset_transform))
def get_paths_extents(paths, transforms=[]):
"""
Given a sequence of :class:`Path` objects and optional
:class:`~matplotlib.transforms.Transform` objects, returns the
bounding box that encapsulates all of them.
*paths* is a sequence of :class:`Path` instances.
*transforms* is an optional sequence of
:class:`~matplotlib.transforms.Affine2D` instances to apply to
each path.
"""
from .transforms import Bbox, Affine2D
if len(paths) == 0:
raise ValueError("No paths provided")
return Bbox.from_extents(*_path.get_path_collection_extents(
Affine2D(), paths, transforms, [], Affine2D()))
def _define_deprecated_functions(ns):
from .cbook import deprecated
# The C++ functions are not meant to be used directly.
# Users should use the more pythonic wrappers in the Path
# class instead.
for func, alternative in [
('point_in_path', 'path.Path.contains_point'),
('get_path_extents', 'path.Path.get_extents'),
('point_in_path_collection', 'collection.Collection.contains'),
('path_in_path', 'path.Path.contains_path'),
('path_intersects_path', 'path.Path.intersects_path'),
('convert_path_to_polygons', 'path.Path.to_polygons'),
('cleanup_path', 'path.Path.cleaned'),
('points_in_path', 'path.Path.contains_points'),
('clip_path_to_rect', 'path.Path.clip_to_bbox')]:
ns[func] = deprecated(
since='1.3', alternative=alternative)(getattr(_path, func))
_define_deprecated_functions(locals())
| mit |
mblue9/tools-iuc | tools/vsnp/vsnp_build_tables.py | 2 | 17888 | #!/usr/bin/env python
import argparse
import multiprocessing
import os
import queue
import re
import pandas
import pandas.io.formats.excel
from Bio import SeqIO
INPUT_JSON_AVG_MQ_DIR = 'input_json_avg_mq_dir'
INPUT_JSON_DIR = 'input_json_dir'
INPUT_NEWICK_DIR = 'input_newick_dir'
# Maximum columns allowed in a LibreOffice
# spreadsheet is 1024. Excel allows for
# 16,384 columns, but we'll set the lower
# number as the maximum. Some browsers
# (e.g., Firefox on Linux) are configured
# to use LibreOffice for Excel spreadsheets.
MAXCOLS = 1024
OUTPUT_EXCEL_DIR = 'output_excel_dir'
def annotate_table(table_df, group, annotation_dict):
for gbk_chrome, pro in list(annotation_dict.items()):
ref_pos = list(table_df)
ref_series = pandas.Series(ref_pos)
ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values, columns=['reference', 'position'])
all_ref = ref_df[ref_df['reference'] == gbk_chrome]
positions = all_ref.position.to_frame()
# Create an annotation file.
annotation_file = "%s_annotations.csv" % group
with open(annotation_file, "a") as fh:
for _, row in positions.iterrows():
pos = row.position
try:
aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]
try:
chrom, name, locus, tag = aaa.values[0]
print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
except ValueError:
# If only one annotation for the entire
# chromosome (e.g., flu) then having [0] fails
chrom, name, locus, tag = aaa.values
print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
except KeyError:
print("{}:{}\tNo annotated product".format(gbk_chrome, pos), file=fh)
# Read the annotation file into a data frame.
annotations_df = pandas.read_csv(annotation_file, sep='\t', header=None, names=['index', 'annotations'], index_col='index')
# Remove the annotation_file from disk since both
# cascade and sort tables are built using the file,
# and it is opened for writing in append mode.
os.remove(annotation_file)
# Process the data.
table_df_transposed = table_df.T
table_df_transposed.index = table_df_transposed.index.rename('index')
table_df_transposed = table_df_transposed.merge(annotations_df, left_index=True, right_index=True)
table_df = table_df_transposed.T
return table_df
def excel_formatter(json_file_name, excel_file_name, group, annotation_dict):
pandas.io.formats.excel.header_style = None
table_df = pandas.read_json(json_file_name, orient='split')
if annotation_dict is not None:
table_df = annotate_table(table_df, group, annotation_dict)
else:
table_df = table_df.append(pandas.Series(name='no annotations'))
writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')
table_df.to_excel(writer, sheet_name='Sheet1')
writer_book = writer.book
ws = writer.sheets['Sheet1']
format_a = writer_book.add_format({'bg_color': '#58FA82'})
format_g = writer_book.add_format({'bg_color': '#F7FE2E'})
format_c = writer_book.add_format({'bg_color': '#0000FF'})
format_t = writer_book.add_format({'bg_color': '#FF0000'})
format_normal = writer_book.add_format({'bg_color': '#FDFEFE'})
formatlowqual = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
format_ambigous = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
format_n = writer_book.add_format({'bg_color': '#E2CFDD'})
rows, cols = table_df.shape
ws.set_column(0, 0, 30)
ws.set_column(1, cols, 2.1)
ws.freeze_panes(2, 1)
format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
# Set last row.
ws.set_row(rows + 1, cols + 1, format_annotation)
# Make sure that row/column locations don't overlap.
ws.conditional_format(rows - 2, 1, rows - 1, cols, {'type': 'cell', 'criteria': '<', 'value': 55, 'format': formatlowqual})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': format_normal})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': format_g})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': format_c})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'R', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': format_n})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': format_n})
format_rotation = writer_book.add_format({})
format_rotation.set_rotation(90)
for column_num, column_name in enumerate(list(table_df.columns)):
ws.write(0, column_num + 1, column_name, format_rotation)
format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
# Set last row.
ws.set_row(rows, 400, format_annotation)
writer.save()
def get_annotation_dict(gbk_file):
gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk_file, "genbank"))
annotation_dict = {}
tmp_file = "features.csv"
# Create a file of chromosomes and features.
for chromosome in list(gbk_dict.keys()):
with open(tmp_file, 'w+') as fh:
for feature in gbk_dict[chromosome].features:
if "CDS" in feature.type or "rRNA" in feature.type:
try:
product = feature.qualifiers['product'][0]
except KeyError:
product = None
try:
locus = feature.qualifiers['locus_tag'][0]
except KeyError:
locus = None
try:
gene = feature.qualifiers['gene'][0]
except KeyError:
gene = None
fh.write("%s\t%d\t%d\t%s\t%s\t%s\n" % (chromosome, int(feature.location.start), int(feature.location.end), locus, product, gene))
# Read the chromosomes and features file into a data frame.
df = pandas.read_csv(tmp_file, sep='\t', names=["chrom", "start", "stop", "locus", "product", "gene"])
# Process the data.
df = df.sort_values(['start', 'gene'], ascending=[True, False])
df = df.drop_duplicates('start')
pro = df.reset_index(drop=True)
pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')
annotation_dict[chromosome] = pro
return annotation_dict
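# Illustrative usage (assumption; the file and chromosome names below are
# hypothetical): each per-chromosome frame is indexed by an IntervalIndex over
# [start, stop], so a SNP position can be looked up directly, as
# annotate_table() does above.
#
#     annotation_dict = get_annotation_dict('reference.gbk')
#     pro = annotation_dict['NC_002945.4']
#     feature = pro.iloc[pro.index.get_loc(123456)]   # feature covering position 123456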
def get_base_file_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
elif base_file_name.find("_") > 0:
# The dot extension was likely changed to
# the " character.
items = base_file_name.split("_")
return "_".join(items[0:-1])
else:
return base_file_name
def output_cascade_table(cascade_order, mqdf, group, annotation_dict):
cascade_order_mq = pandas.concat([cascade_order, mqdf], join='inner')
output_table(cascade_order_mq, "cascade", group, annotation_dict)
def output_excel(df, type_str, group, annotation_dict, count=None):
# Output the temporary json file that
# is used by the excel_formatter.
if count is None:
if group is None:
json_file_name = "%s_order_mq.json" % type_str
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table.xlsx" % type_str)
else:
json_file_name = "%s_%s_order_mq.json" % (group, type_str)
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table.xlsx" % (group, type_str))
else:
if group is None:
json_file_name = "%s_order_mq_%d.json" % (type_str, count)
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table_%d.xlsx" % (type_str, count))
else:
json_file_name = "%s_%s_order_mq_%d.json" % (group, type_str, count)
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table_%d.xlsx" % (group, type_str, count))
df.to_json(json_file_name, orient='split')
# Output the Excel file.
excel_formatter(json_file_name, excel_file_name, group, annotation_dict)
def output_sort_table(cascade_order, mqdf, group, annotation_dict):
sort_df = cascade_order.T
sort_df['abs_value'] = sort_df.index
sort_df[['chrom', 'pos']] = sort_df['abs_value'].str.split(':', expand=True)
sort_df = sort_df.drop(['abs_value', 'chrom'], axis=1)
sort_df.pos = sort_df.pos.astype(int)
sort_df = sort_df.sort_values(by=['pos'])
sort_df = sort_df.drop(['pos'], axis=1)
sort_df = sort_df.T
sort_order_mq = pandas.concat([sort_df, mqdf], join='inner')
output_table(sort_order_mq, "sort", group, annotation_dict)
def output_table(df, type_str, group, annotation_dict):
if isinstance(group, str) and group.startswith("dataset"):
# Inputs are single files, not collections,
# so input file names are not useful for naming
# output files.
group_str = None
else:
group_str = group
count = 0
chunk_start = 0
chunk_end = 0
column_count = df.shape[1]
if column_count >= MAXCOLS:
# Here the number of columns is greater than
# the maximum allowed by Excel, so multiple
# outputs will be produced.
while column_count >= MAXCOLS:
count += 1
chunk_end += MAXCOLS
df_of_type = df.iloc[:, chunk_start:chunk_end]
output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
chunk_start += MAXCOLS
column_count -= MAXCOLS
count += 1
df_of_type = df.iloc[:, chunk_start:]
output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
else:
output_excel(df, type_str, group_str, annotation_dict)
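def _example_output_file_count(column_count, maxcols=MAXCOLS):
    # Hypothetical helper, not part of the original tool: mirrors the MAXCOLS
    # chunking loop in output_table() above and returns how many Excel files
    # would be written for a table with ``column_count`` columns.  For
    # example, 2500 columns with MAXCOLS == 1024 gives 3 files
    # (1024 + 1024 + 452 columns).
    count = 0
    while column_count >= maxcols:
        count += 1
        column_count -= maxcols
    # the remainder (or the whole table, if it was narrow enough) is one more file
    return count + 1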
def preprocess_tables(task_queue, annotation_dict, timeout):
while True:
try:
tup = task_queue.get(block=True, timeout=timeout)
except queue.Empty:
break
newick_file, json_file, json_avg_mq_file = tup
avg_mq_series = pandas.read_json(json_avg_mq_file, typ='series', orient='split')
# Map quality to dataframe.
mqdf = avg_mq_series.to_frame(name='MQ')
mqdf = mqdf.T
# Get the group.
group = get_base_file_name(newick_file)
snps_df = pandas.read_json(json_file, orient='split')
with open(newick_file, 'r') as fh:
for line in fh:
line = re.sub('[:,]', '\n', line)
line = re.sub('[)(]', '', line)
line = re.sub(r'[0-9].*\.[0-9].*\n', '', line)
line = re.sub('root\n', '', line)
sample_order = line.split('\n')
sample_order = list([_f for _f in sample_order if _f])
sample_order.insert(0, 'root')
tree_order = snps_df.loc[sample_order]
# Count number of SNPs in each column.
snp_per_column = []
for column_header in tree_order:
count = 0
column = tree_order[column_header]
for element in column:
if element != column[0]:
count = count + 1
snp_per_column.append(count)
row1 = pandas.Series(snp_per_column, tree_order.columns, name="snp_per_column")
# Count number of SNPS from the
# top of each column in the table.
snp_from_top = []
for column_header in tree_order:
count = 0
column = tree_order[column_header]
# for each element in the column
# skip the first element
for element in column[1:]:
if element == column[0]:
count = count + 1
else:
break
snp_from_top.append(count)
row2 = pandas.Series(snp_from_top, tree_order.columns, name="snp_from_top")
tree_order = tree_order.append([row1])
tree_order = tree_order.append([row2])
# In pandas=0.18.1 even this does not work:
# abc = row1.to_frame()
# abc = abc.T --> tree_order.shape (5, 18), abc.shape (1, 18)
# tree_order.append(abc)
# Continue to get error: "*** ValueError: all the input arrays must have same number of dimensions"
tree_order = tree_order.T
tree_order = tree_order.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False])
tree_order = tree_order.T
# Remove snp_per_column and snp_from_top rows.
cascade_order = tree_order[:-2]
# Output the cascade table.
output_cascade_table(cascade_order, mqdf, group, annotation_dict)
# Output the sorted table.
output_sort_table(cascade_order, mqdf, group, annotation_dict)
task_queue.task_done()
def set_num_cpus(num_files, processes):
num_cpus = int(multiprocessing.cpu_count())
if num_files < num_cpus and num_files < processes:
return num_files
if num_cpus < processes:
half_cpus = int(num_cpus / 2)
if num_files < half_cpus:
return num_files
return half_cpus
return processes
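# Illustrative behaviour of set_num_cpus() (assumption: an 8-core machine):
#     set_num_cpus(2, 6)   -> 2   fewer files than CPUs and requested processes
#     set_num_cpus(50, 16) -> 4   request exceeds the CPU count, use half the CPUs
#     set_num_cpus(50, 4)  -> 4   request fits within the CPU budget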
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_avg_mq_json', action='store', dest='input_avg_mq_json', required=False, default=None, help='Average MQ json file')
parser.add_argument('--input_newick', action='store', dest='input_newick', required=False, default=None, help='Newick file')
parser.add_argument('--input_snps_json', action='store', dest='input_snps_json', required=False, default=None, help='SNPs json file')
parser.add_argument('--gbk_file', action='store', dest='gbk_file', required=False, default=None, help='Optional gbk file'),
parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')
args = parser.parse_args()
if args.gbk_file is not None:
# Create the annotation_dict for annotating
# the Excel tables.
annotation_dict = get_annotation_dict(args.gbk_file)
else:
annotation_dict = None
# The assumption here is that the list of files
# in both INPUT_NEWICK_DIR and INPUT_JSON_DIR are
# named such that they are properly matched if
# the directories contain more than 1 file (i.e.,
# hopefully the newick file names and json file names
# will be something like Mbovis-01D6_* so they can be
# sorted and properly associated with each other).
if args.input_newick is not None:
newick_files = [args.input_newick]
else:
newick_files = []
for file_name in sorted(os.listdir(INPUT_NEWICK_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_NEWICK_DIR, file_name))
newick_files.append(file_path)
if args.input_snps_json is not None:
json_files = [args.input_snps_json]
else:
json_files = []
for file_name in sorted(os.listdir(INPUT_JSON_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_JSON_DIR, file_name))
json_files.append(file_path)
if args.input_avg_mq_json is not None:
json_avg_mq_files = [args.input_avg_mq_json]
else:
json_avg_mq_files = []
for file_name in sorted(os.listdir(INPUT_JSON_AVG_MQ_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_JSON_AVG_MQ_DIR, file_name))
json_avg_mq_files.append(file_path)
multiprocessing.set_start_method('spawn')
queue1 = multiprocessing.JoinableQueue()
queue2 = multiprocessing.JoinableQueue()
num_files = len(newick_files)
cpus = set_num_cpus(num_files, args.processes)
# Set a timeout for get()s in the queue.
timeout = 0.05
for i, newick_file in enumerate(newick_files):
json_file = json_files[i]
json_avg_mq_file = json_avg_mq_files[i]
queue1.put((newick_file, json_file, json_avg_mq_file))
# Complete the preprocess_tables task.
processes = [multiprocessing.Process(target=preprocess_tables, args=(queue1, annotation_dict, timeout, )) for _ in range(cpus)]
for p in processes:
p.start()
for p in processes:
p.join()
queue1.join()
if queue1.empty():
queue1.close()
queue1.join_thread()
| mit |
abhishekgahlot/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 28 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
  Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
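# Illustrative usage (assumption): the bunch carries both the raw observation
# records and the environmental coverages sampled at those points, e.g.
#
#     bv = create_species_bunch("bradypus_variegatus_0",
#                               data.train, data.test,
#                               data.coverages, xgrid, ygrid)
#     bv.cov_train.shape    # (n_train_points, n_coverages) feature matrix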
def plot_species_distribution(species=["bradypus_variegatus_0",
"microryzomys_minutus_0"]):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
An ensemble of 299 boosts (300 decision trees) is compared with a single
decision tree regressor. As the number of boosts is increased, the regressor
can fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
tzechiop/PANYNJ-Regression-Analysis-for-Toll-Traffic-Elasticity | createRegressInputs.py | 1 | 4738 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 6 09:29:50 2016
This script is used to create the x and y data files to be used for regression.
The script compiles columns from spreadsheets specified in 'colfile'. The column
'groupcol' (default = 'Year-Month') is used to combine the columns from every
spreadsheet, and thus should be included in every spreadsheet.
The file colfile should have the following fields: 'infile', 'column', 'xy'.
The script also converts variables to log if specified in 'colfile'.
The line below provides an example of a command used to run this script.
python createRegressInputs.py data\\regress_para\\regresscols_pathtotal_1.xlsx -o data\\regress_data
@author: thasegawa
"""
import argparse
import os
import pandas as pd
import numpy as np
# Function to create output filename
def createOutfilename(infile, add):
outfile, ext = os.path.splitext(infile)
outfile += add + ext
return outfile
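# Hypothetical self-check (assumption), showing how the suffix is inserted
# before the file extension:
def _example_createOutfilename():
    assert createOutfilename('regresscols.xlsx', '_y') == 'regresscols_y.xlsx'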
# Function to combine new x or y data
def addNew(data, newdata, groupcol, columns):
if data is None:
data = pd.DataFrame(newdata[[groupcol] + columns])
else:
data = pd.merge(data, newdata[[groupcol] + columns], how = 'left', on = groupcol)
return data
# Convert speficied columns to log form
def convertToLog(data, column_list):
for column in column_list:
data[column] = data[column].apply(np.log10)
return data
# ============================================================================
print('Running createRegressInputs.py...')
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('colfile')
parser.add_argument('-o', '--outpath', help="specify the output folder")
parser.add_argument('-oy', '--outfiley', help="specify the output file. default is colfile + '_x' and colfile + '_y'")
parser.add_argument('-ox', '--outfilex', help="specify the output file. default is colfile + '_x' and colfile + '_y'")
parser.add_argument('-c', '--groupcol', default='Year-Month', help="specifies the column that specifies the time scale")
parser.add_argument('-y', '--sumY', default=True, help="specifies if the Y data should be summed or not")
args = vars(parser.parse_args())
colfile, outpath, outfiley, outfilex, groupcol, sumY = args['colfile'], args['outpath'], args['outfiley'], args['outfilex'], args['groupcol'], args['sumY']
#==============================================================================
# os.chdir(r'C:\Users\thasegawa\Documents\53 Port Authority Toll\06 Python Projects\Regression Analysis')
# colfile = 'data\\regress_para\\regresscols_pathtotal_all_nonlog.xlsx'
# outpath = 'data\\regress_data'
# outfiley = None
# outfilex = None
# groupcol = 'Year-Month'
# sumY = False
#==============================================================================
# If the outfile names were not specified, create defualt outfile names
if outfiley is None:
outfiley = createOutfilename(colfile, '_y')
if outfilex is None:
outfilex = createOutfilename(colfile, '_x')
if outpath is not None:
outfiley = os.path.join(outpath, os.path.basename(outfiley))
outfilex = os.path.join(outpath, os.path.basename(outfilex))
print('Column file: %s' % colfile)
print('Outfile for y: %s' % outfiley)
print('Outfile for x: %s' % outfilex)
# Read column file
coldata = pd.read_excel(colfile)
infile_list = coldata['infile'].unique()
# Compile Y data
ydata = None
xdata = None
for infile in infile_list:
rdata = pd.read_excel(infile)
# Iterate through x and y data of each input spreadsheet
subdata = coldata[coldata['infile'] == infile]
xy_list = subdata['xy'].unique()
for xy in xy_list:
print('Compiling {0} data from {1}'.format(xy, infile))
idx = subdata['xy'] == xy
columns = list(subdata['column'][idx])
if xy == 'y':
ydata = addNew(ydata, rdata, groupcol, columns)
else:
xdata = addNew(xdata, rdata, groupcol, columns)
# Sum the Y columns if specified
if sumY:
cols = [column for column in ydata.columns.values if column != groupcol]
ydata = pd.concat([ydata[groupcol], ydata[cols].sum(axis=1)], axis = 1)
ydata.columns = [groupcol, 'y']
# Convert columns to log
print('Converting columns to log')
logcolumn_list = coldata['column'][(coldata['xy'] == 'x') & (coldata['log'] == True)]
xdata = convertToLog(xdata, logcolumn_list)
logcolumn_list = coldata['column'][(coldata['xy'] == 'y') & (coldata['log'] == True)]
if sumY:
if len(logcolumn_list) > 0:
ydata = convertToLog(ydata, ['y'])
else:
ydata = convertToLog(ydata, logcolumn_list)
# Output files
ydata.to_excel(outfiley, index = False)
xdata.to_excel(outfilex, index = False)
print('Success!') | mit |
ritviksahajpal/EPIC | read_EPIC_output/read_EPIC_output.py | 1 | 13383 | import constants, pandas, os, fnmatch, logging, pdb, numpy, datetime, re
from sqlalchemy import create_engine
class EPIC_Output_File():
"""
Class to read EPIC Output files
"""
def __init__(self, ftype='', tag=''):
"""
Constructor
"""
# Get name of latest output directory (based on what time it was modified)
os.chdir(constants.epic_dir+os.sep+'output')
# Get list of all directories in output folder, select the ones which have the current TAG
        dirs = [d for d in os.listdir(constants.epic_dir + os.sep + 'output')
                if os.path.isdir(constants.epic_dir + os.sep + 'output' + os.sep + d)]
cur_dirs = [d for d in dirs if constants.OUT_TAG in d]
# Select the TAGged directory which is the latest
self.ldir = sorted(cur_dirs, key=lambda x: os.path.getmtime(x), reverse=True)[:1][0]
if constants.DO_FOLDER:
self.epic_out_dir = constants.FOLDER_PATH
else:
self.epic_out_dir = constants.epic_dir + os.sep + 'output' + os.sep + self.ldir # Latest output directory
# Create a sqlite database in the analysis directory
self.db_path = constants.db_dir + os.sep + ftype + '_' + tag + '_' + self.ldir + '.db'
self.db_name = 'sqlite:///' + self.db_path
self.csv_path = constants.csv_dir + os.sep + ftype + '_' + tag + '_' + self.ldir + '.csv'
self.engine = create_engine(self.db_name)
self.ftype = ftype
self.tag = tag
self.ifexist = 'replace'
def get_col_widths(self, fl):
df = pandas.read_table(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP, header=None, nrows=1)
wt = df.iloc[0][0]
# Assume the columns (right-aligned) are one or more spaces followed by one or more non-space
        cols = re.findall(r'\s+\S+', wt)
return [len(col) for col in cols]
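    # Illustrative behaviour (assumption): for a right-aligned header row such
    # as '   YR  MO  DA     PRCP', re.findall(r'\s+\S+', ...) returns
    # ['   YR', '  MO', '  DA', '     PRCP'], giving widths [5, 4, 4, 9].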
def read_repeat_blocks(self, inp_file, start_sep='', end_sep=''):
"""
Read repeated blocks of data with data lying between start_sep and end_sep
Assumes atleast 2 spaces between columns
Currently tested on ACN files
:param inp_file:
:param start_sep:
:param end_sep:
:return:
"""
tmp_csv = constants.csv_dir + os.sep + 'tmp.csv'
odf = pandas.DataFrame()
cur_yr = constants.START_YR
with open(inp_file) as fp:
for idx, result in enumerate(re.findall(start_sep + '(.*?)' + end_sep, fp.read(), re.S)):
if idx == 0:
continue
last_line_idx = len(result.split('\n'))
df = pandas.DataFrame(result.split('\n')[2:last_line_idx-1])
df.to_csv(tmp_csv)
df = pandas.read_csv(tmp_csv, skiprows=1,
engine='python',
sep='[\s,]{2,20}',
index_col=0)
df.set_index(df.columns[0], inplace=True)
odf = odf.append({'site': os.path.basename(inp_file)[:-4],
'year': cur_yr,
'WOC': df.loc['WOC(kg/ha)']['TOT']}, ignore_index=True)
cur_yr += 1
return odf
###############################
# ACN
###############################
def parse_ACN(self, fls):
list_df = []
for idx, fl in enumerate(fls):
try:
df = self.read_repeat_blocks(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, start_sep='CO2', end_sep='CFEM')
except:
logging.info('Error reading ' + fl)
list_df.append(df)
frame_df = pandas.concat(list_df)
frame_df.to_csv(self.csv_path)
frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
###############################
# ACM
###############################
def parse_ACM(self, fls):
list_df = []
for idx, fl in enumerate(fls):
try:
df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP,
skipinitialspace=True, usecols=constants.ACM_PARAMS, sep=' ')
except:
logging.info('Error reading ' + fl)
df['site'] = fl[:-4]
list_df.append(df)
frame_df = pandas.concat(list_df)
frame_df.to_csv(self.csv_path)
frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
###############################
# ACY
###############################
def parse_ACY(self, fls):
list_df = []
for idx, fl in enumerate(fls):
try:
df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP,
skipinitialspace=True, usecols=constants.ACY_PARAMS, sep=' ')
except:
logging.info('Error reading ' + fl)
df['site'] = fl[:-4]
list_df.append(df)
frame_df = pandas.concat(list_df)
frame_df.to_csv(self.csv_path)
frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
###############################
# ANN
###############################
def parse_ANN(self, fls):
list_df = []
# Get column widths
cols_df = pandas.read_table(self.epic_out_dir + os.sep + self.ftype + os.sep + fls[0], skiprows=constants.SKIP,
sep=' ', skipinitialspace=True)
widths = [5,4]
for i in range(len(cols_df.columns.values)-2):
widths.append(8)
for idx, fl in enumerate(fls):
try:
df = pandas.read_fwf(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP, sep=' ',
usecols=constants.ANN_PARAMS, skipinitialspace=True, widths=widths)
except:
logging.info('Error reading ' + fl)
df['site'] = fl[:-4]
list_df.append(df)
frame_df = pandas.concat(list_df)
frame_df.to_csv(self.csv_path)
frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
###############################
# ATG
###############################
def parse_ATG(self, fls):
list_df = []
for fl in fls:
try:
df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl,
skiprows=constants.SKIP, skipinitialspace=True, usecols=constants.ATG_PARAMS, sep=' ')
except:
logging.info('Error reading ' + fl)
#time_df = df[(df.Y >= int(constants.START_YR)) & (df.Y <= int(constants.END_YR))]
df['site'] = fl[:-4]
df.rename(columns={'Y': 'YR'}, inplace=True)
list_df.append(df)
frame_df = pandas.concat(list_df)
frame_df.to_csv(self.csv_path)
frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
###############################
# DGN
###############################
def parse_DGN(self, fls):
list_df = []
for fl in fls:
try:
df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl, skiprows=constants.SKIP, delim_whitespace=True,
usecols=constants.DGN_PARAMS, parse_dates={"datetime": [0,1,2]}, index_col="datetime",
date_parser=lambda x: pandas.datetime.strptime(x, '%Y %m %d'))
except:
logging.info('Error reading ' + fl)
start = df.index.searchsorted(datetime.datetime(constants.START_YR, 1, 1))
end = df.index.searchsorted(datetime.datetime(constants.END_YR, 12, 31))
time_df = df.ix[start:end]
time_df = time_df.groupby(time_df.index.map(lambda x: x.year)).max()
time_df['site'] = fl[:-4]
list_df.append(time_df)
frame_df = pandas.concat(list_df)
frame_df.to_csv(self.csv_path)
frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
###############################
# SCN
###############################
def parse_SCN(self, fls):
list_df = []
for idx, fl in enumerate(fls):
temp_df = pandas.DataFrame(index=[constants.END_YR], columns=constants.SCN_PARAMS)
try:
df = pandas.read_csv(self.epic_out_dir + os.sep + self.ftype + os.sep + fl,
skiprows=constants.SKIP_SCN, skipinitialspace=True, sep=' ')
except:
logging.info('Error reading ' + fl)
for var in constants.SCN_PARAMS:
temp_df[var] = df.TOT.ix[var]
temp_df['site'] = fl[:-4]
temp_df['YR'] = temp_df.index
list_df.append(temp_df)
frame_df = pandas.concat(list_df)
frame_df.index = range(len(frame_df))
frame_df.to_csv(self.csv_path)
frame_df.to_sql(self.db_name, self.engine, if_exists=self.ifexist)
def collect_epic_output(self, fls):
if(self.ftype == 'DGN'):
self.parse_DGN(fls)
elif(self.ftype == 'ACY'):
self.parse_ACY(fls)
elif(self.ftype == 'ANN'):
self.parse_ANN(fls)
elif(self.ftype == 'ATG'):
self.parse_ATG(fls)
elif(self.ftype == 'SCN'):
self.parse_SCN(fls)
elif(self.ftype == 'ACM'):
self.parse_ACM(fls)
elif(self.ftype == 'ACN'):
self.parse_ACN(fls)
else:
logging.info('Wrong file type')
def sql_to_csv():
"""
SQL stores information from all years. We then extract information for the latest year from this file
:return:
"""
# @TODO: Exclude columns which have already been read from other files
epic_fl_types = constants.GET_PARAMS
dfs = pandas.DataFrame()
for idx, fl_name in enumerate(epic_fl_types):
obj = EPIC_Output_File(ftype=fl_name, tag=constants.TAG)
try:
df = pandas.read_sql_table(obj.db_name, obj.engine)
# Rename year
df.rename(columns={'year': 'YR'}, inplace=True)
except:
logging.info(obj.db_name + ' not found')
        if fl_name != 'SCN':
# Get df for all sites and in tears in constants.EXTR_YRS
slice = df[df['YR'].isin(constants.EXTR_YRS)]
slice['isite'] = slice['site']
slice = slice.set_index(['site', 'YR']).unstack('YR')
if idx == 0:
dfs = slice
else:
dfs = pandas.merge(dfs, slice, how='outer')
else: # SCN
# SCN should not be parsed since we are reading the annual ACN now
continue
if idx == 0:
dfs = df
else:
dfs = pandas.merge(dfs, df, how='outer')
dfs.columns = ['{}_{}'.format(col[0], col[1]) for col in dfs.columns.values]
# Merge with EPICRUN.DAT
epic_df = pandas.read_csv(constants.sims_dir + os.sep + obj.ldir + os.sep + 'EPICRUN.DAT', sep='\s+', header=None)
epic_df.columns = ['ASTN', 'ISIT', 'IWP1','IWP5', 'IWND', 'INPS', 'IOPS', 'IWTH']
# 1. Read ieSllist.dat and get mukey and corresponding index
# 2. Convert to dataframe
# 3. Merge with SSURGO properties csv file
# 4. Merge EPIC outputs with EPICRUN.DAT
# 5. Merge EPIC and SSURGO and output to csv
soil_dict = {}
with open(constants.sims_dir + os.sep + obj.ldir + os.sep + constants.SLLIST) as f:
for line in f:
#Sample line from soil file: 1 "Soils//1003958.sol"
(key, val) = int(line.split()[0]), int(line.split('//')[1].split('.')[0])
soil_dict[key] = val
soil_df = pandas.DataFrame.from_dict(soil_dict, orient='index').reset_index()
soil_df.columns = ['INPS', 'mukey']
sgo_file = pandas.read_csv(constants.sgo_dir + os.sep + constants.dominant)
grp_sgo = sgo_file.groupby('mukey').mean().reset_index()
grp_sgo = pandas.merge(grp_sgo, soil_df, on='mukey')
# Merge with EPICRUN
site_col = 'isite_' + str(constants.EXTR_YRS[0])
dfs[[site_col]] = dfs[[site_col]].astype(int)
epic_df[['ASTN']] = epic_df[['ASTN']].astype(int) # ASTN is site number
dfs = pandas.merge(dfs, epic_df, left_on=site_col, right_on='ASTN')
# Merge with SSURGO file
dfs = pandas.merge(dfs, grp_sgo, on='INPS') # INPS is identifier of soil files
dfs.to_csv(constants.csv_dir + os.sep + 'EPIC_' + obj.ldir + '.csv')
if __name__ == '__main__':
for idx, fl_name in enumerate(constants.GET_PARAMS):
print idx, fl_name
obj = EPIC_Output_File(ftype=fl_name, tag=constants.TAG)
# Get list of all output files for each EPIC output category
try:
list_fls = fnmatch.filter(os.listdir(obj.epic_out_dir + os.sep + constants.GET_PARAMS[idx] + os.sep), '*.' + fl_name)
# Collect EPIC output to database and csv
if len(list_fls) > 0:
obj.collect_epic_output(list_fls)
except:
logging.info('Error in reading ' + fl_name)
# Extract results
sql_to_csv()
| mit |
legacysurvey/legacypipe | py/legacyanalysis/sersic-agn.py | 2 | 10776 | from tractor.galaxy import *
from tractor import *
from legacypipe.survey import *
from legacypipe.coadds import quick_coadds
from tractor.devagn import *
from tractor.sersic import SersicGalaxy, SersicIndex
from tractor.seragn import SersicAgnGalaxy
from astrometry.util.file import pickle_to_file, unpickle_from_file
import pylab as plt
import numpy as np
import matplotlib.gridspec as gridspec
import os
ima = dict(interpolation='nearest', origin='lower')
class Duck(object):
pass
def flux_string(br):
s = []
for band in 'grz':
flux = br.getFlux(band)
if flux <= 0:
s.append('%s=(%.2f nmy)' % (band, flux))
else:
s.append('%s=%.2f' % (band, NanoMaggies.nanomaggiesToMag(flux)))
s = ', '.join(s)
return s
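# Note on the magnitude conversion used in flux_string above: tractor's
# NanoMaggies.nanomaggiesToMag follows the usual nanomaggie convention
# m = 22.5 - 2.5*log10(flux), e.g. 1 nmgy -> 22.5 mag and 100 nmgy -> 17.5 mag.
# The conversion has no finite value for flux <= 0, hence the raw-flux fallback.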
def showmods(tims,mods):
#plt.figure(figsize=(10,6))
cols = len(tims) // 3
panels = [((tim.getImage()-mod)*tim.getInvError()) for tim,mod in list(zip(tims, mods))]
h = min([p.shape[0] for p in panels])
w = min([p.shape[1] for p in panels])
panels = [p[:h,:w] for p in panels]
stack = []
while len(panels):
stack.append(np.hstack(panels[:cols]))
panels = panels[cols:]
stack = np.vstack((stack))
plt.imshow(stack, vmin=-2, vmax=2, **ima)
plt.xticks(w * (0.5 + np.arange(cols)), np.arange(cols))
plt.yticks(h * (0.5 + np.arange(3)), ['g','r','z'])
def showresid(tims,mods,wcs,bands='grz'):
co,_ = quick_coadds(tims, bands, wcs)
comod,_ = quick_coadds(tims, bands, wcs, images=mods)
plt.imshow(np.flipud(get_rgb([i-m for i,m in zip(co,comod)], bands)))
plt.xticks([]); plt.yticks([])
return co,comod
def showboth(tims,mods,wcs,bands):
fig = plt.figure(num=1, figsize=(14,6), constrained_layout=True)
fig.clf()
gs = fig.add_gridspec(3, 4)
ax = fig.add_subplot(gs[:, :3])
cols = len(tims) // 3
rows = 3
panels = [((tim.getImage()-mod)*tim.getInvError()) for tim,mod in list(zip(tims, mods))]
h = min([p.shape[0] for p in panels])
w = min([p.shape[1] for p in panels])
panels = [p[:h,:w] for p in panels]
stack = []
#hzero = np.zeros((h,1))
while len(panels):
# hs = [hzero]
# for p in panels[:cols]:
# hs.extend([p, hzero])
# hs = np.hstack(hs)
# hh,hw = hs.shape
# if len(stack) == 0:
# wzero = np.zeros((1,hw))
# stack.append(wzero)
# stack.append(hs)
# stack.append(wzero)
stack.append(np.hstack(panels[:cols]))
panels = panels[cols:]
stack = np.vstack((stack))
ax.imshow(stack, vmin=-2, vmax=2, **ima)
xl,yl = ax.get_xlim(), ax.get_ylim()
#a = ax.get_axis()
for c in range(cols+1):
plt.axvline(c * w, color='k')
for r in range(rows+1):
plt.axhline(r * h, color='k')
#ax.set_axis(a)
ax.set_xlim(xl)
ax.set_ylim(yl)
#xl,yl = ax.get_xlim(), ax.get_ylim()
ax.set_xticks(w * (0.5 + np.arange(cols)))
ax.set_xticklabels(np.arange(cols))
ax.set_yticks(h * (0.5 + np.arange(3)))
ax.set_yticklabels(['g','r','z'])
ax = fig.add_subplot(gs[2, 3])
co,comod = showresid(tims, mods, wcs)
ax = fig.add_subplot(gs[0, 3])
plt.imshow(np.flipud(get_rgb(co, bands)))
plt.xticks([]); plt.yticks([])
ax = fig.add_subplot(gs[1, 3])
plt.imshow(np.flipud(get_rgb(comod, bands)))
plt.xticks([]); plt.yticks([])
def main():
if os.path.exists('results.pickle'):
results = unpickle_from_file('results.pickle')
else:
results = []
for isrc,(ra,dec,brickname) in enumerate([
(0.2266, 3.9822, '0001p040'),
(7.8324, 1.2544, '0078p012'),
(1.1020, 3.9040, '0011p040'),
(7.3252, 4.6847, '0073p047'),
(3.1874, 3.9724, '0031p040'),
(9.5112, 4.6934, '0094p047'),
(4.4941, 1.1058, '0043p010'),
(3.8900, 0.6041, '0038p005'),
(8.1934, 4.0124, '0081p040'),
(6.8125, 0.5463, '0068p005'),
]):
#if isrc < 7:
# continue
if isrc not in [4,6,7,8,9]:
continue
outdir = 'out_%.4f_%.4f' % (ra,dec)
datadir = outdir.replace('out_', 'data_')
#cmd = 'ssh cori "cd legacypipe2/py && python legacypipe/runbrick.py --radec %.4f %.4f --width 100 --height 100 --survey-dir fakedr9 --outdir %s --stage image_coadds --skip-calibs && python legacyanalysis/create_testcase.py --survey-dir fakedr9 %s/coadd/*/*/*-ccds.fits %s %s"' % (ra, dec, outdir, outdir, datadir, brickname)
#cmd = 'ssh cori "cd legacypipe2/py && python legacypipe/runbrick.py --radec %.4f %.4f --width 100 --height 100 --survey-dir fakedr9 --outdir %s --stage image_coadds --skip-calibs && python legacyanalysis/create_testcase.py --survey-dir fakedr9 --outlier-dir %s %s/coadd/*/*/*-ccds.fits %s %s"' % (ra, dec, outdir, outdir, outdir, datadir, brickname)
outbrick = ('custom-%06i%s%05i' %
(int(1000*ra), 'm' if dec < 0 else 'p',
int(1000*np.abs(dec))))
cmd = 'ssh cori "cd legacypipe2/py && python legacyanalysis/create_testcase.py --survey-dir fakedr9 --outlier-dir %s --outlier-brick %s %s/coadd/*/*/*-ccds.fits %s %s"' % (outdir, outbrick, outdir, datadir, brickname)
#os.system(cmd)
cmd = 'rsync -arv cori:legacypipe2/py/%s .' % datadir
#os.system(cmd)
#continue
survey = LegacySurveyData(datadir)
b = Duck()
b.ra = ra
b.dec = dec
W,H = 80,80
wcs = wcs_for_brick(b, W=W, H=H)
targetrd = np.array([wcs.pixelxy2radec(x,y) for x,y in
[(1,1),(W,1),(W,H),(1,H),(1,1)]])
ccds = survey.ccds_touching_wcs(wcs)
print(len(ccds), 'CCDs')
ims = [survey.get_image_object(ccd) for ccd in ccds]
keepims = []
for im in ims:
h,w = im.shape
if h >= H and w >= W:
keepims.append(im)
ims = keepims
gims = [im for im in ims if im.band == 'g']
rims = [im for im in ims if im.band == 'r']
zims = [im for im in ims if im.band == 'z']
nk = min([len(gims), len(rims), len(zims), 5])
ims = gims[:nk] + rims[:nk] + zims[:nk]
print('Keeping', len(ims), 'images')
tims = [im.get_tractor_image(pixPsf=True, hybridPsf=True, normalizePsf=True, splinesky=True, radecpoly=targetrd) for im in ims]
bands = 'grz'
devsrc = DevGalaxy(RaDecPos(ra,dec), NanoMaggies(**dict([(b,10.) for b in bands])), EllipseESoft(0., 0., 0.))
tr = Tractor(tims, [devsrc])
tr.freezeParam('images')
tr.optimize_loop()
print('Fit DeV source:', devsrc)
devmods = list(tr.getModelImages())
showboth(tims, devmods, wcs, bands);
s = flux_string(devsrc.brightness)
plt.suptitle('DeV model: ' + s + '\ndchisq 0.')
plt.savefig('src%02i-dev.png' % isrc)
devchi = 2. * tr.getLogLikelihood()
dasrc = DevAgnGalaxy(devsrc.pos.copy(), devsrc.brightness.copy(), devsrc.shape.copy(), NanoMaggies(**dict([(b,1.) for b in bands])))
#dasrc = DevAgnGalaxy(RaDecPos(ra,dec), NanoMaggies(**dict([(b,10.) for b in bands])), EllipseESoft(0., 0., 0.), NanoMaggies(**dict([(b,1.) for b in bands])))
tr = Tractor(tims, [dasrc])
tr.freezeParam('images')
tr.optimize_loop()
print('Fit Dev+PSF source:', dasrc)
damods = list(tr.getModelImages())
showboth(tims, damods, wcs, bands)
s1 = flux_string(dasrc.brightnessDev)
s2 = flux_string(dasrc.brightnessPsf)
pcts = [100. * dasrc.brightnessPsf.getFlux(b) / dasrc.brightnessDev.getFlux(b) for b in bands]
s3 = ', '.join(['%.2f' % p for p in pcts])
dachi = 2. * tr.getLogLikelihood()
plt.suptitle('DeV + Point Source model: DeV %s, PSF %s' % (s1, s2) + ' (%s %%)' % s3 +
'\ndchisq %.1f' % (dachi - devchi))
plt.savefig('src%02i-devcore.png' % isrc)
#sersrc = SersicGalaxy(RaDecPos(ra, dec), NanoMaggies(**dict([(b,10.) for b in bands])), EllipseESoft(0.5, 0., 0.), SersicIndex(4.0))
sersrc = SersicGalaxy(devsrc.pos.copy(), devsrc.brightness.copy(), devsrc.shape.copy(), SersicIndex(4.0))
tr = Tractor(tims, [sersrc])
tr.freezeParam('images')
r = tr.optimize_loop()
print('Opt:', r)
print('Fit Ser source:', sersrc)
if sersrc.sersicindex.getValue() >= 6.0:
sersrc.freezeParam('sersicindex')
r = tr.optimize_loop()
print('Re-fit Ser source:', sersrc)
sermods = list(tr.getModelImages())
showboth(tims, sermods, wcs, bands)
s = flux_string(sersrc.brightness)
serchi = 2. * tr.getLogLikelihood()
plt.suptitle('Sersic model: %s, index %.2f' % (s, sersrc.sersicindex.getValue()) +
'\ndchisq %.1f' % (serchi - devchi))
plt.savefig('src%02i-ser.png' % isrc)
#sasrc = SersicAgnGalaxy(RaDecPos(ra, dec), NanoMaggies(**dict([(b,10.) for b in bands])), EllipseESoft(0.5, 0., 0.), SersicIndex(4.0), NanoMaggies(**dict([(b,1.) for b in bands])))
si = sersrc.sersicindex.getValue()
if si > 6.0:
si = 4.0
si = SersicIndex(si)
sasrc = SersicAgnGalaxy(sersrc.pos.copy(), sersrc.brightness.copy(), sersrc.shape.copy(), si, NanoMaggies(**dict([(b,1.) for b in bands])))
tr = Tractor(tims, [sasrc])
tr.freezeParam('images')
r = tr.optimize_loop()
print('Fit Ser+PSF source:', sasrc)
if sasrc.sersicindex.getValue() >= 6.0:
sasrc.freezeParam('sersicindex')
r = tr.optimize_loop()
print('Re-fit Ser+PSF source:', sasrc)
samods = list(tr.getModelImages())
showboth(tims, samods, wcs, bands)
s1 = flux_string(sasrc.brightness)
s2 = flux_string(sasrc.brightnessPsf)
pcts = [100. * sasrc.brightnessPsf.getFlux(b) / sasrc.brightness.getFlux(b) for b in bands]
s3 = ', '.join(['%.2f' % p for p in pcts])
sachi = 2. * tr.getLogLikelihood()
plt.suptitle('Sersic + Point Source model: Ser %s, index %.2f, PSF %s' % (s1, sasrc.sersicindex.getValue(), s2) +
' (%s %%)' % s3 +
'\ndchisq %.1f' % (sachi - devchi))
plt.savefig('src%02i-sercore.png' % isrc)
ri = (ra, dec, brickname, devsrc, devchi, dasrc, dachi,
sersrc, serchi, sasrc, sachi)
if len(results) > isrc:
results[isrc] = ri
else:
results.append(ri)
pickle_to_file(results, 'results.pickle')
if __name__ == '__main__':
main()
| bsd-3-clause |
henrykironde/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, the number of samples will
differ between the main problem and the smaller problems within the
folds of the cross validation.
Since our loss function depends on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
    Two separate datasets are used for the two different plots. The reason
    behind this is that the `l1` case works better on sparse data, while `l2`
    is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
fabianvf/osf.io | scripts/annotate_rsvps.py | 60 | 2256 | """Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
try:
return User.find_one(Q('username', 'iexact', email))
except ModularOdmException:
return None
def find_by_name(name):
try:
parts = re.split(r'\s+', name.strip())
except:
return None
if len(parts) < 2:
return None
users = User.find(
reduce(
lambda acc, value: acc & value,
[
Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
for part in parts
]
)
).sort('-date_created')
if not users:
return None
if len(users) > 1:
logger.warn('Multiple users found for name {}'.format(name))
return users[0]
def logs_since(user, date):
return NodeLog.find(
Q('user', 'eq', user._id) &
Q('date', 'gt', date)
)
def nodes_since(user, date):
return Node.find(
Q('creator', 'eq', user._id) &
Q('date_created', 'gt', date)
)
def process(frame):
frame = frame.copy()
frame['user_id'] = ''
frame['user_logs'] = ''
frame['user_nodes'] = ''
frame['last_log'] = ''
for idx, row in frame.iterrows():
user = (
find_by_email(row['Email address'].strip()) or
find_by_name(row['Name'])
)
if user:
date = parse_date(row['Workshop_date'])
frame.loc[idx, 'user_id'] = user._id
logs = logs_since(user, date)
frame.loc[idx, 'user_logs'] = logs.count()
frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
if logs:
frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
return frame
| apache-2.0 |
hypergravity/bopy | bopy/imagetools/image.py | 1 | 5749 | # -*- coding: utf-8 -*-
"""
@author: cham
Created on Fri Aug 14 15:24:20 2015
"""
# import aplpy
# from astropy.table import Table
# from astropy.coordinates import Galactic, SkyCoord
from astropy.wcs import WCS
from astropy.io import fits
from reproject import reproject_from_healpix, reproject_interp, reproject_to_healpix
import matplotlib.pyplot as plt
from healpy import write_map
def image_reproject_from_healpix_to_file(source_image_hdu, target_image_hdu_header, filepath=None):
""" reproject from healpix image to normal wcs image
:param source_image_hdu: the HDU object of source image (healpix)
:param target_image_hdu_header: the HDU object of target image (wcs)
:param filepath: the output file path
:return: array, footprint
"""
array, footprint = reproject_from_healpix(source_image_hdu, target_image_hdu_header)
if filepath is not None:
# write file
fits.writeto(filepath, array, target_image_hdu_header, clobber=True) # clobber=OVERWRITE
else:
# return array & footprint
return array, footprint
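# Hedged usage sketch for the helper above (the file names are placeholders,
# not files shipped with bopy):
#
#     source_hdu = fits.open('healpix_map.fits')[1]
#     target_header = fits.open('wcs_image.fits')[0].header
#     image_reproject_from_healpix_to_file(source_hdu, target_header,
#                                          filepath='reprojected.fits')
#
# With filepath=None the reprojected array and its footprint are returned
# instead of being written to a new FITS file.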
def image_reproject_to_healpix_to_file(array, target_image_hdu_header, coordsys='galactic', filepath=None):
"""reproject image array to healpix image and write file
:param array: image data
:param target_image_hdu_header: the HDU object of
:param coordsys: target coordinate system --> {'galactic', 'equatorial'}
:param filepath: the output file path
:return: array, footprint (only if filepath=None)
"""
array, footprint = reproject_to_healpix((array, target_image_hdu_header), coordsys)
if filepath is not None:
# write file
write_map(filepath, array)
else:
# return array & footprint
return array, footprint
def image_reproject_wcs_to_file(source_image_hdu, target_image_hdu_header, filepath=None):
"""reproject one wcs image to the wcs of another image
:param source_image_hdu: the HDU object of source image
:param target_image_hdu_header: the header object of target image
:param filepath: the output file path
:return:
"""
array, footprint = reproject_interp(source_image_hdu, target_image_hdu_header)
if filepath is not None:
# write file
fits.writeto(filepath, array, target_image_hdu_header, clobber=True) # clobber=OVERWRITE
else:
# return array & footprint
return array, footprint
def image_query(data_type, target_image_hdu_header, filepath=None):
"""query image from stored data
:param data_type: data type of the queried source image
:param target_image_hdu_header: target header
:param filepath: output file path
:return: array, footprint
"""
data_type_coll = {
'haslam_408': ('healpix', 1, '/pool/maps/LAMBDA/haslam408/haslam408_dsds_Remazeilles2014_ns2048.fits'),
'planck_30': ('healpix', 1, '/pool/skysurvey/PLANCK/PR2/LFI_SkyMap_030_1024_R2.01_full.fits'),
'planck_44': ('healpix', 1, '/pool/skysurvey/PLANCK/PR2/LFI_SkyMap_044_1024_R2.01_full.fits'),
'planck_70': ('healpix', 1, '/pool/skysurvey/PLANCK/PR2/LFI_SkyMap_070_2048_R2.01_full.fits'),
'planck_100': ('healpix', 1, '/pool/skysurvey/PLANCK/PR2/HFI_SkyMap_100_2048_R2.00_full.fits'),
'planck_143': ('healpix', 1, '/pool/skysurvey/PLANCK/PR2/HFI_SkyMap_143_2048_R2.00_full.fits'),
'planck_217': ('healpix', 1, '/pool/skysurvey/PLANCK/PR2/HFI_SkyMap_217_2048_R2.00_full.fits'),
'planck_353': ('healpix', 1, '/pool/skysurvey/PLANCK/PR2/HFI_SkyMap_353_2048_R2.00_full.fits'),
'planck_545': ('healpix', 1, '/pool/skysurvey/PLANCK/PR2/HFI_SkyMap_545_2048_R2.00_full.fits'),
'planck_857': ('healpix', 1, '/pool/skysurvey/PLANCK/PR2/HFI_SkyMap_857_2048_R2.00_full.fits'),
'planck_dust': ('healpix', 1,
'/pool/skysurvey/PLANCK/PR2/com/COM_CompMap_ThermalDust-commander_2048_R2.00.fits')}
assert data_type in data_type_coll.keys()
map_type, map_hdu, map_path = data_type_coll[data_type]
if map_type == 'healpix':
if filepath is not None:
image_reproject_from_healpix_to_file(fits.open(map_path)[map_hdu],
target_image_hdu_header, filepath=filepath)
else:
array, footprint = image_reproject_from_healpix_to_file(fits.open(map_path)[map_hdu],
target_image_hdu_header, filepath=None)
return array, footprint
else:
return None, None
# -------------------------------------------------------------------------
if __name__ == '__main__':
target_header = fits.open('/pool/mmt/2015b/wise/ngc_663/w1/mosaic_bm/mosaic.fits')[0].header
target_wcs = WCS(target_header)
# haslam408 = fits.open('/pool/maps/LAMBDA/haslam408/haslam408_dsds_Remazeilles2014_ns2048.fits')[1]
# haslam408 = fits.open('/pool/maps/LAMBDA/IRIS/IRIS_nohole_1_2048.fits')[1]
# hdu_in = fits.open('/pool/MMT/2015b/iras/b1/mosaic16/mosaic.fits')[0]
# array, footprint = image_reproject_wcs_to_file(hdu_in, target_header)
array_, footprint_ = image_query('planck_857', target_header)
fig = plt.figure()
ax1 = plt.subplot(1, 1, 1, projection=target_wcs)
ax1.imshow(array_, origin='lower', vmin=1, vmax=5000)
ax1.coords.grid(color='white')
ax1.coords['ra'].set_axislabel('Right Ascension')
ax1.coords['dec'].set_axislabel('Declination')
fig.canvas.draw()
| bsd-3-clause |
sharadmv/trees | trees/tssb/util.py | 1 | 1943 | import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
import seaborn as sns  # used by plot_data for the colour palette
import matplotlib.pyplot as plt  # plot_data calls plt.scatter directly
def plot_tssb(tssb, ax=None):
g = nx.DiGraph()
if ax:
ax.set_axis_off()
assert tssb.root is not None
add_nodes(g, tssb.root)
pos = nx.graphviz_layout(g, prog='dot', args='-Granksep=100.0')
labels = {n: n.point_count for n in g.nodes()}
nodes = nx.draw_networkx_nodes(g, pos,
node_color='b',
node_size=300,
alpha=0.8, ax=ax)
nx.draw_networkx_edges(g, pos,
alpha=0.8, arrows=False, ax=ax)
labels = nx.draw_networkx_labels(g, pos, labels, font_size=12, font_color='w', ax=ax)
return g, nodes, labels
def add_nodes(g, node):
for c, child_node in node.children.items():
g.add_edge(node, child_node)
add_nodes(g, child_node)
def generate_data(N, tssb, collect=True):
data = []
y = []
for i in xrange(N):
node, index = tssb.sample_one()
data.append(node.sample_one())
y.append(index)
if collect:
tssb.garbage_collect()
return np.array(data), y
def plot_data(X, z, tssb=None):
nodes = set(z)
color_map = sns.color_palette("coolwarm", len(set(map(len, nodes))))
colors = {}
for c, n in zip(color_map, set(map(len, nodes))):
colors[n] = c
for i, (x, y) in enumerate(X):
plt.scatter(x, y, color=colors[len(z[i])])
def save_tssb(tssb, location):
with open(location, 'wb') as fp:
pickle.dump(tssb.get_state(), fp)
def load_tssb(location):
with open(location, 'rb') as fp:
tssb = pickle.load(fp)
return tssb
def print_tssb(t, y, N):
points = xrange(N)
nodes = set([t.point_index(point)[1] for point in points])
assignments = {}
for node in nodes:
assignments[node] = [y[p] for p in t.get_node(node).points]
return assignments
| mit |
jenshnielsen/basemap | examples/plotsst.py | 8 | 1770 | from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset, date2index
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
date = datetime(2007,12,15,0) # date to plot.
# open dataset.
dataset = \
Dataset('http://www.ncdc.noaa.gov/thredds/dodsC/OISST-V2-AVHRR_agg')
timevar = dataset.variables['time']
timeindex = date2index(date,timevar) # find time index for desired date.
# read sst. Will automatically create a masked array using
# missing_value variable attribute. 'squeeze out' singleton dimensions.
sst = dataset.variables['sst'][timeindex,:].squeeze()
# read ice.
ice = dataset.variables['ice'][timeindex,:].squeeze()
# read lats and lons (representing centers of grid boxes).
lats = dataset.variables['lat'][:]
lons = dataset.variables['lon'][:]
lons, lats = np.meshgrid(lons,lats)
# create figure, axes instances.
fig = plt.figure()
ax = fig.add_axes([0.05,0.05,0.9,0.9])
# create Basemap instance.
# coastlines not used, so resolution set to None to skip
# continent processing (this speeds things up a bit)
m = Basemap(projection='kav7',lon_0=0,resolution=None)
# draw line around map projection limb.
# color background of map projection region.
# missing values over land will show up this color.
m.drawmapboundary(fill_color='0.3')
# plot sst, then ice with pcolor
im1 = m.pcolormesh(lons,lats,sst,shading='flat',cmap=plt.cm.jet,latlon=True)
im2 = m.pcolormesh(lons,lats,ice,shading='flat',cmap=plt.cm.gist_gray,latlon=True)
# draw parallels and meridians, but don't bother labelling them.
m.drawparallels(np.arange(-90.,99.,30.))
m.drawmeridians(np.arange(-180.,180.,60.))
# add colorbar
cb = m.colorbar(im1,"bottom", size="5%", pad="2%")
# add a title.
ax.set_title('SST and ICE analysis for %s'%date)
plt.show()
| gpl-2.0 |
erjerison/adaptability | github_submission/mutual_information_analysis_1_8_2017.py | 1 | 21819 | import numpy
import sys
import matplotlib.pylab as pt
import matplotlib.cm
import numpy.random
import matplotlib.ticker as ticker
from matplotlib.lines import Line2D
import scipy.stats
from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist.floating_axes as floating_axes
import matplotlib
matplotlib.rcParams['font.sans-serif'] = 'Arial'
matplotlib.rcParams['font.size'] = 10.0
matplotlib.rcParams['lines.markeredgewidth'] = 0
matplotlib.rcParams['lines.markersize'] = 3.5
matplotlib.rcParams['lines.linewidth'] = .5
matplotlib.rcParams['legend.fontsize'] = 8.0
matplotlib.rcParams['axes.linewidth']=.5
matplotlib.rcParams['patch.linewidth']=.5
def permute_within_categories(categories, cat_inds):
#categories: 1d array where each item has an index indicating which category it belongs to. The category indices need not be consecutive.
#cat_inds: list of category indices.
n = len(categories)
inds = numpy.arange(n) #Original order
permuted_order = numpy.zeros((n,),dtype='int')
for i in range(len(cat_inds)):
items_in_cat_unpermuted = inds[categories == cat_inds[i]]
permuted_order[items_in_cat_unpermuted] = numpy.random.permutation(items_in_cat_unpermuted)
return permuted_order
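# Illustrative sketch (not executed anywhere): with categories = [0, 0, 1, 1, 1]
# and cat_inds = [0, 1], permute_within_categories shuffles indices {0, 1} among
# themselves and {2, 3, 4} among themselves, so it may return
# numpy.array([1, 0, 4, 2, 3]) but never moves an item into another category.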
def permute_within_categories_preserve_num_muts(mutation_table, categories, cat_inds):
#categories: 1d array where each item has an index indicating which category it belongs to. The category indices need not be consecutive.
#cat_inds: list of category indices.
n = len(categories)
inds = numpy.arange(n) #Original order
n_muts = mutation_table.shape[1]
mut_inds = numpy.arange(n_muts)
permuted_mutation_table = numpy.zeros_like(mutation_table)
for i in range(len(cat_inds)):
category_indices = inds[categories == cat_inds[i]]
#Construct ordered pair list of which mutations occurred in which clones in this category
pop_mut_list = []
for index in category_indices:
muts = mut_inds[mutation_table[index,:] > .5]
for mut in muts:
pop_mut_list.append([index, mut])
#Permute mutations
n_muts_category = len(pop_mut_list)
perm = numpy.random.permutation(numpy.arange(n_muts_category))
pop_mut_list = numpy.array(pop_mut_list)
pop_mut_list_permuted = pop_mut_list
pop_mut_list_permuted[:,1] = pop_mut_list[perm,1]
#Construct the section of the permuted mutation table for this category
for j in range(len(pop_mut_list_permuted[:,0])):
mut_loc = pop_mut_list_permuted[j,:]
permuted_mutation_table[mut_loc[0],mut_loc[1]] = 1
return permuted_mutation_table
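# Unlike permute_within_categories above, which permutes whole populations,
# this routine permutes individual (population, gene) hits within a category:
# each population keeps its number of mutations and the per-category multiset of
# hit genes is preserved, but which gene lands in which population is
# randomized. This is the null used by the permutation tests further down.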
def calculate_cat_inds(categories):
categories = numpy.array(categories)
return numpy.unique(categories)
def calculate_helper_matrix(categories, cat_inds):
#The helper matrix is a utility for quickly summing over specified rows in a table. It is intended to be matrix multiplied by the original mutation table; hence it is n_cats x n_pops
num_cats = len(cat_inds)
num_pops = len(categories)
helper_matrix = numpy.zeros((num_cats,num_pops))
for i in range(num_cats):
specific_cat_inds = numpy.where(categories == cat_inds[i])
helper_matrix[i, specific_cat_inds] = 1
return helper_matrix
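# Illustrative sketch of the helper matrix: with categories = [0, 0, 1, 1, 1]
# and cat_inds = [0, 1], calculate_helper_matrix returns
#     [[1, 1, 0, 0, 0],
#      [0, 0, 1, 1, 1]]
# so that numpy.dot(helper_matrix, mutation_table) sums hits over the
# populations in each category, and numpy.dot(helper_matrix, helper_matrix.T)
# is a diagonal matrix whose entries are the number of populations per category.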
def calculate_entropy_statistic(mutation_table, helper_matrix):
muts_per_gene = numpy.sum(mutation_table, axis = 0)
collapsed_table = numpy.dot(helper_matrix,mutation_table)
pops_per_category = numpy.dot(helper_matrix,helper_matrix.T)
#print pops_per_category
probs = numpy.dot(numpy.linalg.inv(pops_per_category),collapsed_table)
num_genes = mutation_table.shape[1]
entropies = numpy.zeros((num_genes,))
total_pops = numpy.float(numpy.sum(pops_per_category))
for i in range(num_genes):
nonzero_inds = numpy.all([probs[:,i] > 0 , probs[:,i]< 1], axis = 0)
nonzero_p_hit = probs[:,i][nonzero_inds]
nonzero_p_no_hit = 1. - nonzero_p_hit
pops_per_cat_temp = numpy.diag(pops_per_category)[nonzero_inds]
entropies[i] = numpy.sum(-1*pops_per_cat_temp/total_pops*(nonzero_p_hit*numpy.log2(nonzero_p_hit) + nonzero_p_no_hit*numpy.log2(nonzero_p_no_hit)))
return numpy.sum(entropies)
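# In formula form, for each gene g let p_c be the fraction of populations in
# category c carrying a mutation in g and n_c the number of populations in
# category c (N = sum_c n_c). The function above computes
#     H_g = sum_c (n_c / N) * ( -p_c*log2(p_c) - (1 - p_c)*log2(1 - p_c) )
# i.e. the entropy of hit / not-hit conditioned on the category, and returns
# sum_g H_g. The mutual-information statistics below are differences of this
# quantity between a coarser and a finer partition of the populations.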
def calculate_entropy_statistic2(mutation_table, helper_matrix):
#This function can be used to weight double-hit mutations less than other mutations, since they carry less information.
#However, for this dataset including the 2-hit mutations with equal weight was equivalently sensitive.
muts_per_gene = numpy.sum(mutation_table, axis = 0)
collapsed_table = numpy.dot(helper_matrix,mutation_table)
pops_per_category = numpy.dot(helper_matrix,helper_matrix.T)
probs = numpy.dot(numpy.linalg.inv(pops_per_category),collapsed_table) #probability that a population in this category got a mutation
#print probs
num_genes = mutation_table.shape[1]
entropies = numpy.zeros((num_genes,))
weight = 1.
total_pops = numpy.float(numpy.sum(pops_per_category))
for i in range(num_genes):
if muts_per_gene[i] > 2.1:
nonzero_inds = numpy.all([probs[:,i] > 0 , probs[:,i]< 1], axis = 0)
nonzero_p_hit = probs[:,i][nonzero_inds]
nonzero_p_no_hit = 1. - nonzero_p_hit
pops_per_cat_temp = numpy.diag(pops_per_category)[nonzero_inds]
entropies[i] = numpy.sum(-1*pops_per_cat_temp/total_pops*(nonzero_p_hit*numpy.log2(nonzero_p_hit) + nonzero_p_no_hit*numpy.log2(nonzero_p_no_hit)))
else:
nonzero_inds = numpy.all([probs[:,i] > 0 , probs[:,i]< 1], axis = 0)
nonzero_p_hit = probs[:,i][nonzero_inds]
nonzero_p_no_hit = 1. - nonzero_p_hit
pops_per_cat_temp = numpy.diag(pops_per_category)[nonzero_inds]
entropies[i] = weight*numpy.sum(-1*pops_per_cat_temp/total_pops*(nonzero_p_hit*numpy.log2(nonzero_p_hit) + nonzero_p_no_hit*numpy.log2(nonzero_p_no_hit)))
return numpy.sum(entropies)
def calculate_presence_absence_statistic(mutation_table, helper_matrix):
	#This is a simple test statistic that counts, over all genes, the gene/category combinations with no hits.
collapsed_table = numpy.dot(helper_matrix,mutation_table)
num_zeros = numpy.sum(collapsed_table < .5)
return num_zeros
#Read in the list of mutations that fixed in each population. Filter out snps that occur in multiple descendants of the same founder--these were SGV from the passaging of this segregant well.
input_file = 'data/mutation_lists_with_aa_positions_reannotated.txt'
#First loop to find any mutations that are shared among descendants of the same segregant
file = open(input_file,'r')
file_lines = file.readlines()
file.close()
segregant_mut_dict = {}
common_mut_dict = {}
for line in file_lines:
linelist = line.strip().split('\t')
if len(linelist) < 1.5:
#Go to the next clone
clone_name = linelist[0]
segregant = clone_name.split('_')[0]
if segregant not in segregant_mut_dict:
segregant_mut_dict[segregant] = []
else:
mutation = ('_').join(str(i) for i in linelist)
if len(linelist) > 5.5:
if linelist[6] == 'Non':
if mutation in segregant_mut_dict[segregant]:
print segregant, mutation
if segregant in common_mut_dict:
common_mut_dict[segregant].append(mutation)
else:
common_mut_dict[segregant] = [mutation]
if mutation not in segregant_mut_dict[segregant]:
segregant_mut_dict[segregant].append(mutation)
##Second loop to identify all de novo nonsynonymous mutations (and indels)
gene_dict_by_sample = {}
mutation_dict_by_sample = {}
for line in file_lines:
linelist = line.strip().split('\t')
if len(linelist) < 1.5:
#Go to the next clone
clone_name = linelist[0]
gene_dict_by_sample[clone_name] = []
mutation_dict_by_sample[clone_name] = []
local_gene_names = []
segregant = clone_name.split('_')[0]
else:
gene_name = linelist[4]
mutation = ('_').join(str(i) for i in linelist)
if len(linelist) > 5.5:
if linelist[6] == 'Non':
if segregant in common_mut_dict: #There might be shared ancestral snps
if ((gene_name not in local_gene_names) and (len(gene_name) < 6.5) and (mutation not in common_mut_dict[segregant])): #We have not already counted this mutation, it is not an ancestral mutation, and it is not in a dubious ORF
local_gene_names.append(gene_name)
gene_dict_by_sample[clone_name].append(gene_name)
mutation_dict_by_sample[clone_name].append(mutation)
elif ((gene_name not in local_gene_names) and (len(gene_name) < 6.5)): #We have not already counted this mutation, it is not an ancestral mutation, and it is not in a dubious ORF
local_gene_names.append(gene_name)
gene_dict_by_sample[clone_name].append(gene_name)
mutation_dict_by_sample[clone_name].append(mutation)
#Determine how many independent times each gene was mutated, and make list of genes for each segregant.
gene_name_counts = []
gene_names = []
samples = sorted(gene_dict_by_sample.keys())
for sample in samples:
for gene in gene_dict_by_sample[sample]:
if gene in gene_names:
index = numpy.where(numpy.array(gene_names) == gene)[0]
gene_name_counts[index] += 1
else:
gene_names.append(gene)
gene_name_counts.append(1)
gene_name_counts_sc = numpy.zeros_like( numpy.array(gene_name_counts) )
gene_name_counts_ypd = numpy.zeros_like( numpy.array(gene_name_counts) )
for sample in samples:
env = sample.split('_')[2]
if env == 'sc':
for gene in gene_dict_by_sample[sample]:
index = numpy.where(numpy.array(gene_names) == gene)[0]
gene_name_counts_sc[index] += 1
elif env == 'ypd':
for gene in gene_dict_by_sample[sample]:
index = numpy.where(numpy.array(gene_names) == gene)[0]
gene_name_counts_ypd[index] += 1
#print gene_name_counts_sc
#print gene_name_counts_ypd
##Import fitness and founder genotype data
#Import fitness and genotype data
filename1 = 'data/fitness_measurements_with_population_names_12_29_2016.csv'
filename2 = 'data/control_replicate_measurements.csv'
filename3 = 'data/segregant_genotypes_deduplicated_with_header.csv'
segregant_vector = []
init_fits_ypd = []
init_std_errs_ypd = []
init_fits_sc = []
init_std_errs_sc = []
final_fits_ypd_pops_in_ypd = []
segregant_vector_ypd_pops = []
final_fits_sc_pops_in_sc = []
segregant_vector_sc_pops = []
final_fits_sc_pops_in_ypd = []
final_fits_ypd_pops_in_sc = []
file1 = open(filename1,'r')
firstline = 0
for line in file1:
if firstline < .5:
firstline += 1
continue
linestrs = line.strip().split(';')
segregant_vector.append(linestrs[0])
init_fits_ypd.append(float(linestrs[1]))
init_std_errs_ypd.append(float(linestrs[2]))
init_fits_sc.append(float(linestrs[3]))
init_std_errs_sc.append(float(linestrs[4]))
ypd_evolved_pops = linestrs[5].split(',')
for entry in ypd_evolved_pops:
segregant_vector_ypd_pops.append(linestrs[0])
final_fits_ypd_pops_in_ypd.append(float(entry.split()[1]))
final_fits_ypd_pops_in_sc.append(float(entry.split()[2]))
sc_evolved_pops = linestrs[6].split(',')
for entry in sc_evolved_pops:
segregant_vector_sc_pops.append(linestrs[0])
final_fits_sc_pops_in_ypd.append(float(entry.split()[1]))
final_fits_sc_pops_in_sc.append(float(entry.split()[2]))
file1.close()
init_fits_ypd = numpy.array(init_fits_ypd)
init_std_errs_ypd = numpy.array(init_std_errs_ypd)
init_fits_sc = numpy.array(init_fits_sc)
init_std_errs_sc = numpy.array(init_std_errs_sc)
final_fits_ypd_pops_in_ypd = numpy.array(final_fits_ypd_pops_in_ypd)
final_fits_ypd_pops_in_sc = numpy.array(final_fits_ypd_pops_in_sc)
segregant_vector_ypd_pops = numpy.array(segregant_vector_ypd_pops)
segregant_vector_sc_pops = numpy.array(segregant_vector_sc_pops)
final_fits_sc_pops_in_ypd = numpy.array(final_fits_sc_pops_in_ypd)
final_fits_ypd_pops_in_sc = numpy.array(final_fits_ypd_pops_in_sc)
ypd_controls = {}
sc_controls = {}
file2 = open(filename2,'r')
firstline = 0
for line in file2:
if firstline < .5:
firstline += 1
continue
linestrs = line.strip().split(';')
ypd_controls[linestrs[0]] = [float(i) for i in linestrs[1].split(',')]
sc_controls[linestrs[0]] = [float(i) for i in linestrs[2].split(',')]
file2.close()
genotype_mat = []
file3 = open(filename3,'r')
firstline = 0
for line in file3:
if firstline < .5:
firstline += 1
continue
linelist = line.strip().split(';')
genotype = [int(i) for i in linelist[1].split(',')]
genotype_mat.append(genotype)
genotype_mat = numpy.array(genotype_mat)
rm_allele = numpy.array(genotype_mat[:,3777],dtype='Bool')
by_allele = numpy.array(1 - genotype_mat[:,3777],dtype='Bool')
####Set up mutation table
gene_names = numpy.array(gene_names)
double_hit_genes = gene_names[numpy.array(gene_name_counts) > 1.5]
#print seg_samples
#print samples
num_double_hit_genes = len(double_hit_genes)
#print double_hit_genes
gene_names_reordered = ['KRE33', 'ENP2', 'BFR2', 'BMS1', 'UTP20', 'RPS8A', 'RPS6A','CRM1', 'ECM16', 'BUD23', 'IRA1', 'IRA2', 'GPB1', 'GPB2', 'PDE2','SIR2', 'SIR3', 'SIR4', 'RXT3', 'NNK1', 'YPK9', 'LTE1', 'SRS2','PAR32','STE11','RRP3','RQC2']
#print set(double_hit_genes) == set(gene_names_reordered)
new_gene_order = []
for i in range(len(gene_names_reordered)):
index = numpy.where(numpy.array(double_hit_genes) == gene_names_reordered[i])[0][0]
new_gene_order.append(index)
mutation_table = numpy.zeros((254,num_double_hit_genes))
indel_table = numpy.zeros((254,num_double_hit_genes))
for i in range(len(samples)):
for j in range(num_double_hit_genes):
if double_hit_genes[new_gene_order[j]] in gene_dict_by_sample[samples[i]]:
mutation_table[i,j] = 1
gene_ind = gene_dict_by_sample[samples[i]].index(double_hit_genes[new_gene_order[j]])
mutation = mutation_dict_by_sample[samples[i]][gene_ind]
mutation_list = mutation.split('_')
#print mutation_list
if (len(mutation_list[3].split(':')) > 1.5 or 'Stop' in mutation_list[-1]): #indels and premature stops
indel_table[i,j] = 1
mutation_table[i,j] -= 1
###Determine genotype of sequenced populations
genotype_mat_sequenced_populations = numpy.zeros((len(samples),len(genotype_mat[0,:])))
i = 0
seg_samples = []
for clone in samples:
name_strs = clone.split('_')
seg = name_strs[0]
seg_samples.append(seg)
genotype_index = segregant_vector.index(seg)
genotype_mat_sequenced_populations[i,:] = genotype_mat[genotype_index,:]
i += 1
###Set up an indicator variable for the environment
env_list = []
for sample in samples:
env = sample.split('_')[2]
if env=='sc':
env_list.append(1)
elif env=='ypd':
env_list.append(0)
env_list = numpy.array(env_list, dtype='Bool')
##Set up an indicator variable for the Kre33 allele
kre33_allele = genotype_mat_sequenced_populations[:, 9596]
kre33_allele = numpy.array(kre33_allele, dtype='Bool')
##Set up a variable for the segregant
seg_counter = 0
prev_seg = seg_samples[0]
founder_num_key = []
for i in range(len(samples)):
seg = seg_samples[i]
if seg == prev_seg:
founder_num_key.append(seg_counter)
else:
seg_counter += 1
prev_seg = seg
founder_num_key.append(seg_counter)
founder_num_key = numpy.array(founder_num_key)
###Determine mutations per gene for 4 categories: Kre33-RM/30C; Kre33-BY/30C; Kre33-RM/37C; Kre33-BY/37C
group4 = numpy.array(env_list*kre33_allele, dtype='Bool')
group3 = numpy.array(env_list*(1 - kre33_allele), dtype='Bool')
group2 = numpy.array((1 - env_list)*kre33_allele, dtype='Bool')
group1 = numpy.array((1 - env_list)*(1 - kre33_allele), dtype='Bool')
counts_grp1_mutations = numpy.sum(mutation_table[group1, :], axis=0)
counts_grp1_indels = numpy.sum(indel_table[group1,:], axis=0)
counts_grp2_mutations = numpy.sum(mutation_table[group2, :], axis=0)
counts_grp2_indels = numpy.sum(indel_table[group2,:], axis=0)
counts_grp3_mutations = numpy.sum(mutation_table[group3, :], axis=0)
counts_grp3_indels = numpy.sum(indel_table[group3,:], axis=0)
counts_grp4_mutations = numpy.sum(mutation_table[group4, :], axis=0)
counts_grp4_indels = numpy.sum(indel_table[group4,:], axis=0)
###Basic counting
num_nonsyn_muts_sc = numpy.sum(gene_name_counts_sc)
num_nonsyn_muts_ypd = numpy.sum(gene_name_counts_ypd)
#print 'num_nonsyn_muts_sc', num_nonsyn_muts_sc, 'num_nonsyn_muts_ypd', num_nonsyn_muts_ypd
num_kre33_ass_muts_sc = numpy.sum( counts_grp1_mutations[0:10] ) + numpy.sum( counts_grp2_mutations[0:10] )
num_kre33_ass_muts_ypd = numpy.sum( counts_grp3_mutations[0:10] ) + numpy.sum( counts_grp4_mutations[0:10] )
frac_kre33_ass_muts_sc = num_kre33_ass_muts_sc/float(num_nonsyn_muts_sc)
frac_kre33_ass_muts_ypd = num_kre33_ass_muts_ypd/float(num_nonsyn_muts_ypd)
#print 'kre33_muts_sc', num_kre33_ass_muts_sc
#print 'kre33_muts_ypd', num_kre33_ass_muts_ypd
#print 'kre33 frac muts sc', frac_kre33_ass_muts_sc
#print 'kre33 frac muts ypd', frac_kre33_ass_muts_ypd
###Basic counting per population
num_pops_kre33_mut = numpy.sum( mutation_table[:, 0] > .5 )
frac_pops_kre33_mut = num_pops_kre33_mut/float(mutation_table.shape[0])
#print num_pops_kre33_mut, frac_pops_kre33_mut
####
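# Nested permutation tests: each block below asks whether splitting the
# populations by one more factor (Kre33 allele, then environment, then founder
# genotype) explains additional variation in mutational targets. The statistic
# is the drop in summed per-gene entropy (mutual information) when moving from
# the coarser to the finer partition, compared against permutations that
# shuffle hits only within the coarser partition.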
iter = 10000
mut_table = mutation_table + indel_table
categories_kre33 = numpy.array(kre33_allele)
categories_null1 = numpy.zeros((len(categories_kre33),))
cat_inds_kre33 = calculate_cat_inds(categories_kre33)
cat_inds_null1 = calculate_cat_inds(categories_null1)
helper_mat_kre33 = calculate_helper_matrix(categories_kre33, cat_inds_kre33)
helper_mat_null1 = calculate_helper_matrix(categories_null1, cat_inds_null1)
mi_stat_kre33 = calculate_entropy_statistic(mut_table, helper_mat_null1) - calculate_entropy_statistic(mut_table, helper_mat_kre33)
mi_stat_permutations = []
for i in range(iter):
permuted_mut_table = permute_within_categories_preserve_num_muts(mut_table, categories_null1, cat_inds_null1)
mi_stat_permutations.append(calculate_entropy_statistic(permuted_mut_table, helper_mat_null1) - calculate_entropy_statistic(permuted_mut_table, helper_mat_kre33))
print 'Kre33 allele effect'
print 'p =', numpy.sum(mi_stat_permutations > mi_stat_kre33)/float(iter), '(10,000 permutations)'
print 'mi, treatment', mi_stat_kre33
print 'mi, mean of null', numpy.mean(mi_stat_permutations)
print 'difference', mi_stat_kre33 - numpy.mean(mi_stat_permutations), mi_stat_kre33 - numpy.percentile(mi_stat_permutations, 2.5), mi_stat_kre33 - numpy.percentile(mi_stat_permutations, 97.5)
#Control for the kre33 allele and look for additional effects of the environment
categories_env = env_list*10 + categories_kre33
cat_inds_env = calculate_cat_inds(categories_env)
helper_mat_env = calculate_helper_matrix(categories_env, cat_inds_env)
mi_stat_env = calculate_entropy_statistic(mut_table, helper_mat_kre33) - calculate_entropy_statistic(mut_table, helper_mat_env)
mi_stat_permutations_null2 = []
for i in range(iter):
permuted_mut_table = permute_within_categories_preserve_num_muts(mut_table, categories_kre33, cat_inds_kre33)
mi_stat_permutations_null2.append(calculate_entropy_statistic(permuted_mut_table, helper_mat_kre33) - calculate_entropy_statistic(permuted_mut_table, helper_mat_env))
#print permuted_order
print 'Environment effect'
print 'p =', numpy.sum(mi_stat_permutations_null2 > mi_stat_env)/float(iter)
print 'mi, treatment', mi_stat_env
print 'mi, mean of null', numpy.mean(mi_stat_permutations_null2)
print 'difference', mi_stat_env - numpy.mean(mi_stat_permutations_null2), mi_stat_env - numpy.percentile(mi_stat_permutations_null2, 2.5), mi_stat_env - numpy.percentile(mi_stat_permutations_null2, 97.5)
#Control for kre33 and environment and look for additional effects of genotype
categories_genotype = founder_num_key*100 + categories_env #This will sum over the environment groups within a founder class
cat_inds_genotype = calculate_cat_inds(categories_genotype)
helper_mat_genotype = calculate_helper_matrix(categories_genotype, cat_inds_genotype)
mi_stat_genotype = calculate_entropy_statistic(mut_table, helper_mat_env) - calculate_entropy_statistic(mut_table, helper_mat_genotype)
mi_stat_permutations_null4 = []
for i in range(iter):
permuted_mut_table = permute_within_categories_preserve_num_muts(mut_table, categories_env, cat_inds_env)
mi_stat_permutations_null4.append(calculate_entropy_statistic(permuted_mut_table, helper_mat_env) - calculate_entropy_statistic(permuted_mut_table, helper_mat_genotype))
print 'Genotype effect'
print 'p =', numpy.sum(mi_stat_permutations_null4 > mi_stat_genotype)/float(iter)
print 'mi, treatment', mi_stat_genotype
print 'mi, mean of null', numpy.mean(mi_stat_permutations_null4)
print 'difference', mi_stat_genotype - numpy.mean(mi_stat_permutations_null4), mi_stat_genotype - numpy.percentile(mi_stat_permutations_null4, 2.5), mi_stat_genotype - numpy.percentile(mi_stat_permutations_null4, 97.5)
#For reference, calculate the overall entropy
hits_per_gene = numpy.sum(mut_table, axis=0)
pi = hits_per_gene/254.
print 'Total entropy'
print numpy.sum(-1*(pi*numpy.log2(pi) + (1.- pi)*numpy.log2(1.-pi)))
f, (ax1, ax2, ax3) = pt.subplots(1, 3, figsize = (16,6))
ax1.set_title('Kre33 allele effect')
ax1.set_ylabel('Probability')
ax1.set_xlabel('Mutual information')
ax1.hist(mi_stat_permutations, color='grey',normed=True,bins=20,edgecolor="none")
ax1.axvline(mi_stat_kre33,0,.2,color='blue')
ymax = ax1.get_ylim()[1]
ax1.plot(mi_stat_kre33,.2*ymax,marker='o',color='b',markersize=5,markeredgewidth=0)
ax2.set_title('Environment effect')
ax2.set_ylabel('Probability')
ax2.set_xlabel('Mutual information')
ax2.hist(mi_stat_permutations_null2, color='grey',normed=True,bins=20,edgecolor="none")
ymax = ax2.get_ylim()[1]
ax2.axvline(mi_stat_env,0,.2,color='blue')
ax2.plot(mi_stat_env,.2*ymax,marker='o',color='b',markersize=5,markeredgewidth=0)
ax3.set_title('Genotype effect')
ax3.set_ylabel('Probability')
ax3.set_xlabel('Mutual information')
ax3.hist(mi_stat_permutations_null4, color='grey',normed=True,bins=20,edgecolor="none")
ax3.axvline(mi_stat_genotype,0,.2,color='blue')
ymax = ax3.get_ylim()[1]
ax3.plot(mi_stat_genotype,.2*ymax,marker='o',color='b',markersize=5,markeredgewidth=0)
pt.savefig('Mutual_information_detection.pdf',bbox_inches='tight') | mit |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/nltk/parse/dependencygraph.py | 5 | 31002 | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Jason Narad <[email protected]>
# Steven Bird <[email protected]> (modifications)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Tools for reading and writing dependency trees.
The input is assumed to be in Malt-TAB format
(http://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
"""
from __future__ import print_function, unicode_literals
from collections import defaultdict
from itertools import chain
from pprint import pformat
import subprocess
import warnings
from nltk.tree import Tree
from nltk.compat import python_2_unicode_compatible, string_types
#################################################################
# DependencyGraph Class
#################################################################
@python_2_unicode_compatible
class DependencyGraph(object):
"""
A container for the nodes and labelled edges of a dependency structure.
"""
def __init__(self, tree_str=None, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""Dependency graph.
We place a dummy `TOP` node with the index 0, since the root node is
often assigned 0 as its head. This also means that the indexing of the
nodes corresponds directly to the Malt-TAB format, which starts at 1.
        If zero_based is True, Malt-TAB-like input is expected, with node
        numbers starting at 0 and the root node assigned -1 (as produced
        by, e.g., zpar).
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
"""
self.nodes = defaultdict(lambda: {'address': None,
'word': None,
'lemma': None,
'ctag': None,
'tag': None,
'feats': None,
'head': None,
'deps': defaultdict(list),
'rel': None,
})
self.nodes[0].update(
{
'ctag': 'TOP',
'tag': 'TOP',
'address': 0,
}
)
self.root = None
if tree_str:
self._parse(
tree_str,
cell_extractor=cell_extractor,
zero_based=zero_based,
cell_separator=cell_separator,
top_relation_label=top_relation_label,
)
def remove_by_address(self, address):
"""
Removes the node with the given address. References
to this node in others will still exist.
"""
del self.nodes[address]
def redirect_arcs(self, originals, redirect):
"""
Redirects arcs to any of the nodes in the originals list
to the redirect node address.
"""
for node in self.nodes.values():
new_deps = []
for dep in node['deps']:
if dep in originals:
new_deps.append(redirect)
else:
new_deps.append(dep)
node['deps'] = new_deps
def add_arc(self, head_address, mod_address):
"""
Adds an arc from the node specified by head_address to the
node specified by the mod address.
"""
relation = self.nodes[mod_address]['rel']
self.nodes[head_address]['deps'].setdefault(relation, [])
self.nodes[head_address]['deps'][relation].append(mod_address)
#self.nodes[head_address]['deps'].append(mod_address)
def connect_graph(self):
"""
Fully connects all non-root nodes. All nodes are set to be dependents
of the root node.
"""
for node1 in self.nodes.values():
for node2 in self.nodes.values():
if node1['address'] != node2['address'] and node2['rel'] != 'TOP':
relation = node2['rel']
node1['deps'].setdefault(relation, [])
node1['deps'][relation].append(node2['address'])
#node1['deps'].append(node2['address'])
def get_by_address(self, node_address):
"""Return the node with the given address."""
return self.nodes[node_address]
def contains_address(self, node_address):
"""
Returns true if the graph contains a node with the given node
address, false otherwise.
"""
return node_address in self.nodes
def to_dot(self):
"""Return a dot representation suitable for using with Graphviz.
>>> dg = DependencyGraph(
... 'John N 2\\n'
... 'loves V 0\\n'
... 'Mary N 2'
... )
>>> print(dg.to_dot())
digraph G{
edge [dir=forward]
node [shape=plaintext]
<BLANKLINE>
0 [label="0 (None)"]
0 -> 2 [label="ROOT"]
1 [label="1 (John)"]
2 [label="2 (loves)"]
2 -> 1 [label=""]
2 -> 3 [label=""]
3 [label="3 (Mary)"]
}
"""
# Start the digraph specification
s = 'digraph G{\n'
s += 'edge [dir=forward]\n'
s += 'node [shape=plaintext]\n'
# Draw the remaining nodes
for node in sorted(self.nodes.values(), key=lambda v: v['address']):
s += '\n%s [label="%s (%s)"]' % (node['address'], node['address'], node['word'])
for rel, deps in node['deps'].items():
for dep in deps:
if rel is not None:
s += '\n%s -> %s [label="%s"]' % (node['address'], dep, rel)
else:
s += '\n%s -> %s ' % (node['address'], dep)
s += "\n}"
return s
def _repr_svg_(self):
"""Show SVG representation of the transducer (IPython magic).
>>> dg = DependencyGraph(
... 'John N 2\\n'
... 'loves V 0\\n'
... 'Mary N 2'
... )
>>> dg._repr_svg_().split('\\n')[0]
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
"""
dot_string = self.to_dot()
try:
process = subprocess.Popen(
['dot', '-Tsvg'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
except OSError:
raise Exception('Cannot find the dot binary from Graphviz package')
out, err = process.communicate(dot_string)
if err:
raise Exception(
'Cannot create svg representation by running dot from string: {}'
''.format(dot_string))
return out
def __str__(self):
return pformat(self.nodes)
def __repr__(self):
return "<DependencyGraph with {0} nodes>".format(len(self.nodes))
@staticmethod
def load(filename, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""
:param filename: a name of a file in Malt-TAB format
:param zero_based: nodes in the input file are numbered starting from 0
rather than 1 (as produced by, e.g., zpar)
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
:return: a list of DependencyGraphs
"""
with open(filename) as infile:
return [
DependencyGraph(
tree_str,
zero_based=zero_based,
cell_separator=cell_separator,
top_relation_label=top_relation_label,
)
for tree_str in infile.read().split('\n\n')
]
def left_children(self, node_index):
"""
Returns the number of left children under the node specified
by the given address.
"""
children = chain.from_iterable(self.nodes[node_index]['deps'].values())
index = self.nodes[node_index]['address']
return sum(1 for c in children if c < index)
def right_children(self, node_index):
"""
Returns the number of right children under the node specified
by the given address.
"""
children = chain.from_iterable(self.nodes[node_index]['deps'].values())
index = self.nodes[node_index]['address']
return sum(1 for c in children if c > index)
def add_node(self, node):
if not self.contains_address(node['address']):
self.nodes[node['address']].update(node)
def _parse(self, input_, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""Parse a sentence.
        :param cell_extractor: a function that, given a tuple of cells (and
            optionally the line index), returns an 8-tuple ``index, word,
            lemma, ctag, tag, feats, head, rel``; older extractors that take
            only the cells and return a 7-tuple without the index are also
            accepted.
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
"""
def extract_3_cells(cells, index):
word, tag, head = cells
return index, word, word, tag, tag, '', head, ''
def extract_4_cells(cells, index):
word, tag, head, rel = cells
return index, word, word, tag, tag, '', head, rel
def extract_7_cells(cells, index):
line_index, word, lemma, tag, _, head, rel = cells
try:
index = int(line_index)
except ValueError:
# index can't be parsed as an integer, use default
pass
return index, word, lemma, tag, tag, '', head, rel
def extract_10_cells(cells, index):
line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
try:
index = int(line_index)
except ValueError:
# index can't be parsed as an integer, use default
pass
return index, word, lemma, ctag, tag, feats, head, rel
extractors = {
3: extract_3_cells,
4: extract_4_cells,
7: extract_7_cells,
10: extract_10_cells,
}
if isinstance(input_, string_types):
input_ = (line for line in input_.split('\n'))
lines = (l.rstrip() for l in input_)
lines = (l for l in lines if l)
cell_number = None
for index, line in enumerate(lines, start=1):
cells = line.split(cell_separator)
if cell_number is None:
cell_number = len(cells)
else:
assert cell_number == len(cells)
if cell_extractor is None:
try:
cell_extractor = extractors[cell_number]
except KeyError:
raise ValueError(
'Number of tab-delimited fields ({0}) not supported by '
'CoNLL(10) or Malt-Tab(4) format'.format(cell_number)
)
try:
index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells, index)
except (TypeError, ValueError):
# cell_extractor doesn't take 2 arguments or doesn't return 8
# values; assume the cell_extractor is an older external
# extractor and doesn't accept or return an index.
word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)
if head == '_':
continue
head = int(head)
if zero_based:
head += 1
self.nodes[index].update(
{
'address': index,
'word': word,
'lemma': lemma,
'ctag': ctag,
'tag': tag,
'feats': feats,
'head': head,
'rel': rel,
}
)
# Make sure that the fake root node has labeled dependencies.
if (cell_number == 3) and (head == 0):
rel = top_relation_label
self.nodes[head]['deps'][rel].append(index)
if self.nodes[0]['deps'][top_relation_label]:
root_address = self.nodes[0]['deps'][top_relation_label][0]
self.root = self.nodes[root_address]
self.top_relation_label = top_relation_label
else:
warnings.warn(
"The graph doesn't contain a node "
"that depends on the root element."
)
def _word(self, node, filter=True):
w = node['word']
if filter:
if w != ',':
return w
return w
def _tree(self, i):
""" Turn dependency graphs into NLTK trees.
:param int i: index of a node
:return: either a word (if the indexed node is a leaf) or a ``Tree``.
"""
node = self.get_by_address(i)
word = node['word']
deps = sorted(chain.from_iterable(node['deps'].values()))
if deps:
return Tree(word, [self._tree(dep) for dep in deps])
else:
return word
def tree(self):
"""
Starting with the ``root`` node, build a dependency tree using the NLTK
``Tree`` constructor. Dependency labels are omitted.
"""
node = self.root
word = node['word']
deps = sorted(chain.from_iterable(node['deps'].values()))
return Tree(word, [self._tree(dep) for dep in deps])
def triples(self, node=None):
"""
Extract dependency triples of the form:
((head word, head tag), rel, (dep word, dep tag))
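Example (illustrative, using the ``treebank_data`` sample defined in
this module):
>>> dg = DependencyGraph(treebank_data)
>>> next(dg.triples())
(('will', 'MD'), 'SUB', ('Vinken', 'NNP'))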
"""
if not node:
node = self.root
head = (node['word'], node['ctag'])
for i in sorted(chain.from_iterable(node['deps'].values())):
dep = self.get_by_address(i)
yield (head, dep['rel'], (dep['word'], dep['ctag']))
for triple in self.triples(node=dep):
yield triple
def _hd(self, i):
try:
return self.nodes[i]['head']
except IndexError:
return None
def _rel(self, i):
try:
return self.nodes[i]['rel']
except IndexError:
return None
# what's the return type? Boolean or list?
def contains_cycle(self):
"""Check whether there are cycles.
>>> dg = DependencyGraph(treebank_data)
>>> dg.contains_cycle()
False
>>> cyclic_dg = DependencyGraph()
>>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
>>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
>>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
>>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
>>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
>>> cyclic_dg.nodes = {
... 0: top,
... 1: child1,
... 2: child2,
... 3: child3,
... 4: child4,
... }
>>> cyclic_dg.root = top
>>> cyclic_dg.contains_cycle()
[3, 1, 2, 4]
"""
distances = {}
for node in self.nodes.values():
for dep in node['deps']:
key = tuple([node['address'], dep])
distances[key] = 1
for _ in self.nodes:
new_entries = {}
for pair1 in distances:
for pair2 in distances:
if pair1[1] == pair2[0]:
key = tuple([pair1[0], pair2[1]])
new_entries[key] = distances[pair1] + distances[pair2]
for pair in new_entries:
distances[pair] = new_entries[pair]
if pair[0] == pair[1]:
path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
return path
return False # return []?
def get_cycle_path(self, curr_node, goal_node_index):
for dep in curr_node['deps']:
if dep == goal_node_index:
return [curr_node['address']]
for dep in curr_node['deps']:
path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
if len(path) > 0:
path.insert(0, curr_node['address'])
return path
return []
def to_conll(self, style):
"""
The dependency graph in CoNLL format.
:param style: the style to use for the format (3, 4, 10 columns)
:type style: int
:rtype: str
"""
if style == 3:
template = '{word}\t{tag}\t{head}\n'
elif style == 4:
template = '{word}\t{tag}\t{head}\t{rel}\n'
elif style == 10:
template = '{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n'
else:
raise ValueError(
'Number of tab-delimited fields ({0}) not supported by '
'CoNLL(10) or Malt-Tab(4) format'.format(style)
)
return ''.join(template.format(i=i, **node) for i, node in sorted(self.nodes.items()) if node['tag'] != 'TOP')
def nx_graph(self):
"""Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
import networkx
nx_nodelist = list(range(1, len(self.nodes)))
nx_edgelist = [
(n, self._hd(n), self._rel(n))
for n in nx_nodelist if self._hd(n)
]
self.nx_labels = {}
for n in nx_nodelist:
self.nx_labels[n] = self.nodes[n]['word']
g = networkx.MultiDiGraph()
g.add_nodes_from(nx_nodelist)
g.add_edges_from(nx_edgelist)
return g
class DependencyGraphError(Exception):
"""Dependency graph exception."""
def demo():
malt_demo()
conll_demo()
conll_file_demo()
cycle_finding_demo()
def malt_demo(nx=False):
"""
A demonstration of the result of reading a dependency
version of the first sentence of the Penn Treebank.
"""
dg = DependencyGraph("""Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
""")
tree = dg.tree()
tree.pprint()
if nx:
# currently doesn't work
import networkx
from matplotlib import pylab
g = dg.nx_graph()
g.info()
pos = networkx.spring_layout(g, dim=1)
networkx.draw_networkx_nodes(g, pos, node_size=50)
# networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
networkx.draw_networkx_labels(g, pos, dg.nx_labels)
pylab.xticks([])
pylab.yticks([])
pylab.savefig('tree.png')
pylab.show()
def conll_demo():
"""
A demonstration of how to read a string representation of
a CoNLL format dependency tree.
"""
dg = DependencyGraph(conll_data1)
tree = dg.tree()
tree.pprint()
print(dg)
print(dg.to_conll(4))
def conll_file_demo():
print('Mass conll_read demo...')
graphs = [DependencyGraph(entry)
for entry in conll_data2.split('\n\n') if entry]
for graph in graphs:
tree = graph.tree()
print('\n')
tree.pprint()
def cycle_finding_demo():
dg = DependencyGraph(treebank_data)
print(dg.contains_cycle())
cyclic_dg = DependencyGraph()
cyclic_dg.add_node({'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0})
cyclic_dg.add_node({'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1})
cyclic_dg.add_node({'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2})
cyclic_dg.add_node({'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3})
cyclic_dg.add_node({'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4})
print(cyclic_dg.contains_cycle())
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
conll_data1 = """
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
"""
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
7 . . Punc Punc punt 6 punct _ _
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 bij bij Prep Prep voor 2 ld _ _
4 de de Art Art bep|zijdofmv|neut 6 det _ _
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
13 . . Punc Punc punt 12 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
4 naast naast Prep Prep voor 11 mod _ _
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
6 op op Prep Prep voor 11 ld _ _
7 de de Art Art bep|zijdofmv|neut 8 det _ _
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
9 kunnen kan V V hulp|inf 2 vc _ _
10 gaan ga V V hulp|inf 9 vc _ _
11 liggen lig V V intrans|inf 10 vc _ _
12 . . Punc Punc punt 11 punct _ _
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
3 mams mams N N soort|ev|neut 4 det _ _
4 rug rug N N soort|ev|neut 5 obj1 _ _
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
6 hebben heb V V hulp|inf 2 vc _ _
7 en en Conj Conj neven 0 ROOT _ _
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
9 de de Art Art bep|zijdofmv|neut 10 det _ _
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
11 . . Punc Punc punt 10 punct _ _
1 Of of Conj Conj onder|metfin 0 ROOT _ _
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
5 met met Prep Prep voor 10 mod _ _
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
8 rond rond Adv Adv deelv 10 svp _ _
9 kunnen kan V V hulp|inf 3 vc _ _
10 slenteren slenter V V intrans|inf 9 vc _ _
11 in in Prep Prep voor 10 mod _ _
12 de de Art Art bep|zijdofmv|neut 13 det _ _
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
14 van van Prep Prep voor 13 mod _ _
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
16 . . Punc Punc punt 15 punct _ _
"""
if __name__ == '__main__':
demo()
| apache-2.0 |
bnaul/scikit-learn | sklearn/cluster/_dbscan.py | 3 | 16156 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..utils.validation import _check_sample_weight, _deprecate_positional_args
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
@_deprecate_positional_args
def dbscan(X, eps=0.5, *, min_samples=5, metric='minkowski',
metric_params=None, algorithm='auto', leaf_size=30, p=2,
sample_weight=None, n_jobs=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, default=0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data set
and distance function.
min_samples : int, default=5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : str or callable, default='minkowski'
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit.
X may be a :term:`sparse graph <sparse graph>`,
in which case only "nonzero" elements may be considered neighbors.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, default=30
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, default=2
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search. ``None`` means
1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means
using all processors. See :term:`Glossary <n_jobs>` for more details.
If precomputed distances are used, parallel execution is not available
and thus n_jobs will have no effect.
Returns
-------
core_samples : ndarray of shape (n_core_samples,)
Indices of core samples.
labels : ndarray of shape (n_samples,)
Cluster labels for each point. Noisy samples are given the label -1.
See also
--------
DBSCAN
An estimator interface for this clustering algorithm.
OPTICS
A similar estimator interface clustering at multiple values of eps. Our
implementation is optimized for memory usage.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n). It may attract a higher
memory complexity when querying these nearest neighborhoods, depending
on the ``algorithm``.
One way to avoid the query complexity is to pre-compute sparse
neighborhoods in chunks using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
``mode='distance'``, then using ``metric='precomputed'`` here.
Another way to reduce memory and computation time is to remove
(near-)duplicate points and use ``sample_weight`` instead.
:func:`cluster.optics <sklearn.cluster.optics>` provides a similar
clustering with lower memory usage.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
DBSCAN revisited, revisited: why and how you should (still) use DBSCAN.
ACM Transactions on Database Systems (TODS), 42(3), 19.
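Examples
--------
A minimal call on toy data (illustrative; it mirrors the example in the
:class:`DBSCAN` docstring below).
>>> import numpy as np
>>> X = np.array([[1, 2], [2, 2], [2, 3],
...               [8, 7], [8, 8], [25, 80]])
>>> core_samples, labels = dbscan(X, eps=3, min_samples=2)
>>> core_samples
array([0, 1, 2, 3, 4])
>>> labels
array([ 0, 0, 0, 1, 1, -1])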
"""
est = DBSCAN(eps=eps, min_samples=min_samples, metric=metric,
metric_params=metric_params, algorithm=algorithm,
leaf_size=leaf_size, p=p, n_jobs=n_jobs)
est.fit(X, sample_weight=sample_weight)
return est.core_sample_indices_, est.labels_
class DBSCAN(ClusterMixin, BaseEstimator):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, default=0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data set
and distance function.
min_samples : int, default=5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a :term:`sparse graph <sparse graph>`, in which
case only "nonzero" elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, default=30
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, default=None
The power of the Minkowski metric to be used to calculate distance
between points. If None, then ``p=2`` (equivalent to the Euclidean
distance).
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
core_sample_indices_ : ndarray of shape (n_core_samples,)
Indices of core samples.
components_ : ndarray of shape (n_core_samples, n_features)
Copy of each core sample found by training.
labels_ : ndarray of shape (n_samples)
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Examples
--------
>>> from sklearn.cluster import DBSCAN
>>> import numpy as np
>>> X = np.array([[1, 2], [2, 2], [2, 3],
... [8, 7], [8, 8], [25, 80]])
>>> clustering = DBSCAN(eps=3, min_samples=2).fit(X)
>>> clustering.labels_
array([ 0, 0, 0, 1, 1, -1])
>>> clustering
DBSCAN(eps=3, min_samples=2)
See also
--------
OPTICS
A similar clustering at multiple values of eps. Our implementation
is optimized for memory usage.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n). It may attract a higher
memory complexity when querying these nearest neighborhoods, depending
on the ``algorithm``.
One way to avoid the query complexity is to pre-compute sparse
neighborhoods in chunks using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
``mode='distance'``, then using ``metric='precomputed'`` here.
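A minimal sketch of that workflow (illustrative; ``X`` is a feature
array such as the one in the Examples above, and the radius/eps values
are arbitrary):
>>> from sklearn.neighbors import NearestNeighbors
>>> D = NearestNeighbors(radius=3.0).fit(X).radius_neighbors_graph(
...     X, mode='distance')
>>> labels = DBSCAN(eps=3.0, min_samples=2,
...                 metric='precomputed').fit_predict(D)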
Another way to reduce memory and computation time is to remove
(near-)duplicate points and use ``sample_weight`` instead.
:class:`cluster.OPTICS` provides a similar clustering with lower memory
usage.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
DBSCAN revisited, revisited: why and how you should (still) use DBSCAN.
ACM Transactions on Database Systems (TODS), 42(3), 19.
"""
@_deprecate_positional_args
def __init__(self, eps=0.5, *, min_samples=5, metric='euclidean',
metric_params=None, algorithm='auto', leaf_size=30, p=None,
n_jobs=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.metric_params = metric_params
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.n_jobs = n_jobs
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features, or distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
"""
X = self._validate_data(X, accept_sparse='csr')
if not self.eps > 0.0:
raise ValueError("eps must be positive.")
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# Calculate neighborhood for all samples. This leaves the original
# point in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i; while true, it's useless information).
if self.metric == 'precomputed' and sparse.issparse(X):
# set the diagonal to explicit values, as a point is its own
# neighbor
with warnings.catch_warnings():
warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)
X.setdiag(X.diagonal()) # XXX: modifies X's internals in-place
neighbors_model = NearestNeighbors(
radius=self.eps, algorithm=self.algorithm,
leaf_size=self.leaf_size, metric=self.metric,
metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = np.full(X.shape[0], -1, dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= self.min_samples,
dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
self.core_sample_indices_ = np.where(core_samples)[0]
self.labels_ = labels
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix,
and return cluster labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels. Noisy samples are given the label -1.
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
| bsd-3-clause |
ycaihua/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
k(x, y) = x M y.T,  where M = [[2, 0], [0, 1]]
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
hdmetor/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
The :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
ngoix/OCRF | examples/model_selection/plot_underfitting_overfitting.py | 53 | 2668 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
belltailjp/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
FernanOrtega/DAT210x | Module6/assignment2.py | 1 | 6469 | import math
import pandas as pd
# The Dataset comes from:
# https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits
# At face value, this looks like an easy lab;
# But it has many parts to it, so prepare yourself before starting...
def load(path_test, path_train):
# Load up the data.
# You probably could have written this..
with open(path_test, 'r') as f: testing = pd.read_csv(f)
with open(path_train, 'r') as f: training = pd.read_csv(f)
# The number of samples between training and testing can vary
# But the number of features better remain the same!
n_features = testing.shape[1]
X_test = testing.ix[:,:n_features-1]
X_train = training.ix[:,:n_features-1]
y_test = testing.ix[:,n_features-1:].values.ravel()
y_train = training.ix[:,n_features-1:].values.ravel()
#
# Special:
perc2keep = 1
index = X_train.shape[0]
X_train_sliced = X_train[:int(math.ceil(index * perc2keep))]
y_train_sliced = y_train[:int(math.ceil(index * perc2keep))]
print X_train_sliced.shape
print y_train_sliced.shape
return X_train_sliced, X_test, y_train_sliced, y_test
def peekData(X_train):
# The 'targets' or labels are stored in y. The 'samples' or data is stored in X
print "Peeking your data..."
fig = plt.figure()
cnt = 0
for col in range(5):
for row in range(10):
plt.subplot(5, 10, cnt + 1)
plt.imshow(X_train.ix[cnt,:].reshape(8,8), cmap=plt.cm.gray_r, interpolation='nearest')
plt.axis('off')
cnt += 1
fig.set_tight_layout(True)
#plt.show()
def drawPredictions(X_train, X_test, y_train, y_test):
fig = plt.figure()
# Make some guesses
y_guess = model.predict(X_test)
#
# INFO: This is the second lab we're demonstrating how to
# do multi-plots using matplot lab. In the next assignment(s),
# it'll be your responsibility to use this and assignment #1
# as tutorials to add in the plotting code yourself!
num_rows = 10
num_cols = 5
index = 0
for col in range(num_cols):
for row in range(num_rows):
plt.subplot(num_cols, num_rows, index + 1)
# 8x8 is the size of the image, 64 pixels
plt.imshow(X_test.ix[index,:].reshape(8,8), cmap=plt.cm.gray_r, interpolation='nearest')
# Green = Guessed right
# Red = Fail!
fontcolor = 'g' if y_test[index] == y_guess[index] else 'r'
plt.title('Label: %i' % y_guess[index], fontsize=6, color=fontcolor)
plt.axis('off')
index += 1
fig.set_tight_layout(True)
#plt.show()
#
# TODO: Pass in the file paths to the .tes and the .tra files
X_train, X_test, y_train, y_test = load('Datasets/optdigits.tes', 'Datasets/optdigits.tra')
import matplotlib.pyplot as plt
from sklearn import svm
#
# Get to know your data. It seems it's already well organized in
# [n_samples, n_features] form; for these 8x8 digit images that means 64 features per sample.
# Also your labels are already shaped as [n_samples].
#peekData(X_train)
#
# TODO: Create an SVC classifier. Leave C=1, but set gamma to 0.001
# and set the kernel to linear. Then train the model on the training
# data / labels:
print "Training SVC Classifier..."
#
model = svm.SVC(kernel='rbf', C=1, gamma=0.001)
model.fit(X_train, y_train)
# TODO: Calculate the score of your SVC against the testing data
print "Scoring SVC Classifier..."
#
score = model.score(X_test, y_test)
print "Score:\n", score
# Visual Confirmation of accuracy
drawPredictions(X_train, X_test, y_train, y_test)
#
# TODO: Print out the TRUE value of the 1000th digit in the test set
# By TRUE value, we mean, the actual provided label for that sample
#
true_1000th_test_value = y_test[999]
print "1000th test label: ", true_1000th_test_value
#
# TODO: Predict the value of the 1000th digit in the test set.
# Was your model's prediction correct?
# INFO: If you get a warning on your predict line, look at the
# notes from the previous module's labs.
#
guess_1000th_test_value = model.predict(X_test.ix[999,:])
print "1000th test prediction: ", guess_1000th_test_value
#
# TODO: Use IMSHOW to display the 1000th test image, so you can
# visually check if it was a hard image, or an easy image
#
#fig = plt.figure()
#plt.imshow(X_test.ix[999,:].reshape(8,8), cmap=plt.cm.gray_r, interpolation='nearest')
#
# TODO: Were you able to beat the USPS advertised accuracy score
# of 98%? If so, STOP and answer the lab questions. But if you
# weren't able to get that high of an accuracy score, go back
# and change your SVC's kernel to 'poly' and re-run your lab
# again.
#
# TODO: Were you able to beat the USPS advertised accuracy score
# of 98%? If so, STOP and answer the lab questions. But if you
# weren't able to get that high of an accuracy score, go back
# and change your SVC's kernel to 'rbf' and re-run your lab
# again.
#
# TODO: Were you able to beat the USPS advertised accuracy score
# of 98%? If so, STOP and answer the lab questions. But if you
# weren't able to get that high of an accuracy score, go back
# and tinker with your gamma value and C value until you're able
# to beat the USPS. Don't stop tinkering until you do. =).
#################################################
#
# TODO: Once you're able to beat the +98% accuracy score of the
# USPS, go back into the load() method. Look for the line that
# reads "# Special:"
#
# Immediately under that line, alter X_train and y_train ONLY.
# Keep just the ___FIRST___ 4% of the samples. In other words,
# for every 100 samples found, throw away 96 of them. Make sure
# all the samples (and labels) you keep come from the start of
# X_train and y_train.
# If the first 4% is a decimal number, then use int + ceil to
# round up to the nearest whole integer.
# That operation might require some Pandas indexing skills, or
# perhaps some numpy indexing skills if you'd like to go that
# route. Feel free to ask on the class forum if you want; but
# try to exercise your own muscles first, for at least 30
# minutes, by reviewing the Pandas documentation and stack
# overflow. Through that, in the process, you'll pick up a lot.
# Part of being a machine learning practitioner is know what
# questions to ask and where to ask them, so this is a great
# time to start!
# Re-Run your application after throwing away 96% your training
# data. What accuracy score do you get now?
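# A possible sketch of the slice described above (hypothetical): inside
# load(), right under the "# Special:" line, set perc2keep = 0.04 so
# that the existing lines
#   X_train_sliced = X_train[:int(math.ceil(index * perc2keep))]
#   y_train_sliced = y_train[:int(math.ceil(index * perc2keep))]
# keep only the first ~4% of the training samples, rounded up.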
#
# TODO: Lastly, change your kernel back to linear and run your
# assignment one last time. What's the accuracy score this time?
# Surprised?
plt.show() | mit |
kernc/scikit-learn | sklearn/gaussian_process/kernels.py | 24 | 66334 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
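For example (illustrative; ``RBF`` is defined further down in this module,
and plain floats are converted to constant kernels automatically):
>>> from sklearn.gaussian_process.kernels import RBF
>>> kernel = 1.0 * RBF(length_scale=1.0) + 2.0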
"""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" is
derived based on the given bounds.
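Example (illustrative):
>>> from sklearn.gaussian_process.kernels import Hyperparameter
>>> h = Hyperparameter('length_scale', 'numeric', (1e-5, 1e5))
>>> h.fixed
False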
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, six.string_types) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, six.string_types) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
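For example (illustrative; ``RBF`` is defined further down in this
module), the single entry of theta for ``RBF(length_scale=2.0)`` is
``log(2.0)``:
>>> import numpy as np
>>> from sklearn.gaussian_process.kernels import RBF
>>> np.allclose(RBF(length_scale=2.0).theta, np.log([2.0]))
True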
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.kernels[0].n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
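Example (illustrative; ``+`` on two kernels builds a ``Sum``):
>>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel
>>> k = ConstantKernel(2.0) + RBF(length_scale=1.0)  # Sum(ConstantKernel(2.0), RBF(1.0))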
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
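# --- Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source). Squaring an RBF kernel with length-scale l is the same
# as using an RBF with length-scale l / sqrt(2), since exp(-D/(2*l**2))**2 =
# exp(-D/(2*(l/sqrt(2))**2)) for a squared distance D.
def _demo_exponentiation_kernel():
    import numpy as np
    X = np.random.RandomState(2).randn(3, 2)
    k = Exponentiation(RBF(length_scale=1.0), exponent=2)
    assert np.allclose(k(X), RBF(length_scale=1.0 / np.sqrt(2))(X))
    return k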
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
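# --- Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source). On the training inputs the white kernel is a scaled
# identity, while against distinct test inputs it contributes no covariance.
def _demo_white_kernel():
    import numpy as np
    X = np.random.RandomState(4).randn(6, 2)
    k = WhiteKernel(noise_level=0.25)
    assert np.allclose(k(X), 0.25 * np.eye(6))
    assert np.allclose(k(X, X[:2]), 0.0)
    return k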
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
    ----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
                self.length_scale = np.asarray(length_scale, dtype=float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
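# --- Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source). With equal per-feature length-scales the anisotropic RBF
# matches the isotropic one; it merely exposes one hyperparameter per feature.
def _demo_rbf_kernel():
    import numpy as np
    X = np.random.RandomState(5).randn(5, 2)
    iso = RBF(length_scale=1.0)
    aniso = RBF(length_scale=[1.0, 1.0])
    assert np.allclose(iso(X), aniso(X))
    K, K_gradient = aniso(X, eval_gradient=True)
    # One gradient slice per length-scale dimension.
    assert K_gradient.shape == (5, 5, 2)
    return K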
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams 2006, pp84 for details regarding the
different variants of the Matern kernel.
Parameters
    ----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
(appr. 10 times higher) since they require to evaluate the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
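# --- Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source). nu=0.5 gives the absolute-exponential kernel and nu=2.5 a
# twice-differentiable one; both yield valid correlation matrices with unit
# diagonal on the training inputs.
def _demo_matern_kernel():
    import numpy as np
    X = np.random.RandomState(6).randn(4, 3)
    K_rough = Matern(length_scale=1.0, nu=0.5)(X)
    K_smooth = Matern(length_scale=1.0, nu=2.5)(X)
    assert np.allclose(np.diag(K_rough), 1.0)
    assert np.allclose(np.diag(K_smooth), 1.0)
    return K_rough, K_smooth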
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
a scalar is supported at the moment. The kernel given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
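# --- Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source). For large alpha the scale mixture collapses onto a single
# length-scale and the kernel converges to the RBF kernel defined above.
def _demo_rational_quadratic_kernel():
    import numpy as np
    X = np.random.RandomState(7).randn(5, 2)
    K_large_alpha = RationalQuadratic(length_scale=1.0, alpha=1e6)(X)
    assert np.allclose(K_large_alpha, RBF(length_scale=1.0)(X), atol=1e-4)
    return K_large_alpha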
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
parameter periodicity>0. Only the isotropic variant where l is a scalar is
supported at the moment. The kernel given by:
    k(x_i, x_j) = exp(-2 (sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
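# --- Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source). Points exactly one period apart are perfectly correlated,
# while points half a period apart are not.
def _demo_exp_sine_squared_kernel():
    import numpy as np
    X = np.array([[0.0], [0.5], [1.0]])
    K = ExpSineSquared(length_scale=1.0, periodicity=1.0)(X)
    assert np.isclose(K[0, 2], 1.0)   # one full period apart
    assert K[0, 1] < 1.0              # half a period apart
    return K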
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
    regression by putting N(0, 1) priors on the coefficients of x_d
    (d = 1, ..., D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct
    kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
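# --- Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source). With sigma_0=0 the kernel is the plain Gram matrix of the
# inputs; combined with Exponentiation it yields a polynomial kernel.
def _demo_dot_product_kernel():
    import numpy as np
    X = np.random.RandomState(8).randn(4, 3)
    assert np.allclose(DotProduct(sigma_0=0.0)(X), np.dot(X, X.T))
    k_quadratic = Exponentiation(DotProduct(sigma_0=1.0), exponent=2)
    assert np.allclose(k_quadratic(X), (np.dot(X, X.T) + 1.0) ** 2)
    return k_quadratic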
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
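# --- Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source). The "rbf" metric of sklearn.metrics.pairwise computes
# exp(-gamma * ||x - y||^2), so gamma = 1 / (2 * length_scale**2) reproduces the
# RBF kernel defined above.
def _demo_pairwise_kernel():
    import numpy as np
    X = np.random.RandomState(9).randn(4, 2)
    k = PairwiseKernel(gamma=0.5, metric="rbf")
    assert np.allclose(k(X), RBF(length_scale=1.0)(X))
    return k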
| bsd-3-clause |
acompa/leptoid | leptoid/forecasting.py | 1 | 4776 | """
Leptoid interface to R for forecasting. Notes on rpy2:
(1) rpy2 maintains a namespace of vars--look for notes on its current
state
(2) R's forecast library lets one specify error, seasonality, and trend
type from {none, additive, multiplicative}
(3) for starters, specify additive trend, multiplicative seasonality
Uses R's forecast package to generate forecasts. See the paper for more details:
http://www.jstatsoft.org/v27/i03/paper
"""
import datetime
import numpy as np
import logging
LOG = logging.getLogger('forecasting')
# Importing rpy2 and working with Numpy objects.
import rpy2.robjects as robjects
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
# Importing plotting functions from R.
RBMP = robjects.r['bitmap']
RPLOT = robjects.r['plot']
RDEVOFF = robjects.r['dev.off']
# Importing packages from R.
from rpy2.robjects.packages import importr
r_forecast = importr('forecast')  # renamed: the forecast() function below would otherwise shadow the R package
graphics = importr('graphics')
from leptoid.utils import get_forecast_attribute
RECENT_DATA_WINDOW = 120
PLOT_DIRECTORY = '/var/leptoid/img/'
PLOT_SIGNIFICANCE_THRESHOLD = 1E-5
def add_new_series(nseries, seriesname='nseries'):
"""Adds time series to R's global environment as ts object. """
freq = len(nseries)
robjects.globalenv['raw_vector'] = nseries
robjects.r('%s <- ts(raw_vector, frequency=%i)' % (seriesname, freq))
def _forecast_utilization(series, model_config=None):
"""
Generate a forecast using R functions.
Parameters
----------
series
np.array containing sample data
model_config
dict with values for 'model_type' and 'horizon'
Returns an R object with these attributes:
model: a list with model information,
mean: forecasted values for nseries,
level: confidence values for prediction,
x: the original values in nseries,
upper: upper limit for confidence interval
lower: lower limit " " " " "
fitted: fitted values (aka one-step ahead forecasts)
method: forecasting method used (as str)
residuals: errors from fitted models (ie. x - fitted)
Returns None if no sample data is available for the last
${recent_data_window} observations in the sample.
"""
# Defining default settings.
if model_config is None:
model_type = "ZZZ"
horizon = int(0.1 * len(series))
else:
model_type = model_config['model_type']
horizon = model_config['horizon']
# Handling case where recent samples are missing or not available.
if (series[-1 * RECENT_DATA_WINDOW:] == 0).all():
forecast_output = None
else:
		etsout = r_forecast.ets(series, model=model_type)
		forecast_output = r_forecast.forecast(etsout, h=horizon)
return forecast_output
def forecast(queue):
"""
Forecasting values using R's forecast package. Series reporting empty data
over the last ${recent_data_window} minutes will return None.
Parameters
----------
queue
pandas.TimeSeries with utilization data. Data is extracted from the
underlying buffer using np.frombuffer.
	queue.service, queue.instance_id
		attributes of the queue, used when labeling the plot
TODO: Currently Leptoid uses R via rpy2. This will change since rpy2 is
poorly maintained.
"""
LOG.info("Generating forecast for %s:%s" %
(queue.service, queue.instance_id))
# Retrieving forecast from rpy2, then extracting the attributes
# (forecasted utilization, one-step ahead forecast) we want.
model_output = _forecast_utilization(np.frombuffer(queue.utilization.data))
# Handling case where output == None, indicating a dormant instance.
	if model_output is None:
in_sample_forecast = None
util_estimate = None
else:
in_sample_forecast = get_forecast_attribute(model_output, "fitted")
util_estimate = get_forecast_attribute(model_output, "mean")
# Let's avoid empty plots
if np.max(util_estimate) > PLOT_SIGNIFICANCE_THRESHOLD:
plot_forecast(model_output, queue)
return (in_sample_forecast, util_estimate)
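# --- Illustrative sketch (editorial addition, not part of the original leptoid
# source). forecast() only needs the queue to expose `service`, `instance_id`,
# `get_first_timestamp()` and a `utilization` object whose `.data` buffer can be
# read with np.frombuffer. The hypothetical stand-in below can be used to exercise
# this module without the rest of leptoid (a working R/rpy2 + forecast
# installation is still required, and PLOT_DIRECTORY must exist if the forecast
# is large enough to be plotted), e.g.:
#     q = _StubQueue('web', 'i-0001', np.abs(np.sin(np.linspace(0, 20, 600))))
#     in_sample, estimate = forecast(q)
class _StubQueue(object):
	"""Minimal duck-typed replacement for leptoid.ServiceQueue (sketch only)."""
	def __init__(self, service, instance_id, utilization):
		self.service = service
		self.instance_id = instance_id
		# A plain ndarray is enough: np.frombuffer reads its .data buffer.
		self.utilization = np.asarray(utilization, dtype=float)
	def get_first_timestamp(self):
		return 'n/a'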
def plot_forecast(model_output, queue):
"""
Plots output from R forecast. Uses R functions extracted via rpy2
(see global vars above).
Parameters
----------
model_output
R object returned by leptoid.forecasting._forecast_utilization(). Contains
forecast information (confidence intervals, etc.).
queue
leptoid.ServiceQueue with information about the service
Returns nothing, but saves a plot to disk with a timestamp.
"""
forecast_method = get_forecast_attribute(model_output, attr='method')
n = datetime.datetime.now()
RBMP('%s%s-%s-%i-%i-%i-%i:%i.jpg' %
(PLOT_DIRECTORY, queue.service, queue.instance_id, n.year,
n.month, n.day, n.hour, n.minute),
width=1400, height=800, units='px', type='jpeg')
RPLOT(model_output, main="Util forecast for %s:%s (using %s)" %
(queue.service, queue.instance_id, forecast_method),
xlab="Minutes elapsed since %s" % queue.get_first_timestamp())
RDEVOFF()
| apache-2.0 |
danielelinaro/dynclamp | python/lcg/process_ecode.py | 1 | 13300 | import lcg
from glob import glob
import numpy as np
import os
import sys
import matplotlib.pylab as plt
import aec
#from scipy.interpolate import interp1d
plt.rc('font',**{'family':'sans-serif','sans-serif':['Arial'], 'size': 8})
plt.rc('axes', linewidth = 1)
plt.rc("xtick", direction="out")
plt.rc("ytick", direction="out")
plt.rc("ytick", direction="out")
def gatherH5files(folder):
files = glob('{0}/*.h5'.format(folder))
kfiles = glob('{0}/*_kernel*.dat'.format(folder))
return files, kfiles
def argfindspks(data, threshold=-20,deadwindow=30):
''' Extracts the indexes of the peaks in the data with threshold crossing.
Uses a dead window/minimum peak distance.
'''
N = len(data)
ii = np.arange(0,N)
# threshold crossing
dx = data > threshold
	above = ii[dx]
	# keep the last index of each above-threshold run
	idx = above[:-1][np.diff(above) > 1]
	idx = np.append(idx, [above[-1]])
# find peaks using the dead window
index = []
for ii in idx:
lower = ii - deadwindow
upper = ii + deadwindow
if lower < 0:
lower = 0
if upper > N:
upper = N
index.append(lower + np.argmax(data[lower:upper]))
return np.array(index)
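# --- Illustrative self-check (editorial addition, not part of the original lcg
# source). argfindspks on a synthetic trace with two well separated "spikes";
# only numpy is required.
def _demo_argfindspks():
	trace = -60.0 * np.ones(1000)
	trace[200:205] = [0., 20., 35., 20., 0.]   # first spike, peak at sample 202
	trace[600:605] = [0., 15., 30., 15., 0.]   # second spike, peak at sample 602
	peaks = argfindspks(trace, threshold=-20, deadwindow=30)
	assert list(peaks) == [202, 602]
	return peaks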
def compensateVoltage(ent, Ke, entNames = ['AnalogInput', 'Waveform']):
for e in ent:
if e['name'] in [entNames[0]]:
V = e['data']
if e['name'] in [entNames[1]]:
I = e['data']
metadata = e['metadata']
V = aec.compensate(V, I, Ke)
return V, I, metadata
def analyseAPprotocol(folder, tpre = 5, tpost = 20, ax = None):
files, kfiles = gatherH5files(folder)
# Uses the first kernel
Ke = np.loadtxt(kfiles[0]) / 1e9
V = []
I = []
idx = []
ktrials = [ os.path.basename(k).split('_')[0] for k in kfiles]
print('Analysing spike shapes...')
for f in files:
ent, info = lcg.loadH5Trace(f)
if not os.path.basename(f).split('.')[0] in ktrials:
try:
inset_samples
except NameError:
inset_samples = np.arange(np.floor(-(tpre*1e-3)/info['dt']),
np.floor(+(tpost*1e-3) / info['dt']),
dtype = int)
# time_actual = np.linspace( - tpre, tpost, len(inset_samples))
time = np.linspace( - tpre, tpost, len(inset_samples))
tmpV, tmpI, meta = compensateVoltage(ent, Ke, ['AnalogInput','Waveform'])
spks = argfindspks(tmpV,-20)
if len(spks) == 1:
# f = interp1d(time_actual, tmpV[spks + inset_samples], 'cubic')
# V.append(f(time))
V.append(tmpV[spks + inset_samples])
I.append(tmpI[spks + inset_samples])
V = np.vstack(V).T
I = np.vstack(I).T
dt = info['dt'] * 1e3
dV3 = np.diff(V, 3, 0)/(dt ** 3)
dV1 = np.diff(V, 1, 0)/dt
dV3Thresh = 10000
	# Compensate for the samples dropped by the third-order difference
idx = np.array([np.where(dV3[::,ii]>dV3Thresh)[0][0] for ii in range(dV3.shape[1])]) + 3
idxmean = np.where(np.mean(dV3, 1)>dV3Thresh)[0][0] + 3
mV = np.mean(V, 1)
# mdV1 = np.mean(dV1[1:,], 1)
# ax.plot(mV[1:],mdV1, color = 'r', lw = 1)
# ax.plot(mV[idxmean + 1] + [ - 5,+ 5], mdV1[idxmean] + [0, 0], 'k--')
ax.plot(time, V, color = 'gray', lw = 0.8)
[ax.plot(time[ii], V[ii, jj], 'ro',
markerfacecolor = 'gray',
markersize = 2) for jj, ii in enumerate(idx)]
ax.plot(time,mV, 'r', lw = 1)
ax.axis('tight')
ax.set_xticks(np.unique(np.linspace(-tpre, tpost, 10).astype(int)))
ax.set_yticks(np.unique(np.linspace(np.min(mV), max(mV), 5).astype(int)))
	ax.plot(time[int(np.mean(idx))] + [0, 0],ax.get_ylim(),'k--')
	tmp = mV[int(np.mean(idx))] + [0, 0]
ax.plot([ - tpre, tpost],tmp, 'k--')
# Scale in right corner
ax.set_xticks(np.linspace(-tpre, tpost, 12).astype(int))
ax.set_yticks(np.linspace(min(mV), max(mV), 5).astype(int))
tmpx = ax.get_xticks()
tmpy = ax.get_yticks()
ax.plot(tmpx[ -4:-2], tmpy[ - 2] + [0, 0], 'k')
ax.plot(tmpx[ - 4] + [0, 0], tmpy[ - 2::], 'k')
ax.text(tmpx[ - 4], tmpy[-2] - 2, '{0}ms'.format(np.diff(tmpx[0: 2])[0]),
va = 'top', ha = 'left', fontsize = 7)
ax.text(tmpx[ - 4], tmpy[-1], '{0}mV'.format(np.diff(tmpy[-2::])[0]),
va = 'top', ha = 'right', fontsize = 7, rotation =90)
	return mV[int(np.mean(idx))]
def analyseVIprotocol(folder, ax = None):
files, kfiles = gatherH5files(folder)
Ke = np.loadtxt(kfiles[0]) / 1e9
V = []
I = []
idx = []
ktrials = [ os.path.basename(k).split('_')[0] for k in kfiles]
print('Analysing hyperpolarizing current steps...')
for f in files:
ent, info = lcg.loadH5Trace(f)
if not os.path.basename(f).split('.')[0] in ktrials:
tmpV, tmpI, meta = compensateVoltage(ent, Ke, ['AnalogInput', 'Waveform'])
V.append(tmpV)
I.append(tmpI)
time = np.linspace( 0, info['tend'], len(tmpV))
prot_time = np.cumsum(meta[::, 0])
idx = np.where(time > prot_time[-3] - .2)[0]
V = np.vstack(V).T
I = np.vstack(I).T
ax[1].plot(time[idx], V[idx,::], 'k', clip_on = False)
ax[0].plot(time[idx], I[idx,::], 'k', clip_on = False)
# Scale in left corner
for a in ax:
a.axis('tight')
a.clipbox = False
a.set_xlim([time[idx[0]], time[idx[-1]]])
tmpx = a.get_xlim()
tmpy = a.get_ylim()
a.set_yticks(np.linspace(tmpy[0],tmpy[-1], 4).astype(int))
a.set_xticks(np.round(np.linspace(tmpx[0],tmpx[ -1], 4), 3) )
# print time[idx[0]]
tmpx = ax[1].get_xticks()
tmpy = ax[1].get_yticks()
ax[1].plot(tmpx[ -1] + [0, 0], tmpy[:2] + 2, 'k', clip_on=False)
ax[1].text(tmpx[ -1] + 0.05, tmpy[ 1], '{0}mV'.format(np.diff(tmpy[-2::])[0]),
va = 'top', ha = 'left', fontsize = 7, rotation =90)
ax[1].plot(tmpx[-2:], tmpy[0] + [2, 2], 'k', clip_on=False)
ax[1].text(tmpx[ -2] , tmpy[0] + 3, '{0}s'.format(np.diff(tmpx[0: 2])[0]),
va = 'bottom', ha = 'left', fontsize = 7)
tmpx = ax[0].get_xticks()
tmpy = ax[0].get_yticks()
ax[0].plot(tmpx[ -1] + [0, 0], tmpy[:2], 'k', clip_on=False)
ax[0].text(tmpx[ -1] + 0.05, tmpy[ 1], '{0}pA'.format(np.diff(tmpy[-2::])[0]),
va = 'top', ha = 'left', fontsize = 7, rotation =90)
return None
#
def analyseTAUprotocol(folder, ax = None):
files, kfiles = gatherH5files(folder)
Ke = np.loadtxt(kfiles[0]) / 1e9
V = []
I = []
spks = []
ktrials = [ os.path.basename(k).split('_')[0] for k in kfiles]
print('Analysing tau protocol...')
for f in files:
ent, info = lcg.loadH5Trace(f)
if not os.path.basename(f).split('.')[0] in ktrials:
tmpV, tmpI, meta = compensateVoltage(ent, Ke, ['AnalogInput', 'Waveform'])
V.append(tmpV)
I.append(tmpI)
time = np.linspace( 0, info['tend'], len(tmpV))
prot_time = np.cumsum(meta[::, 0])
idx = np.where((time > prot_time[0] - 0.0055) & (time < prot_time[1] + 0.1))[0]
fitidx = np.where((time > prot_time[1] + 0.0007) & (time < prot_time[1] + 0.08))[0]
V = np.vstack(V).T
I = np.vstack(I).T
ax.plot(time[idx], V[idx,], color = 'gray', lw = 0.6)
ax.plot(time[idx], np.mean(V[idx,], 1), color = 'k', lw =.7)
func = lambda x, a, b, c, d, e: a * (1-np.exp( -x / b )) + c * (1-np.exp( -x / d )) + e
from scipy.optimize import curve_fit
popt, pcov = curve_fit(func, np.reshape(np.repeat(time - time[fitidx[0]],V.shape[1]),V.shape)[fitidx,: ].flatten(), V[fitidx,:].flatten())
ax.plot(time[fitidx[0]:idx[-1] ],
func(time[fitidx[0]:idx[-1]] - time[fitidx[0]],*popt), 'r')
# import ipdb; ipdb.set_trace()
ax.axis('tight')
remove_spines(ax)
ax.set_ylabel('Voltage (mV)')
ax.set_xlabel('Time (s)')
return popt
def analyseSTEPprotocol(folder, ax = None):
files, kfiles = gatherH5files(folder)
Ke = np.loadtxt(kfiles[0]) / 1e9
V = []
I = []
spks = []
ktrials = [ os.path.basename(k).split('_')[0] for k in kfiles]
print('Analysing step protocol...')
spks = []
for f in files:
ent, info = lcg.loadH5Trace(f)
if not os.path.basename(f).split('.')[0] in ktrials:
tmpV, tmpI, meta = compensateVoltage(ent, Ke, ['AnalogInput', 'Waveform'])
V.append(tmpV)
I.append(tmpI)
spks.append(argfindspks(tmpV,-20))
time = np.linspace( 0, info['tend'], len(tmpV))
prot_time = np.cumsum(meta[::, 0])
idx = np.where((time > prot_time[0] - 0.1) & (time < prot_time[1] + .1))[0]
V = np.vstack(V).T
I = np.vstack(I).T
ax.plot(time[idx], V[idx,], color = 'gray', lw = 0.6)
ax.plot(time[idx], V[idx,0], color = 'k', lw = 1)
spks = [time[sp] for sp in spks]
isi = [np.diff(sp) for sp in spks]
adapt_coeff = [(i[-1] - i[0]) / i[0] for i in isi]
# print adapt_coeff
ax.axis('tight')
remove_spines(ax)
ax.set_ylabel('Voltage (mV)')
ax.set_xlabel('Time (s)')
return np.mean(adapt_coeff)
def analyseRAMPprotocol(folder, ax = None):
files, kfiles = gatherH5files(folder)
Ke = np.loadtxt(kfiles[0]) / 1e9
V = []
I = []
spks = []
ktrials = [ os.path.basename(k).split('_')[0] for k in kfiles]
print('Analysing ramp protocol...')
for f in files:
ent, info = lcg.loadH5Trace(f)
if not os.path.basename(f).split('.')[0] in ktrials:
tmpV, tmpI, meta = compensateVoltage(ent, Ke, ['AnalogInput', 'Waveform'])
V.append(tmpV)
I.append(tmpI)
spks.append(argfindspks(tmpV,-20))
time = np.linspace( 0, info['tend'], len(tmpV))
prot_time = np.cumsum(meta[::, 0])
idx = np.where(time > prot_time[-3])[0]
V = np.vstack(V).T
I = np.vstack(I).T
ax[0].plot(time[idx], V[idx, 0], 'k')
IFR_time = []
IFR = []
threshold_current = [I[ii[0], jj] for jj, ii in enumerate(spks)]
for sp in spks:
sp = time[sp]
IFR_time.append(sp[1:] - np.diff(sp) / 2)
IFR.append(1./ np.diff(sp))
ax[1].plot(np.hstack(IFR_time), np.hstack(IFR), 'r-o',
markerfacecolor = 'gray', markeredgecolor = 'k',
markersize = 2)
for a in ax:
a.axis('tight')
tmp = ax[0].get_ylim()
ax[1].set_xlim(ax[0].get_xlim())
ax[1].set_ylim([0, max(ax[1].get_ylim()) * 1.2])
ax[0].set_ylim(tmp)
remove_spines(ax[0])
ax[0].set_ylabel('Voltage (mV)')
ax[0].set_xlabel('Time (s)')
remove_spines(ax[1], v = 'right')
ax[1].spines['top'].set_visible(False)
ax[1].xaxis.set_visible(False)
ax[1].spines['right'].set_color('red')
ax[1].tick_params(axis='y', colors='red')
ax[1].set_ylabel('Firing Freq (Hz)', color = 'red')
return np.mean(threshold_current)
def remove_spines(ax, h = 'bottom', v = 'left'):
nv = 'right'
nh = 'top'
if h in ['top']:
nh = 'bottom'
ax.get_xaxis().tick_top()
else:
ax.get_xaxis().tick_bottom()
if v in ['right']:
nh = 'left'
ax.get_yaxis().tick_right()
else:
ax.get_yaxis().tick_left()
ax.spines[nv].set_visible(False)
ax.spines[nh].set_visible(False)
ax.spines[v].set_visible(True)
ax.spines[h].set_visible(True)
ax.yaxis.set_label_position(v)
ax.xaxis.set_label_position(h)
ax.xaxis.set_ticks_position(h)
ax.yaxis.set_ticks_position(v)
def create_figure():
fig = plt.figure(figsize = (7, 7), facecolor = 'w',
edgecolor = None)
ax = []
ax.append(fig.add_axes([.05, .6, .45, .3]))
ax.append(fig.add_axes([.5, .6, .45, .1]))
ax.append(fig.add_axes([.5, .72, .45, .2]))
ax.append(fig.add_axes([.1, .37, .8, .2]))
newax = plt.axes([.1, .37, .8, .2], axisbg='none')
ax.append(fig.add_axes(newax))
ax.append(fig.add_axes([.1, .1, .3, .2]))
ax.append(fig.add_axes([.5, .1, .4, .2]))
fig.text(0.05,0.92,'A',fontsize=10,verticalalignment='bottom',
horizontalalignment='right')
fig.text(0.45,0.92,'B',fontsize=10,verticalalignment='bottom',
horizontalalignment='right')
fig.text(0.05,0.58,'C',fontsize=10,verticalalignment='bottom',
horizontalalignment='right')
fig.text(0.05,0.3,'D',fontsize=10,verticalalignment='bottom',
horizontalalignment='right')
fig.text(0.45,0.3,'E',fontsize=10,verticalalignment='bottom',
horizontalalignment='right')
return (fig, ax)
def analyze(directory='.'):
(fig, ax) = create_figure()
threshold = analyseAPprotocol('ap/01', ax = ax[0])
analyseVIprotocol('vi/01', ax[1:3])
analyseRAMPprotocol('ramp/01', ax[3: 5])
analyseTAUprotocol('tau/01', ax[5])
analyseSTEPprotocol('steps/01', ax[6])
for a in ax[:3]:
a.axis('off')
args = {}
args['format'] = 'pdf'
print('Saving figure...')
figname = 'ecode{0}.'.format('01')
fig.savefig('{0}{1}'.format(figname, args['format']),**args)
args['format'] = 'png'
fig.savefig('{0}{1}'.format(figname, args['format']),**args)
plt.show()
| gpl-3.0 |
UNR-AERIAL/scikit-learn | sklearn/svm/tests/test_sparse.py | 32 | 12988 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
bilderbuchi/OF_repo_utilities | plot_issue_stats.py | 1 | 11205 | #!/usr/bin/env python3
"""Main script for openFrameworks Github issues visualization."""
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import dateutil.parser
import dateutil.rrule
import dateutil.tz
import pickle
import github_tools
import os
import sys
from subprocess import check_output
from operator import itemgetter
# TODO:
# possibly use a database instead of pickling: https://github.com/pudo/dataset
# link plots
# Calculate average, stddev, max of time-to-fix, open time.
# other plots:
# % issues without label
# issue longest silent
# longest open issue
# most issues closed/opened per week/7day window
# % without any comment
# most bugs squashed
def naive_dt(aware_datetime):
"""
Return a naive datetime.datetime object converted from a passed aware one.
Enables comparison of aware datetimes from the local repo and the
naive ones coming from PyGithub.
"""
return aware_datetime.astimezone(dateutil.tz.tzutc()).replace(tzinfo=None)
def annot_tags_events(axis, tag_list, events, event_titles):
"""Add tag and event annotations to a given axis"""
for _t in tag_list:
axis.axvline(_t['date'], color='y', alpha=0.5)
# coordinates need to be converted to mpl internal format, see
# http://stackoverflow.com/a/11068038/599884
axis.annotate(_t['name'], xy=(mdates.date2num(_t['date']), 0.90),
xycoords=("data", "axes fraction"), ha='right', va='top')
for e in range(len(event_titles)):
axis.axvspan(events[e][0], events[e][1], color='y', alpha=0.5)
axis.annotate(event_titles[e], xy=(mdates.date2num(events[e][0]), 0.97),
xycoords=("data", "axes fraction"), ha='right', va='top')
def main():
"""Main function for plot_issue_stats"""
###########################################################################
# CONFIGURATION
target_branch = 'master'
mpl.rc('axes', grid=True, axisbelow=True)
mpl.rc('grid', linestyle='-', color='lightgray')
mpl.rc('figure', dpi=90)
# -------------------------------------------------------------------------
event_datefmt = "%Y-%m-%d"
tmp = [['2008-09-04', '2008-09-09'],
['2011-01-10', '2011-01-14'],
['2012-02-20', '2012-02-27'],
['2013-08-08', '2013-08-14']]
OFEventTitles = ['OFLab Linz', 'DevCon Pittsburgh',
'DevCon Detroit', 'DevCon Yamaguchi']
OFEvents = [[datetime.datetime.strptime(x, event_datefmt) for x in y]
for y in tmp]
pickle_dir = os.path.abspath('issue_stats_pickles')
autosave_dir = os.path.abspath('issue_stats_autosave')
###########################################################################
# Fetch needed data
print('Fetching fresh data from Github')
Repo = github_tools.get_repo()
###########################################################################
print('\nGetting issues')
print('Github shows ' + str(Repo.open_issues) + ' open issues and PRs.')
github_tools.log_traffic() # initial call to establish baseline
issues_path = os.path.join(pickle_dir, 'Issues.pickle')
if os.path.isfile(issues_path):
print('Loading issues from disk. Updating...')
with open(issues_path, 'rb') as fp:
Issues = pickle.load(fp)
last_update = max({v.updated_at for v in Issues.values()})
print('Last updated at ' + str(last_update) + ' UTC')
_issue_updates = Repo.get_issues(state='all', since=last_update)
_counter = 0
for i in _issue_updates:
_counter += 1
Issues[i.number] = i # replace updated issues in local structure
print(str(_counter) + ' issue(s) updated')
else:
print('\nFetching issues from Github')
Issues = dict() # holds the PyGithub issues, indexed by number
for i in Repo.get_issues(state='all'):
# i.update() # to fetch whole contents
# This takes one request per update(), and is very slow!
# (15+ min and 10-20kB/s)
# skipping this step takes 100 requests for 3000 issues and ca 2min
Issues[i.number] = i
print('Issues received')
print('Creating processed issue list')
issue_list = []
for i in Issues.values():
if i.closed_at:
_closed = i.closed_at
else:
_closed = None
_duration = (i.closed_at or datetime.datetime.now()) - i.created_at
issue_list.append({'number': i.number,
'state': i.state,
'created_at': i.created_at,
'closed_at': _closed,
'duration_open': _duration})
issue_list.sort(key=itemgetter('number'))
print('%s issues on record' % len(issue_list))
github_tools.log_traffic()
###########################################################################
print('\nGetting tags')
Tags = Repo.get_tags()
print('Creating tags list')
tags_list = []
for t in Tags:
tags_list.append({'date': t.commit.commit.committer.date,
'name': t.name})
github_tools.log_traffic()
###########################################################################
print('\nGetting commits')
commits_list = []
repopath = github_tools.local_repo_location()
if repopath:
print('Getting commit data from local repository...')
# check for correct branch
if check_output(['git', 'symbolic-ref', '--short', 'HEAD'],
cwd=repopath,
universal_newlines=True).rstrip() != target_branch:
print('ERROR: Please check out the branch ' +
target_branch + ' first.')
sys.exit(1)
# check if up-to-date commit is checked out
current_sha = Repo.get_branch(target_branch).commit.sha
if check_output(['git', 'rev-parse', '--verify', 'HEAD'],
cwd=repopath,
universal_newlines=True).rstrip() != current_sha:
print('ERROR: Please sync with the remote repository. ' +
'The current online commit is ' + current_sha)
sys.exit(1)
# get commit list
_out = check_output(['git', '--no-pager', 'log', target_branch,
'--pretty=format:"%h %ci %ai %p"'],
cwd=repopath,
universal_newlines=True)
_outlist = [o.strip('"') for o in _out.split(sep='\n')]
for l in _outlist:
# split into sha, committer date, author date, parents
_temp = l.split(sep=' ')
_parse = dateutil.parser.parse
# TODO: add parents to data structure
commits_list.append({'sha': _temp[0],
'committer_date': naive_dt(_parse(_temp[1])),
'author_date': naive_dt(_parse(_temp[2]))})
print('%s commits on record' % len(commits_list))
print('Done')
else:
print('No local repository specified. Getting commits from Github')
Commits = Repo.get_commits()
for c in Commits:
commits_list.append({'sha': c.sha,
'committer_date': c.commit.committer.date,
'author_date': c.commit.author.date})
print('%s commits received' % len(commits_list))
github_tools.log_traffic()
###########################################################################
print('\nProcessing objects')
# one row of dates, one row of indices, +1 for opening, -1 for closing
open_issue_count = []
for i in issue_list:
open_issue_count.append({'date': i['created_at'],
'status_change': 1})
if i['closed_at']:
open_issue_count.append({'date': i['closed_at'],
'status_change': -1})
open_issue_count.sort(key=itemgetter('date'))
_sum = 0
for i in open_issue_count:
_sum += i['status_change']
i['open_issues'] = _sum
xbegin = min([min([x['author_date'] for x in commits_list]),
min([x['date'] for x in open_issue_count])])
xend = datetime.datetime.utcnow()
print("Data range: %s days" % str((xend-xbegin).days))
bin_rrule = dateutil.rrule.rrule(dateutil.rrule.WEEKLY,
dtstart=xbegin,
byweekday=dateutil.rrule.MO)
bin_edges = mpl.dates.date2num([xbegin] +
bin_rrule.between(xbegin, xend, inc=False) +
[xend])
###########################################################################
# Pickling of data structures
with open(issues_path, 'wb') as fp:
pickle.dump(Issues, fp)
###########################################################################
print('Plotting figure')
fig = plt.figure(figsize=(380/25.4, 200/25.4))
ax = fig.add_subplot(211)
plt.title('OF issue tracker statistics - created ' + str(xend.date()))
annot_tags_events(ax, tags_list, OFEvents, OFEventTitles)
ax.plot([x['date'] for x in open_issue_count],
[x['open_issues'] for x in open_issue_count],
label='open issues', color='k', alpha=0.8)
_closed_issue_dates = [x['closed_at'] for x in issue_list
if x['state'] == 'closed']
ax.hist([mpl.dates.date2num([x['created_at'] for x in issue_list]),
mpl.dates.date2num(_closed_issue_dates)],
histtype='barstacked',
bins=bin_edges,
label=['created issues', 'closed issues'],
color=['red', 'green'],
alpha=0.8)
ax.legend(loc='center left')
locator = mpl.dates.AutoDateLocator(maxticks=15)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mpl.dates.AutoDateFormatter(locator))
ax.xaxis.grid(False)
ax.set_xlim(left=xbegin)
ax.tick_params(axis='x', direction='out')
# -------------------------------------------------------------------------
ax2 = fig.add_subplot(212, sharex=ax)
plt.title('OF commit statistics')
annot_tags_events(ax2, tags_list, OFEvents, OFEventTitles)
ax2.hist(mpl.dates.date2num([x['author_date'] for x in commits_list]),
bins=bin_edges,
label=(target_branch + ' commits authored'),
color='blue',
alpha=0.5)
ax2.legend(loc='center left')
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(mpl.dates.AutoDateFormatter(locator))
ax2.xaxis.grid(False)
ax2.set_xlim(left=xbegin)
ax2.tick_params(axis='x', direction='out')
fig.autofmt_xdate()
plt.tight_layout()
plt.show()
fig.savefig(os.path.join(autosave_dir, 'OF_repo_viz_' + str(xend.date()) +
'.png'))
print('\nFinished!')
###########################################################################
if __name__ == '__main__':
main()
| mit |
suku248/nest-simulator | pynest/examples/aeif_cond_beta_multisynapse.py | 8 | 1852 | # -*- coding: utf-8 -*-
#
# aeif_cond_beta_multisynapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
aeif_cond_beta_multisynapse
---------------------------
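
This example creates an ``aeif_cond_beta_multisynapse`` neuron with four
receptor ports (different reversal potentials and beta-shaped synaptic
conductances with different rise/decay time constants), drives it from a
single spike generator through four synapses with different delays, and
plots the recorded membrane potential.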
"""
import nest
import numpy as np
import matplotlib.pyplot as plt
neuron = nest.Create('aeif_cond_beta_multisynapse')
nest.SetStatus(neuron, {"V_peak": 0.0, "a": 4.0, "b": 80.5})
nest.SetStatus(neuron, {'E_rev': [0.0, 0.0, 0.0, -85.0],
'tau_decay': [50.0, 20.0, 20.0, 20.0],
'tau_rise': [10.0, 10.0, 1.0, 1.0]})
spike = nest.Create('spike_generator', params={'spike_times':
np.array([10.0])})
voltmeter = nest.Create('voltmeter')
delays = [1.0, 300.0, 500.0, 700.0]
w = [1.0, 1.0, 1.0, 1.0]
for syn in range(4):
nest.Connect(spike, neuron, syn_spec={'synapse_model': 'static_synapse',
'receptor_type': 1 + syn,
'weight': w[syn],
'delay': delays[syn]})
nest.Connect(voltmeter, neuron)
nest.Simulate(1000.0)
Vms = voltmeter.get("events", "V_m")
ts = voltmeter.get("events", "times")
plt.plot(ts, Vms)
plt.show()
| gpl-2.0 |
jungla/ICOM-fluidity-toolbox | Detectors/offline_advection/advect_particles_convection.py | 1 | 4065 | import os, sys
import myfun
import numpy as np
import lagrangian_stats
import scipy.interpolate as interpolate
import csv
import matplotlib.pyplot as plt
import advect_functions
import fio
from intergrid import Intergrid
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
#label = 'm_25_2_512'
label = 'm_25_1_particles'
dayi = 481 #10*24*1
dayf = 3400 #10*24*4
days = 1
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
path = '../../2D/U/Velocity_CG/'
time = range(dayi,dayf,days)
# dimensions archives
# ML exp
#Xlist = np.linspace(0,10000,801)
#Ylist = np.linspace(0,4000,321)
Xlist = np.linspace(0,2000,161)
Ylist = np.linspace(0,2000,161)
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = 1.*np.cumsum(dl)
maps = [Xlist,Ylist,Zlist]
lo = np.array([ 0, 0, 0])
hi = np.array([ 2000, 2000, 50]) # highest lat, highest lon
#lo = np.array([ 0, 0, 0])
#hi = np.array([ 10000, 4000, 50]) # highest lat, highest lon
[X,Y,Z] = myfun.meshgrid2(Xlist,Ylist,Zlist)
Y = np.reshape(Y,(np.size(Y),))
X = np.reshape(X,(np.size(X),))
Z = np.reshape(Z,(np.size(Z),))
xn = len(Xlist)
yn = len(Ylist)
zn = len(Zlist)
dx = np.gradient(Xlist)
dy = np.gradient(Ylist)
dz = np.gradient(Zlist)
dt = 360
#dt = 1200
time = np.asarray(range(dayi,dayf,days))
print time[0]
# initial particles position
x0 = range(500,1510,10)
y0 = range(500,1510,10)
z0 = [5,10,15]
#x0 = range(3000,4010,10)
#y0 = range(2000,3010,10)
#z0 = range(1,20,4)
xp = len(x0)
yp = len(y0)
zp = len(z0)
pt = xp*yp*zp
[z0,y0,x0] = myfun.meshgrid2(z0,y0,x0)
x0 = np.reshape(x0, (np.size(x0)))
y0 = np.reshape(y0, (np.size(y0)))
z0 = np.reshape(z0, (np.size(z0)))
#levels = np.zeros(x0.shape) + 1.
#levels[np.where(z0 != 2)] = np.nan
#x0 = lo[0] + np.random.uniform( size=(pt) ) * (hi[0] - lo[0])
#y0 = lo[1] + np.random.uniform( size=(pt) ) * (hi[1] - lo[1])
#z0 = lo[2] + np.random.uniform( size=(pt) ) * (hi[2] - lo[2])
#z0 = z0*0-1.
x = np.zeros((pt))
y = np.zeros((pt))
z = np.zeros((pt))
## ADVECT PARTICLES
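# Offline advection: for each interval the CG velocity fields at t and t+1
# are read from CSV, particle positions are advanced with an RK4 step between
# the two fields, and periodic boundary conditions are re-applied.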
kick = 5.
filename = './traj_'+label+'_'+str(dayi)+'_'+str(dayf)+'_3D.csv'
#filename = './traj_'+label+'_'+str(dayi)+'_'+str(dayf)+'_2D.csv'
print filename
fd = open(filename,'wb')
for p in range(pt):
fd.write(str(x0[p])+', '+str(y0[p])+', '+str(-1.*z0[p])+', '+str(time[0])+'\n')
import random
for t in range(len(time)-1):
print 'time:', time[t]
file0 = path+'Velocity_CG_0_'+label+'_'+str(time[t])+'.csv'
file1 = path+'Velocity_CG_1_'+label+'_'+str(time[t])+'.csv'
file2 = path+'Velocity_CG_2_'+label+'_'+str(time[t])+'.csv'
Ut0 = fio.read_Scalar(file0,xn,yn,zn)
Vt0 = fio.read_Scalar(file1,xn,yn,zn)
Wt0 = -1.*fio.read_Scalar(file2,xn,yn,zn) #0*Ut0
file0 = path+'Velocity_CG_0_'+label+'_'+str(time[t+1])+'.csv'
file1 = path+'Velocity_CG_1_'+label+'_'+str(time[t+1])+'.csv'
file2 = path+'Velocity_CG_2_'+label+'_'+str(time[t+1])+'.csv'
Ut1 = fio.read_Scalar(file0,xn,yn,zn)
Vt1 = fio.read_Scalar(file1,xn,yn,zn)
Wt1 = -1.*fio.read_Scalar(file2,xn,yn,zn) #0*Ut0
# subcycling
nt = 20
ds = 1.*dt / nt
# for st in range(nt+1):
# print st
# Us0 = (Ut1*st + Ut0*(nt-st))/(nt)
# Us1 = (Ut1*(st+1) + Ut0*(nt-st-1))/(nt)
# Vs0 = (Vt1*st + Vt0*(nt-st))/(nt)
# Vs1 = (Vt1*(st+1) + Vt0*(nt-st-1))/(nt)
# Ws0 = (Wt1*st + Wt0*(nt-st))/(nt)
# Ws1 = (Wt1*(st+1) + Wt0*(nt-st-1))/(nt)
# x0,y0,z0 = advect_functions.RK4(x0,y0,z0,Us0,Vs0,Ws0,Us1,Vs1,Ws1,lo,hi,maps,ds)
x0,y0,z0 = advect_functions.RK4(x0,y0,z0,Ut0,Vt0,Wt0,Ut1,Vt1,Wt1,lo,hi,maps,dt)
#x0,y0,z0 = advect_functions.EULER(x0,y0,z0,Ut0,Vt0,Wt0,lo,hi,maps,dt)
# random.seed()
# random kick
# for i in range(len(x0)):
# x0[i] = x0[i] + random.uniform(-kick,kick)
# y0[i] = y0[i] + random.uniform(-kick,kick)
x0,y0,z0 = advect_functions.pBC(x0,y0,z0,lo,hi)
# x1,y1,z1 = x0,y0,z0
# write
for p in range(pt):
fd.write(str(x0[p])+', '+str(y0[p])+', '+str(-1.*z0[p])+', '+str(time[t+1])+'\n')
fd.close()
| gpl-2.0 |
fzalkow/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the position of the data points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with a structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
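# The only difference from the unstructured run above is that this
# connectivity matrix is passed to AgglomerativeClustering below.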
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/axes_grid/demo_parasite_axes2.py | 16 | 1208 | from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
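# Demo of a host axis with two parasite y-axes sharing one x-axis; the second
# parasite axis is drawn 60 points to the right of the host frame.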
if 1:
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlim(0, 2)
host.set_ylim(0, 2)
host.set_xlabel("Distance")
host.set_ylabel("Density")
par1.set_ylabel("Temperature")
par2.set_ylabel("Velocity")
p1, = host.plot([0, 1, 2], [0, 1, 2], label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], label="Velocity")
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
plt.show()
#plt.savefig("Test")
| mit |
developEdwin/Proyecto-Browniano | rayleigh.py | 1 | 1026 | # -*- coding: utf-8 -*-
"""
Created on Wed May 11 17:48:38 2016
@author: Edwin
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def rayleigh(x0, n, dt, delta):
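    """
    Generate Brownian-motion-like trajectories starting at x0.

    Returns an array with n+1 positions per starting point, built as the
    cumulative sum of Gaussian steps drawn with scale delta*sqrt(dt).
    """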
x0 = np.asfarray(x0)
shp = (n+1,) + x0.shape
r = np.random.normal(size=shp, scale=delta*np.sqrt(dt))
r[0] = 0.0
x = r.cumsum(axis=0)
x += x0
return x
xinicial = np.zeros(2)  # number of particles
n = 1000
dt = 10.0
delta = 0.25
xini = np.array(rayleigh(xinicial, n, dt, delta))
yini = np.array(rayleigh(xinicial, n, dt, 0.25))
zini = np.array(rayleigh(xinicial, n, dt, 0.2))
# *--- 3D plot ---*
# Create the color map
number = 2  # number of colors per particle
cmap = plt.get_cmap('gnuplot')
colors = [cmap(i) for i in np.linspace(0, 1, number)]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i, color in enumerate(colors, start=1):
xini = np.array(rayleigh(xinicial, n, dt, i))
ax.scatter(xini, yini, zini, color=color)
plt.show()
| gpl-3.0 |
rjferrier/fluidity | tests/wetting_and_drying_balzano2_cg/plotfs_detec.py | 5 | 5473 | #!/usr/bin/env python
import vtktools
import sys
import math
import re
import commands
import matplotlib.pyplot as plt
import getopt
from scipy.special import erf
from numpy import poly1d
from matplotlib.pyplot import figure, show
from numpy import pi, sin, linspace
from matplotlib.mlab import stineman_interp
from numpy import exp, cos
from fluidity_tools import stat_parser
def mirror(x):
return 13800-x
def usage():
print 'Usage:'
print 'plotfs_detec.py [-w] --file=detector_filename --save=filename'
print '--save=... saves the plots as images instead of plotting them on the screen.'
print '-w plots the wetting procedure (drying is default).'
# should be copied from the diamond extrude function. X is 2 dimensional
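# Piecewise-linear bathymetry: a uniform slope of -1/2760, interrupted by a
# horizontal shelf between x = 3600 and x = 4800 and a steeper section of
# slope -1/1380 between x = 4800 and x = 6000 that rejoins the main slope.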
def bathymetry_function(X):
if X<=3600 or X>6000:
return -X/2760
elif X>3600 and X<=4800:
return -30.0/23
elif X>4800 and X<=6000:
return -X/1380+50.0/23
################# Main ###########################
def main(argv=None):
filename=''
timestep_ana=0.0
dzero=0.01
save='' # If nonempty, we save the plots as images instead if showing them
wetting=False
try:
opts, args = getopt.getopt(sys.argv[1:], ":w", ['file=','save='])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '--file':
filename=arg
elif opt == '--save':
save=arg
elif opt == '-w':
wetting=True
if filename=='':
print 'No filename specified. You have to give the detectors filename.'
usage()
sys.exit(2)
####################### Print time plot ###########################
print 'Generating time plot'
s = stat_parser(filename)
timesteps=s["ElapsedTime"]["value"]
timestep=timesteps[1]-timesteps[0]
print "Found ", len(timesteps), " timesteps with dt=", timestep
if timestep_ana==0.0:
timestep_ana=timestep
fs=s["water"]["FreeSurface"]
print "Found ", len(fs), " detectors. We assume they are equidistant distributed over the domain (", 0, "-", 13800, ")."
# Get and plot results
  plt.ion() # switch on interactive mode
fig2 = figure()
ax2 = fig2.add_subplot(111)
if wetting:
##plot_start=90 # in timesteps
plot_start=22 # in timesteps, after 18 timesteps the waterlevel reaches its lowest point
##plot_end=114 # in timesteps
plot_end=54 # in timesteps
plot_name='Wetting'
else:
plot_start=54 # in timesteps
plot_end=90 # in timesteps
plot_name='Drying'
for t in range(0,len(timesteps)):
# ignore the first waveperiod
if t<plot_start:
continue
if t>plot_end:
continue
fsvalues=[]
xcoords=[]
for name, item in fs.iteritems():
#print name
xcoords.append(mirror(s[name]['position'][0][0]))
#print xcoord
fsvalues.append(fs[name][t])
# Plot result of one timestep
ax2.plot(xcoords,fsvalues,'r,', label='Numerical solution')
# Plot Analytical solution
fsvalues_ana=[]
offset=-bathymetry_function(0.0)+dzero
xcoords.sort()
for x in xcoords:
fsvalues_ana.append(bathymetry_function(mirror(x))-offset)
    # Plot vertical line in bathymetry on right boundary
xcoords.append(xcoords[len(xcoords)-1]+0.000000001)
fsvalues_ana.append(2.1)
ax2.plot(xcoords, fsvalues_ana, 'k', label='Bathymetry')
#plt.legend()
if t==plot_end:
plt.ylim(-2.2,1.4)
# change from meters in kilometers in the x-axis
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = plt.xticks()
for i in range(0,len(locs)):
labels[i]=str(locs[i]/1000)
plt.xticks(locs, labels)
#plt.title(plot_name)
plt.xlabel('Position [km]')
plt.ylabel('Free surface [m]')
if save=='':
plt.draw()
raw_input("Please press Enter")
else:
plt.savefig(save+'_'+plot_name+'.pdf', facecolor='white', edgecolor='black', dpi=100)
plt.cla()
t=t+1
# Make video from the images:
# mencoder "mf://*.png" -mf type=png:fps=30 -ovc lavc -o output.avi
if __name__ == "__main__":
main()
| lgpl-2.1 |
exe0cdc/PyscesToolbox | psctb/analyse/_ratechar.py | 1 | 41049 | from os import path
from colorsys import hsv_to_rgb, rgb_to_hsv
from collections import OrderedDict
from random import shuffle
import numpy
from pysces.PyscesModelMap import ModelMap
from pysces import Scanner
import pysces
from matplotlib.pyplot import get_cmap
from .. import modeltools
from ..latextools import LatexExpr
from ..utils.plotting import ScanFig, LineData, Data2D
from ..utils.misc import silence_print
from ..utils.misc import DotDict
from ..utils.misc import formatter_factory
exportLAWH = silence_print(pysces.write.exportLabelledArrayWithHeader)
__all__ = ['RateChar']
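# Minimal usage sketch (the species name 'S1' is hypothetical and `mod` is
# assumed to be an already loaded PySCeS model):
#
#   rc = RateChar(mod)
#   rc.do_ratechar(fixed='S1')
#   rc.S1.plot()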
def strip_nan_from_scan(array_like):
# this function assumes that column
# zero contains valid data (the scan input)
t_f = list(numpy.isnan(array_like[:, 1]))
start = t_f.index(False)
end = len(t_f) - t_f[::-1].index(False)
return array_like[start:end, :]
class RateChar(object):
def __init__(self, mod, min_concrange_factor=100,
max_concrange_factor=100,
scan_points=256,
auto_load=False):
super(RateChar, self).__init__()
self.mod = mod
self.mod.SetQuiet()
self._model_map = ModelMap(mod)
self.mod.doState()
self._analysis_method = 'ratechar'
self._working_dir = modeltools.make_path(self.mod,
self._analysis_method)
self._min_concrange_factor = min_concrange_factor
self._max_concrange_factor = max_concrange_factor
self._scan_points = scan_points
self._ltxe = LatexExpr(self.mod)
for species in self.mod.species:
setattr(self, species, None)
if auto_load:
self.load_session()
def do_ratechar(self, fixed='all',
scan_min=None,
scan_max=None,
min_concrange_factor=None,
max_concrange_factor=None,
scan_points=None,
solver=0,
auto_save=False):
# this function wraps _do_scan functionality in a user friendly bubble
if fixed == 'all':
to_scan = self.mod.species
elif type(fixed) is list or type(fixed) is tuple:
for each in fixed:
assert each in self.mod.species, 'Invalid species'
to_scan = fixed
else:
assert fixed in self.mod.species, 'Invalid species'
to_scan = [fixed]
for each in to_scan:
fixed_mod, fixed_ss = self._fix_at_ss(each)
scan_start = self._min_max_chooser(fixed_ss,
scan_min,
min_concrange_factor,
'min')
scan_end = self._min_max_chooser(fixed_ss,
scan_max,
max_concrange_factor,
'max')
# here there could be a situation where a scan_min > scan_max
# I wonder what will happen....
if not scan_points:
scan_points = self._scan_points
column_names, results = self._do_scan(fixed_mod,
each,
scan_start,
scan_end,
scan_points)
cleaned_results = strip_nan_from_scan(results)
rcd = RateCharData(fixed_ss,
fixed_mod,
self.mod,
column_names,
cleaned_results,
self._model_map,
self._ltxe)
setattr(self, each, rcd)
if auto_save:
self.save_session()
def _min_max_chooser(self, ss, point, concrange, min_max):
# chooses a minimum or maximum point based
# on the information given by a user
# ie if a specific min/max point is given - use that
# if only concentration range is given -use that
        # if nothing is given - use the default conc_range_factor
# pretty simple stuff
if point:
the_point = point
if not point and concrange:
if min_max == 'min':
the_point = ss / concrange
elif min_max == 'max':
the_point = ss * concrange
if not point and not concrange:
if min_max == 'min':
the_point = ss / self._min_concrange_factor
elif min_max == 'max':
the_point = ss * self._max_concrange_factor
return the_point
@silence_print
def _do_scan(self,
fixed_mod,
fixed,
scan_min,
scan_max,
scan_points,
solver=0):
# do scan is a simplified interface to pysces.Scanner
# more intuitive than Scan1 (functional vs OO??)
# returns the names of the scanned blocks together with
# the results of the scan
assert solver in (0, 1, 2), 'Solver mode can only be one of 0, 1 or 2'
fixed_mod.mode_solver = solver
demand_blocks = [
'J_' + r for r in getattr(self._model_map, fixed).isSubstrateOf()]
supply_blocks = [
'J_' + r for r in getattr(self._model_map, fixed).isProductOf()]
user_output = [fixed] + demand_blocks + supply_blocks
scanner = Scanner(fixed_mod)
scanner.quietRun = True
scanner.addScanParameter(
fixed, scan_min, scan_max, scan_points, log=True)
scanner.addUserOutput(*user_output)
scanner.Run()
return user_output, scanner.UserOutputResults
@silence_print
def _fix_at_ss(self, fixed):
# fixes the metabolite at the steady_state
# (calls psctb.modeltools.fix_metabolite)
# and returns both the ss value and the fixed model
self.mod.doState()
fixed_ss = getattr(self.mod, fixed + '_ss')
fixed_mod = modeltools.fix_metabolite(self.mod, fixed)
fixed_mod.SetQuiet()
# i don't like this approach at all, too many possible unintended side
# effects
# setattr(fixed_mod, fixed, fixed_ss)
# setattr(fixed_mod, 'fixed', fixed)
# setattr(fixed_mod, 'fixed_ss', fixed_ss)
fixed_mod.doState()
return fixed_mod, fixed_ss
def save_session(self, file_name=None):
file_name = modeltools.get_file_path(working_dir=self._working_dir,
internal_filename='save_data',
fmt='npz',
file_name=file_name,
write_suffix=False)
to_save = {}
for species in self.mod.species:
species_object = getattr(self, species)
try:
column_array = numpy.array(species_object._column_names)
scan_results = species_object._scan_results
to_save['col_{0}'.format(species)] = column_array
to_save['res_{0}'.format(species)] = scan_results
except:
pass
numpy.savez(file_name, **to_save)
def save_results(self, folder=None, separator=',',format='%f'):
base_folder = folder
for species in self.mod.species:
if folder:
folder = path.join(base_folder, species)
getattr(self, species).save_all_results(folder=folder,
separator=separator)
def load_session(self, file_name=None):
file_name = modeltools.get_file_path(working_dir=self._working_dir,
internal_filename='save_data',
fmt='npz',
file_name=file_name,
write_suffix=False)
loaded_data = {}
try:
with numpy.load(file_name) as data_file:
for k, v in data_file.iteritems():
loaded_data[k] = v
except IOError as e:
raise e
for species in self.mod.species:
try:
column_names = [str(each) for each in
list(loaded_data['col_{0}'.format(species)])]
scan_results = loaded_data['res_{0}'.format(species)]
fixed_species = species
fixed_mod, fixed_ss = self._fix_at_ss(fixed_species)
rcd = RateCharData(fixed_ss=fixed_ss,
fixed_mod=fixed_mod,
basemod=self.mod, column_names=column_names,
scan_results=scan_results,
model_map=self._model_map, ltxe=self._ltxe)
setattr(self, fixed_species, rcd)
except:
pass
class RateCharData(object):
def __init__(self,
fixed_ss,
fixed_mod,
basemod,
column_names,
scan_results,
model_map,
ltxe):
super(RateCharData, self).__init__()
self.mod = fixed_mod
self.scan_results = DotDict()
self.mca_results = DotDict()
self._slope_range_factor = 3.0
self.scan_results['fixed'] = column_names[0]
self.scan_results['fixed_ss'] = fixed_ss
self.scan_results['scan_range'] = scan_results[:, 0]
self.scan_results['flux_names'] = column_names[1:]
self.scan_results['flux_data'] = scan_results[:, 1:]
self.scan_results['scan_points'] = len(self.scan_results.scan_range)
self.scan_results['flux_max'] = None
self.scan_results['flux_min'] = None
self.scan_results['scan_max'] = None
self.scan_results['scan_min'] = None
self.scan_results['ec_names'] = None
self.scan_results['ec_data'] = None
self.scan_results['rc_names'] = None
self.scan_results['rc_data'] = None
self.scan_results['prc_names'] = None
self.scan_results['prc_data'] = None
self._column_names = column_names
self._scan_results = scan_results
self._model_map = model_map
self._analysis_method = 'ratechar'
self._basemod = basemod
self._working_dir = modeltools.make_path(self._basemod,
self._analysis_method,
[self.scan_results.fixed])
self._ltxe = ltxe
self._color_dict_ = None
self._data_setup()
self.mca_results._ltxe = ltxe
self.mca_results._make_repr(
'"$" + self._ltxe.expression_to_latex(k) + "$"', 'v',
formatter_factory())
# del self.scan_results
# del self.mca_results
def _data_setup(self):
# reset value to do mcarc
setattr(self.mod, self.scan_results.fixed, self.scan_results.fixed_ss)
self.mod.doMcaRC()
self._make_attach_total_fluxes()
self._min_max_setup()
self._attach_fluxes_to_self()
self._make_all_coefficient_lines()
self._attach_all_coefficients_to_self()
self._make_all_summary()
self._make_all_line_data()
def _change_colour_order(self, order=None):
if not order:
order = self._color_dict_.keys()
shuffle(order)
self._color_dict_ = dict(zip(order, self._color_dict_.values()))
self._make_all_line_data()
def _make_all_line_data(self):
self._make_flux_ld()
self._make_ec_ld()
self._make_rc_ld()
self._make_prc_ld()
self._make_total_flux_ld()
self._line_data_dict = OrderedDict()
self._line_data_dict.update(self._prc_ld_dict)
self._line_data_dict.update(self._flux_ld_dict)
self._line_data_dict.update(self._total_flux_ld_dict)
self._line_data_dict.update(self._ec_ld_dict)
self._line_data_dict.update(self._rc_ld_dict)
del self._flux_ld_dict
del self._ec_ld_dict
del self._rc_ld_dict
del self._prc_ld_dict
del self._total_flux_ld_dict
def _make_all_summary(self):
self._make_ec_summary()
self._make_cc_summary()
self._make_rc_summary()
self._make_prc_summary()
self.mca_results.update(self._ec_summary)
self.mca_results.update(self._cc_summary)
self.mca_results.update(self._rc_summary)
self.mca_results.update(self._prc_summary)
del self._ec_summary
del self._cc_summary
del self._rc_summary
del self._prc_summary
def _make_ec_summary(self):
ecs = {}
reagent_of = [each[2:] for each in self.scan_results.flux_names]
modifier_of = getattr(
self._model_map, self.scan_results.fixed).isModifierOf()
all_reactions = reagent_of + modifier_of
for reaction in all_reactions:
name = 'ec%s_%s' % (reaction, self.scan_results.fixed)
val = getattr(self.mod, name)
ecs[name] = val
self._ec_summary = ecs
def _make_rc_summary(self):
rcs = {}
for flux in self.scan_results.flux_names:
reaction = flux[2:]
name = '%s_%s' % (reaction, self.scan_results.fixed)
val = getattr(self.mod.rc, name)
name = 'rcJ' + name
rcs[name] = val
self._rc_summary = rcs
def _make_cc_summary(self):
ccs = {}
reagent_of = [each[2:] for each in self.scan_results.flux_names]
modifier_of = getattr(
self._model_map, self.scan_results.fixed).isModifierOf()
all_reactions = reagent_of + modifier_of
for flux_reaction in reagent_of:
for reaction in all_reactions:
name = 'ccJ%s_%s' % (flux_reaction, reaction)
val = getattr(self.mod, name)
ccs[name] = val
self._cc_summary = ccs
def _make_prc_summary(self):
prcs = {}
reagent_of = [each[2:] for each in self.scan_results.flux_names]
modifier_of = getattr(
self._model_map, self.scan_results.fixed).isModifierOf()
all_reactions = reagent_of + modifier_of
for flux_reaction in reagent_of:
for route_reaction in all_reactions:
ec = getattr(self.mod,
'ec%s_%s' % (
route_reaction, self.scan_results.fixed))
cc = getattr(self.mod,
'ccJ%s_%s' % (flux_reaction, route_reaction))
val = ec * cc
name = 'prcJ%s_%s_%s' % (flux_reaction,
self.scan_results.fixed,
route_reaction)
prcs[name] = val
self._prc_summary = prcs
def save_summary(self, file_name=None, separator=',',fmt='%f'):
file_name = modeltools.get_file_path(working_dir=self._working_dir,
internal_filename='mca_summary',
fmt='csv',
file_name=file_name, )
keys = self.mca_results.keys()
keys.sort()
values = numpy.array([self.mca_results[k]
for k in keys]).reshape(len(keys), 1)
try:
exportLAWH(values,
names=keys,
header=['Value'],
fname=file_name,
sep=separator,
format=fmt)
except IOError as e:
print e.strerror
def save_flux_results(self, file_name=None, separator=',',fmt='%f'):
file_name = modeltools.get_file_path(working_dir=self._working_dir,
internal_filename='flux_results',
fmt='csv',
file_name=file_name, )
scan_points = self.scan_results.scan_points
all_cols = numpy.hstack([
self._scan_results,
self.scan_results.total_supply.reshape(scan_points, 1),
self.scan_results.total_demand.reshape(scan_points, 1)])
column_names = self._column_names + ['Total Supply', 'Total Demand']
try:
exportLAWH(all_cols,
names=None,
header=column_names,
fname=file_name,
sep=separator,
format=fmt)
except IOError as e:
print e.strerror
def save_coefficient_results(self,
coefficient,
file_name=None,
separator=',',
folder=None,
fmt='%f'):
assert_message = 'coefficient must be one of "ec", "rc" or "prc"'
assert coefficient in ['rc', 'ec', 'prc'], assert_message
base_name = coefficient + '_results'
file_name = modeltools.get_file_path(working_dir=self._working_dir,
internal_filename=base_name,
fmt='csv',
file_name=file_name, )
results = getattr(self.scan_results, coefficient + '_data')
names = getattr(self.scan_results, coefficient + '_names')
new_names = []
for each in names:
new_names.append('x_vals')
new_names.append(each)
try:
exportLAWH(results,
names=None,
header=new_names,
fname=file_name,
sep=separator,
format=fmt)
except IOError as e:
print e.strerror
# TODO fix this method so that folder is a parameter only her
def save_all_results(self, folder=None, separator=',',fmt='%f'):
if not folder:
folder = self._working_dir
file_name = modeltools.get_file_path(working_dir=folder,
internal_filename='flux_results',
fmt='csv')
self.save_flux_results(separator=separator, file_name=file_name,fmt=fmt)
file_name = modeltools.get_file_path(working_dir=folder,
internal_filename='mca_summary',
fmt='csv')
self.save_summary(separator=separator, file_name=file_name, fmt=fmt)
for each in ['ec', 'rc', 'prc']:
base_name = each + '_results'
file_name = modeltools.get_file_path(working_dir=folder,
internal_filename=base_name,
fmt='csv')
self.save_coefficient_results(coefficient=each,
separator=separator,
file_name=file_name,
fmt=fmt)
def _min_max_setup(self):
# Negative minimum linear values mean nothing
# because they don't translate to a log space
# therefore we want the minimum non-negative/non-zero values.
# lets make sure there are no zeros
n_z_f = self.scan_results.flux_data[
numpy.nonzero(self.scan_results.flux_data)]
n_z_s = self.scan_results.scan_range[
numpy.nonzero(self.scan_results.scan_range)]
totals = numpy.vstack([self.scan_results.total_demand,
self.scan_results.total_supply])
n_z_t = totals[numpy.nonzero(totals)]
# and that the array is not now somehow empty
        # although if this happens - you have bigger problems
if len(n_z_f) == 0:
n_z_f = numpy.array([0.01, 1])
if len(n_z_s) == 0:
n_z_s = numpy.array([0.01, 1])
# lets also (clumsily) find the non-negative mins and maxes
# by converting to logspace (to get NaNs) and back
# and then getting the min/max non-NaN
# PS flux max is the max of the totals
with numpy.errstate(all='ignore'):
self.scan_results.flux_max = numpy.nanmax(10 ** numpy.log10(n_z_t))
self.scan_results.flux_min = numpy.nanmin(10 ** numpy.log10(n_z_f))
self.scan_results.scan_max = numpy.nanmax(10 ** numpy.log10(n_z_s))
self.scan_results.scan_min = numpy.nanmin(10 ** numpy.log10(n_z_s))
def _attach_fluxes_to_self(self):
for i, each in enumerate(self.scan_results.flux_names):
# setattr(self, each, self.scan_results.flux_data[:, i])
self.scan_results[each] = self.scan_results.flux_data[:, i]
def _attach_all_coefficients_to_self(self):
setup_for = ['ec', 'rc', 'prc']
for each in setup_for:
eval('self._attach_coefficients_to_self(self.scan_results.' + each + '_names,\
self.scan_results.' + each + '_data)')
def _make_all_coefficient_lines(self):
setup_for = ['ec', 'rc', 'prc']
for each in setup_for:
eval('self._make_' + each + '_lines()')
def _make_attach_total_fluxes(self):
demand_blocks = getattr(
self._model_map, self.scan_results.fixed).isSubstrateOf()
supply_blocks = getattr(
self._model_map, self.scan_results.fixed).isProductOf()
dem_pos = [self.scan_results.flux_names.index('J_' + flux)
for flux in demand_blocks]
sup_pos = [self.scan_results.flux_names.index('J_' + flux)
for flux in supply_blocks]
self.scan_results['total_demand'] = numpy.sum(
[self.scan_results.flux_data[:, i]
for i in dem_pos],
axis=0)
self.scan_results['total_supply'] = numpy.sum(
[self.scan_results.flux_data[:, i]
for i in sup_pos],
axis=0)
def _make_rc_lines(self):
names = []
resps = []
for each in self.scan_results.flux_names:
reaction = each[2:]
name = reaction + '_' + self.scan_results.fixed
J_ss = getattr(self.mod, each)
slope = getattr(self.mod.rc, name)
resp = self._tangent_line(J_ss, slope)
name = 'rcJ' + name
names.append(name)
resps.append(resp)
resps = numpy.hstack(resps)
self.scan_results.rc_names = names
self.scan_results.rc_data = resps
def _make_prc_lines(self):
names = []
prcs = []
reagent_of = [each[2:] for each in self.scan_results.flux_names]
all_reactions = reagent_of + \
getattr(self._model_map,
self.scan_results.fixed).isModifierOf()
for flux_reaction in self.scan_results.flux_names:
J_ss = getattr(self.mod, flux_reaction)
reaction = flux_reaction[2:]
for route_reaction in all_reactions:
ec = getattr(
self.mod,
'ec' + route_reaction + '_' + self.scan_results.fixed)
cc = getattr(self.mod, 'ccJ' + reaction + '_' + route_reaction)
slope = ec * cc
prc = self._tangent_line(J_ss, slope)
name = 'prcJ%s_%s_%s' % (reaction,
self.scan_results.fixed,
route_reaction)
names.append(name)
prcs.append(prc)
prcs = numpy.hstack(prcs)
self.scan_results.prc_names = names
self.scan_results.prc_data = prcs
def _make_ec_lines(self):
names = []
elasts = []
for each in self.scan_results.flux_names:
reaction = each[2:]
name = 'ec' + reaction + '_' + self.scan_results.fixed
J_ss = getattr(self.mod, each)
slope = getattr(self.mod, name)
elast = self._tangent_line(J_ss, slope)
names.append(name)
elasts.append(elast)
elasts = numpy.hstack(elasts)
self.scan_results.ec_names = names
self.scan_results.ec_data = elasts
def _attach_coefficients_to_self(self, names, tangent_lines):
sp = 0
ep = 2
for name in names:
# setattr(self, name, tangent_lines[:, sp:ep])
self.scan_results[name] = tangent_lines[:, sp:ep]
sp = ep
ep += 2
def _tangent_line(self, J_ss, slope):
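        # On a log-log plot a constant coefficient corresponds to a straight
        # line J = c * [fixed]^slope; c is chosen so the line passes through
        # the steady-state point (fixed_ss, J_ss). The golden-ratio/xyscale
        # factors shorten steep lines so all tangents have a similar visual
        # length around the steady state.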
fix_ss = self.scan_results.fixed_ss
constant = J_ss / (fix_ss ** slope)
ydist = numpy.log10(self.scan_results.flux_max / self.scan_results.flux_min)
xdist = numpy.log10(self.scan_results.scan_max / self.scan_results.scan_min)
golden_ratio = (1 + numpy.sqrt(5)) / 2
xyscale = xdist / (ydist * golden_ratio * 1.5)
scale_factor = numpy.cos(numpy.arctan(slope * xyscale))
distance = numpy.log10(self._slope_range_factor) * scale_factor
range_min = fix_ss / (10 ** distance)
range_max = fix_ss * (10 ** distance)
scan_range = numpy.linspace(range_min, range_max, num=2)
rate = constant * scan_range ** (slope)
return numpy.vstack((scan_range, rate)).transpose()
@property
def _color_dict(self):
if not self._color_dict_:
fix_map = getattr(self._model_map, self.scan_results.fixed)
relavent_reactions = fix_map.isProductOf() + \
fix_map.isSubstrateOf() + \
fix_map.isModifierOf()
num_of_cols = len(relavent_reactions) + 3
cmap = get_cmap('Set2')(
numpy.linspace(0, 1.0, num_of_cols))[:, :3]
color_list = [rgb_to_hsv(*cmap[i, :]) for i in range(num_of_cols)]
relavent_reactions.sort()
color_dict = dict(
zip(['Total Supply'] +
['J_' + reaction for reaction in relavent_reactions] +
['Total Demand'],
color_list))
# just to darken the colors a bit
for k, v in color_dict.iteritems():
color_dict[k] = [v[0], 1, v[2]]
self._color_dict_ = color_dict
return self._color_dict_
def _make_flux_ld(self):
color_dict = self._color_dict
flux_ld_dict = {}
demand_blocks = ['J_' + dem_reac for dem_reac in getattr(
self._model_map, self.scan_results.fixed).isSubstrateOf()]
supply_blocks = ['J_' + sup_reac for sup_reac in getattr(
self._model_map, self.scan_results.fixed).isProductOf()]
for flux in self.scan_results.flux_names:
flux_col = self.scan_results.flux_names.index(flux)
x_data = self.scan_results.scan_range
y_data = self.scan_results.flux_data[:, flux_col]
latex_expr = self._ltxe.expression_to_latex(flux)
flux_color = self._color_dict[flux]
color = hsv_to_rgb(flux_color[0],
flux_color[1],
flux_color[2] * 0.9)
for dem in demand_blocks:
if dem == flux:
flux_ld_dict[flux] = \
LineData(name=flux,
x_data=x_data,
y_data=y_data,
categories=['Fluxes',
'Demand',
flux],
properties={'label': '$%s$' % latex_expr,
'color': color})
break
for sup in supply_blocks:
if sup == flux:
flux_ld_dict[flux] = \
LineData(name=flux,
x_data=x_data,
y_data=y_data,
categories=['Fluxes',
'Supply',
flux],
properties={'label': '$%s$' % latex_expr,
'color': color})
break
self._flux_ld_dict = flux_ld_dict
def _make_ec_ld(self):
ec_ld_dict = {}
for ec_name in self.scan_results.ec_names:
for flux, flux_ld in self._flux_ld_dict.iteritems():
ec_reaction = flux[2:]
if 'ec' + ec_reaction + '_' + self.scan_results.fixed in ec_name:
flux_color = self._color_dict[flux]
color = hsv_to_rgb(flux_color[0],
flux_color[1] * 0.5,
flux_color[2])
ec_data = self.scan_results[ec_name]
categories = ['Elasticity Coefficients'] + \
flux_ld.categories[1:]
latex_expr = self._ltxe.expression_to_latex(ec_name)
ec_ld_dict[ec_name] = \
LineData(name=ec_name,
x_data=ec_data[:, 0],
y_data=ec_data[:, 1],
categories=categories,
properties={'label': '$%s$' % latex_expr,
'color': color})
self._ec_ld_dict = ec_ld_dict
def _make_rc_ld(self):
rc_ld_dict = {}
for rc_name in self.scan_results.rc_names:
for flux, flux_ld in self._flux_ld_dict.iteritems():
rc_flux = 'J' + flux[2:]
if 'rc' + rc_flux + '_' in rc_name:
flux_color = self._color_dict[flux]
color = hsv_to_rgb(flux_color[0],
flux_color[1],
flux_color[2] * 0.7)
rc_data = self.scan_results[rc_name]
categories = ['Response Coefficients'] + \
flux_ld.categories[1:]
latex_expr = self._ltxe.expression_to_latex(rc_name)
rc_ld_dict[rc_name] = \
LineData(name=rc_name,
x_data=rc_data[:, 0],
y_data=rc_data[:, 1],
categories=categories,
properties={'label': '$%s$' % latex_expr,
'color': color,
'ls': '--'})
self._rc_ld_dict = rc_ld_dict
def _make_prc_ld(self):
def get_prc_route(prc, flux, fixed):
without_prefix = prc.split('prc')[1]
without_flux = without_prefix.split(flux)[1][1:]
route = without_flux.split(fixed)[1][1:]
return route
prc_ld_dict = {}
for prc_name in self.scan_results.prc_names:
for flux, flux_ld in self._flux_ld_dict.iteritems():
prc_flux = 'J' + flux[2:]
if 'prc' + prc_flux + '_' + self.scan_results.fixed in prc_name:
route_reaction = get_prc_route(prc_name,
prc_flux,
self.scan_results.fixed)
flux_color = self._color_dict['J_' + route_reaction]
color = hsv_to_rgb(flux_color[0],
flux_color[1] * 0.5,
flux_color[2])
prc_data = self.scan_results[prc_name]
categories = ['Partial Response Coefficients'] + \
flux_ld.categories[1:]
latex_expr = self._ltxe.expression_to_latex(prc_name)
prc_ld_dict[prc_name] = \
LineData(name=prc_name,
x_data=prc_data[:, 0],
y_data=prc_data[:, 1],
categories=categories,
properties={'label': '$%s$' % latex_expr,
'color': color})
self._prc_ld_dict = prc_ld_dict
def _make_total_flux_ld(self):
total_flux_ld_dict = {}
col = self._color_dict['Total Supply']
total_flux_ld_dict['Total Supply'] = \
LineData(name='Total Supply',
x_data=self.scan_results.scan_range,
y_data=self.scan_results.total_supply,
categories=['Fluxes',
'Supply',
'Total Supply'],
properties={'label': '$%s$' % 'Total\,Supply',
'color': hsv_to_rgb(col[0], col[1],
col[2] * 0.9),
'ls': '--'})
col = self._color_dict['Total Demand']
total_flux_ld_dict['Total Demand'] = \
LineData(name='Total Demand',
x_data=self.scan_results.scan_range,
y_data=self.scan_results.total_demand,
categories=['Fluxes',
'Demand',
'Total Demand'],
properties={'label': '$%s$' % 'Total\,Demand',
'color': hsv_to_rgb(col[0], col[1],
col[2] * 0.9),
'ls': '--'})
self._total_flux_ld_dict = total_flux_ld_dict
def plot(self):
category_classes = OrderedDict([
('Supply/Demand', [
'Supply',
'Demand']),
('Reaction Blocks',
self.scan_results.flux_names +
['Total Supply', 'Total Demand']),
('Lines', [
'Fluxes',
'Elasticity Coefficients',
'Response Coefficients',
'Partial Response Coefficients'])])
line_data_list = [v for v in self._line_data_dict.itervalues()]
scan_fig = ScanFig(line_data_list,
ax_properties={'xlabel': '[%s]' %
self.scan_results.fixed.replace(
'_', ' '),
'ylabel': 'Rate',
'xscale': 'log',
'yscale': 'log',
'xlim': [self.scan_results.scan_min,
self.scan_results.scan_max],
'ylim': [self.scan_results.flux_min,
self.scan_results.flux_max * 2
]},
category_classes=category_classes,
base_name=self._analysis_method,
working_dir=self._working_dir)
scan_fig.toggle_category('Supply', True)
scan_fig.toggle_category('Demand', True)
scan_fig.toggle_category('Fluxes', True)
scan_fig.ax.axvline(self.scan_results.fixed_ss, ls=':', color='gray')
return scan_fig
def plot_decompose(self):
from warnings import warn, simplefilter
simplefilter('always', DeprecationWarning)
        warn('plot_decompose has been renamed to `do_mca_scan`, use that '
             'method in the future', DeprecationWarning, stacklevel=1)
simplefilter('default', DeprecationWarning)
return self.do_mca_scan()
@silence_print
def do_mca_scan(self):
ecs = []
ccs = []
prc_names = []
rc_names = []
rc_pos = []
reagent_of = [each[2:] for each in self.scan_results.flux_names]
all_reactions = reagent_of + \
getattr(self._model_map,
self.scan_results.fixed).isModifierOf()
arl = len(all_reactions)
strt = 0
stp = arl
for flux_reaction in self.scan_results.flux_names:
reaction = flux_reaction[2:]
rc_names.append('rcJ%s_%s' % (reaction, self.scan_results.fixed))
rc_pos.append(range(strt, stp))
strt += arl
stp += arl
for route_reaction in all_reactions:
ec = 'ec' + route_reaction + '_' + self.scan_results.fixed
cc = 'ccJ' + reaction + '_' + route_reaction
name = 'prcJ%s_%s_%s' % (reaction,
self.scan_results.fixed,
route_reaction)
# ecs.append(ec)
if ec not in ecs:
ecs.append(ec)
ccs.append(cc)
prc_names.append(name)
ec_len = len(ecs)
user_output = [self.scan_results.fixed] + ecs + ccs
scanner = pysces.Scanner(self.mod)
scanner.quietRun = True
scanner.addScanParameter(self.scan_results.fixed,
self.scan_results.scan_min,
self.scan_results.scan_max,
self.scan_results.scan_points,
log=True)
scanner.addUserOutput(*user_output)
scanner.Run()
ax_properties = {'ylabel': 'Coefficient Value',
'xlabel': '[%s]' %
self.scan_results.fixed.replace('_', ' '),
'xscale': 'log',
'yscale': 'linear',
'xlim': [self.scan_results.scan_min,
self.scan_results.scan_max]}
cc_ec_data_obj = Data2D(mod=self.mod,
column_names=user_output,
data_array=scanner.UserOutputResults,
ltxe=self._ltxe,
analysis_method=self._analysis_method,
ax_properties=ax_properties,
file_name='cc_ec_scan',
num_of_groups=ec_len,
working_dir=path.split(self._working_dir)[0])
rc_data = []
all_outs = scanner.UserOutputResults[:, 1:]
ec_outs = all_outs[:, :ec_len]
cc_outs = all_outs[:, ec_len:]
ec_positions = range(ec_len) * (len(prc_names)/ec_len)
for i, prc_name in enumerate(prc_names):
ec_col_data = ec_outs[:, ec_positions[i]]
cc_col_data = cc_outs[:, i]
# ec_col_data = outs[:, i]
# cc_col_data = outs[:, i + cc_s_pos]
col = ec_col_data * cc_col_data
rc_data.append(col)
temp = numpy.vstack(rc_data).transpose()
rc_data += [numpy.sum(temp[:, rc_pos[i]], axis=1) for i in
range(len(rc_names))]
rc_out_arr = [scanner.UserOutputResults[:, 0]] + rc_data
rc_out_arr = numpy.vstack(rc_out_arr).transpose()
rc_data_obj = Data2D(mod=self.mod,
column_names=[self.scan_results.fixed] + prc_names + rc_names,
data_array=rc_out_arr,
ltxe=self._ltxe,
analysis_method=self._analysis_method,
ax_properties=ax_properties,
file_name='prc_scan',
num_of_groups=ec_len,
working_dir=path.split(self._working_dir)[0])
#rc_data_obj._working_dir = path.split(self._working_dir)[0]
#cc_ec_data_obj._working_dir = path.split(self._working_dir)[0]
return rc_data_obj, cc_ec_data_obj
| bsd-3-clause |
mgotz/EBT_evaluation | ebttools/core.py | 1 | 21457 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2016 Malte Gotz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
While I believe that, due to the GPL exception provided by PyQt, the entire
package may be licensed under the MIT license, I want to make sure this module
is explicitly licensed as such, because it does not import PyQt and is therefore
independent of PyQt and the GPL.
core functions to load calibration data and calculate dose from scanned films
When used outside the EBT gui, import calibrations using the load_calibrations
function. Afterwards, construct a DoseArray using one of the imported
calibrations, the scanned image, DPI and phi0 info.
The calculate_dose function, performs the dose calculation and it is called by
DoseArray during construction.
"""
#get ready for python 3
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import codecs
from collections import OrderedDict
import logging
import numpy as np
import os
import scipy.ndimage
from math import floor, ceil
try:
from configparser import ConfigParser, MissingSectionHeaderError
except ImportError as e:
#a compatibility hack for python2 if it does not have conifgparser
import sys
if sys.version_info[0] == 2:
from ConfigParser import ConfigParser, MissingSectionHeaderError
else:
raise e
#define which array index corresponds to which color
rgbMap = {"red":0,"green":1,"blue":2}
def load_calibrations(path = None):
"""loads ini style calibration files from path
Parameters
----------
path : file path
either a single calibration file or a folder containing only calibration
files
Returns
-------
calibrations : dictionary of dictionaries
contains a named entry for each calibration with the respective values
as a dictionary
"""
#default calibrations location
if path is None:
path = os.path.join(os.path.dirname(__file__),"calibrations")
#make a new ordered dictionary, that way keys remain in some sensible order
#when iterating over the dict
calibrations = OrderedDict()
if os.path.isfile(path):
head, tail = os.path.split(path)
path = head
fileList = [tail]
elif os.path.isdir(path):
fileList = os.listdir(path)
else:
raise IOError(255, "path is neither directory nor file",path)
if len(fileList) == 0:
raise IOError(255,"no files found",path)
for fileName in fileList:
with codecs.open(os.path.join(path,fileName),"r","utf-8-sig") as configFile:
try:
config = ConfigParser(dict_type=OrderedDict)
config.readfp(configFile)
for key in config.sections():
calibrations[key] = dict(config.items(key))
except MissingSectionHeaderError:
logging.warning("{!s} not a readable calibration".format(fileName))
except UnicodeDecodeError:
logging.warning("{!s} not a readable utf-8 file".format(fileName))
except Exception as e:
logging.error("Exception occured trying to read {!s}: ".format(fileName)+str(e))
if len(calibrations) == 0:
raise IOError(255,"files contain no readable calibration",path)
return calibrations
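#Sketch of a calibration ini file as load_calibrations expects it (hypothetical
#values; the key names follow the calibration dictionary documented in
#calculate_dose below):
#
#    [example]
#    argument = netOD
#    function = exp_poly
#    channel = red
#    p1 = 10.5
#    p2 = 35.0
#    p3 = 2.5
#    black = 10
#    background = 0
#    unit = Gy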
def calculate_dose(calibration, scan, phi0):
""" calculates the dose from a scanned image and a given calibration
Parameters
----------
calibration : dictionary
describes the calibration with the keys: argument, p1, p2, p3, function
and channel, optional keys are black and background
scan : numpy array
the scanned image or part thereof
phi0 : scalar or length 3 iterable
I0 to calculate the net optical density
Returns
-------
dose : numpy array
array containing the dose
"""
try:
black = float(calibration["black"])
except KeyError:
black = 10
logging.warning("no definition of lowest pixel value found in calibration"
" falling back to default 10")
try:
background = float(calibration["background"])
except KeyError:
background = 0.0
logging.warning("no definition of background value found in calibration"
" falling back to default 0")
#check proper format of phi0 and allow treatment as a list in any case
try:
if len(phi0) != 3:
raise ValueError("phi0 should have length 3"+
" (one value for each channel) or be a scalar")
except TypeError: #raised if length is not applicable
phi0 = [phi0]
#get the channels in list
channels = calibration["channel"].replace(" ","").split(",")
#flags set by the different argument options, indicating whether a min and/or max
#is needed to guard against math errors; used later for the corresponding checks
needsMin = False
usesMax = False
#define number of channels used by the argument (also used in input checking)
numberOfChannels = 0
#define the argument as a function
if calibration["argument"] == "netOD":
arg = lambda x, x0: np.log10((x0-background)/
(np.clip(x,int(ceil(black)),int(floor(x0)))-background))
numberOfChannels = 1
needsMin = True #will result in log(-) otherwise
usesMax = True #not really needed, but gives negative dose values
if any(black > x0 for x0 in phi0):
phiStr = ["{:.2f}".format(x0) for x0 in phi0]
raise ValueError("phi0 ("+", ".join(phiStr) +") is smaller"+
" than the black value ({:.2f}). ".format(black) +
"Cannot procede.")
elif calibration["argument"] == "direct":
arg = lambda x, x0: (np.clip(x,black,None)-background)
numberOfChannels = 1
needsMin = False
usesMax = False
elif calibration["argument"] == "normalized":
arg = lambda x, x0: np.divide(np.clip(x,int(ceil(black)),None)-background,
x0-background)
numberOfChannels = 1
needsMin = False
usesMax = False
elif calibration["argument"] == "relativeNetOD":
arg = lambda x1, x2, x01, x02: (
np.log10((x01-background)/(np.clip(x1,int(ceil(black)),int(floor(x01)))-background))/
np.log10((x02-background)/(np.clip(x2,int(ceil(black)),int(floor(x02)))-background)))
numberOfChannels = 2
needsMin = False
usesMax = False
else:
raise ValueError("unknown specification for argument in calibration: "
+calibration["argument"])
#check for proper number of channels
if len(channels) != numberOfChannels:
raise ValueError(calibration["argument"] +
" requires exactly {:d} channels, ".format(numberOfChannels) +
"{:d} given".format(len(channels)))
#check for properly established lower limit
if needsMin:
if (background >= black) and np.any((scan-background) < 0):
raise ValueError("scan contains values below the background level, "+
"cannot compute "+calibration["argument"])
if needsMin and usesMax:
if any(black > x0 for x0 in phi0):
phiStr = ["{:.2f}".format(x0) for x0 in phi0]
raise ValueError("phi0 ("+", ".join(phiStr) +") is smaller"+
" than the black value ({:.2f}). ".format(black) +
"Cannot procede.")
p1 = float(calibration["p1"])
p2 = float(calibration["p2"])
p3 = float(calibration["p3"])
#define the actual function for dose calculation
if calibration["function"] == "exp_poly":
function = lambda x: p1*x+p2*np.power(x,p3)
elif calibration["function"] == "linear":
function = lambda x: p1+p2*x
elif calibration["function"] == "rational":
function = lambda x: np.divide(p1+x,p2+p3*x)
else:
raise ValueError("unknown specification for function in calibration: "
+calibration["function"])
#get the proper channels from the scan and select the corresponding phi0s
if all(color in rgbMap for color in channels):
chIndices = [rgbMap[color] for color in channels]
relevantScanData = [scan[:,:,index] for index in chIndices]
if len(chIndices) == 1 and len(phi0) == 1:
relevantPhi0 = phi0
elif len(phi0) != 3:
raise ValueError("phi0 must contain 3 values or be a scalar for "+
"single channel evaluation")
else:
relevantPhi0 = [phi0[index] for index in chIndices]
elif calibration["channel"] in ["grey","gray"]:
if len(scan.shape) > 2:
raise ValueError("calibration is for grey scale, but scan in multicolor")
relevantScanData = [scan]
if len(phi0) != 1:
raise ValueError("more than 1 phi0 provided for a grey-scale evaluation")
else:
relevantPhi0 = phi0
else:
raise ValueError("unknown specification for color channel in calibration: "
+calibration["channel"])
#use the function and the argument to calculate dose from the relevant data
return function(arg(*(relevantScanData+relevantPhi0)))
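#Example call of calculate_dose (hypothetical values; `calibs` comes from
#load_calibrations and `scan` is an RGB numpy array):
#
#    dose_map = calculate_dose(calibs["example"], scan, phi0=52000)
#
#For multi-channel arguments such as relativeNetOD, phi0 must be a length 3
#iterable with one value per color channel, e.g. phi0=(52000, 48000, 30000).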
class DoseArray(np.ndarray):
""" class to contain a dose distribution
this is basically a ndarray with some additional methods and properties
the dose can be used and accessed just like it were in a standard ndarray
"""
def __new__(cls,DPI,calib,img,phi0):
""" creates a dose_array from scan, calibration and phi0
Parameters
----------
DPI : scalar
dots per inch of the scan
calib : dictionary
describes the calibration with the keys: argument, p1, p2, p3,
function and channel
img : numpy array
the scanned image or part thereof
phi0 : scalar
I0 to calculate the net optical density
"""
#calculate dose and cast returned array to new class
dist = calculate_dose(calib,img,phi0)
obj = np.asarray(dist).view(cls)
if DPI > 0:
#set dot per centimeter
obj.DPC = DPI/2.54
else:
raise ValueError("DPI must be greater than 0")
try:
obj.unit = calib["unit"]
except KeyError:
obj.unit = "Gy"
return obj
def __array_finalize__(self,obj):
#gets called in various construction scenarios; obj is None if it is called from
#__new__ and DPC will then be properly set, otherwise set a default for DPC and unit
if obj is None: return
self.DPC = getattr(obj,'DPC',300./2.54)
self.unit = "Gy"
def rectangle_mask(self,x0, y0, width, height, angle=0.0):
"""get a mask for a rectangular area
Parameters
----------
x0 : scalar
x-value of rectangle center (in cm)
y0 : scalar
y-value of rectangle center (in cm)
width : scalar
half-width (x-dimension) of the rectangle
height : scalar
half-height (y-dimension) of the rectangle
angle : scalar, optional
counter clockwise rotation angle of the rectangle
Returns
-------
mask : ndarray
a mask to index the array and return a rectangular area
"""
angle_rad = angle*np.pi/180.
#create a boolean mask for a rectangle
#the mgrid creates two arrays with x and y positions of the pixels, respectively
#(y is the first index)
#if the lower edge of the pixels is at 0, the position of their center
#must be shifted by 0.5 pixels width.
y, x = np.mgrid[-y0+0.5/self.DPC:(self.shape[0]+.5)/self.DPC-y0:1.0/self.DPC,
-x0+0.5/self.DPC:(self.shape[1]+.5)/self.DPC-x0:1.0/self.DPC]
#condition for a rectangle
mask = np.logical_and(np.abs(x*np.cos(angle_rad)-y*np.sin(angle_rad)) <= width,
np.abs(x*np.sin(angle_rad)+y*np.cos(angle_rad)) <= height)
#ensure proper format:
mask = mask[0:self.shape[0],0:self.shape[1]]
return mask
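#Example use of the returned mask (hypothetical numbers, all distances in cm):
#
#    mask = dose.rectangle_mask(2.0, 1.5, 0.5, 0.25, angle=0.0)
#    mean_dose = float(dose[mask].mean())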
def rectangle_stats(self,x0, y0, width, height, angle=0.0, test=False):
""" get statistics from a rectangular area
Parameters
----------
x0 : scalar
x-value of rectangle center (in cm)
y0 : scalar
y-value of rectangle center (in cm)
width : scalar
half-width (x-dimension) of the rectangle
height : scalar
half-height (y-dimension) of the rectangle
angle : scalar, optional
counter clockwise rotation angle of the rectangle
test : bool, optional
if true the area is set to 0 (used for testing)
Returns
-------
sum : scalar
sum of all the pixels in the area
mean : scalar
average pixel value
std : scalar
standard deviation of the pixel values
min : scalar
minimal pixel value in the area
max : scalar
maximum pixel value in the area
"""
mask = self.rectangle_mask(x0, y0, width, height, angle)
if test:
self[mask] = 0
return(float(self[mask].sum()), float(self[mask].mean()),
float(self[mask].std()), float(self[mask].min()),
float(self[mask].max()))
def ellipse_stats(self,x0, y0, a, b, angle=0.0, test=False):
""" get statistics from an ellipse area
Parameters
----------
x0 : scalar
x-value of ellipse center (in cm)
y0 : scalar
y-value of ellipse center (in cm)
a : scalar
half-axis in x-direction
b : scalar
half-axis in y-direction
angle : scalar, optional
counter clockwise rotation angle of the ellipse
test : bool, optional
if true the area is set to 0 (used for testing)
Returns
-------
sum : scalar
sum of all the pixels in the area
mean : scalar
average pixel value
std : scalar
standard deviation of the pixel values
min : scalar
minimal pixel value in the area
max : scalar
maximum pixel value in the area
"""
angle_rad = angle*np.pi/180
#create a boolean mask for a circle
#the mgrid creates two arrays with x and y positions of the pixels, respectively
#(y is the first index)
#if the lower edge of the pixels is at 0, the position of their center
#must be shifted by 0.5 pixels width.
y, x = np.mgrid[-y0+0.5/self.DPC:(self.shape[0]+.5)/self.DPC-y0:1.0/self.DPC,
-x0+0.5/self.DPC:(self.shape[1]+.5)/self.DPC-x0:1.0/self.DPC]
#condition for an ellipse
mask = ((x*np.cos(angle_rad)-y*np.sin(angle_rad))**2/(a*a) +
(-x*np.sin(angle_rad)-y*np.cos(angle_rad))**2/(b*b)) <= 1.0
#ensure proper format:
mask = mask[0:self.shape[0],0:self.shape[1]]
if test:
self[mask] = 0
return(float(self[mask].sum()), float(self[mask].mean()),
float(self[mask].std()), float(self[mask].min()),
float(self[mask].max()))
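#Example call of ellipse_stats (hypothetical numbers, center and half-axes in cm):
#
#    total, mean, std, low, high = dose.ellipse_stats(2.0, 1.0, 0.5, 0.2)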
def profile(self, x0, y0, x1, y1, interpolation="nearest",test=False):
""" returns a profile along a line
Parameters
----------
x0 : scalar
x start of line (in cm)
y0 : scalar
y start of line (in cm)
x1 : scalar
x end of line (in cm)
y1 : scalar
y end of line (in cm)
interpolation : string, optional
interpolation method used, nearest, linear or spline
test : bool, optional
if true the profile is set to 0 (used for testing)
Returns
-------
profile : ndarray
the dose values along the specified line
"""
#transform to image indexes
coords = (np.array([y0,x0,y1,x1])*self.DPC).astype(int) #builtin int instead of the removed np.int alias
#force coordinates to match array, i.e. avoid exceeding index range
for idx, coord in enumerate(coords):
if coord > self.shape[idx%2]:
coord = self.shape[idx%2]-1
while coord < 0:
coord += self.shape[idx%2]
coords[idx] = coord
#make two arrays with indexes from start to end coordinates
length = int(np.hypot(coords[3]-coords[1], coords[2]-coords[0]))
x, y = np.linspace(coords[1], coords[3], length), np.linspace(coords[0], coords[2], length)
if test:
self[y.astype(int),x.astype(int)]=0.0
if interpolation == "nearest":
return self[y.astype(int),x.astype(int)]
elif interpolation == "linear":
return scipy.ndimage.map_coordinates(self, np.vstack((x,y)),order=1)
elif interpolation == "spline":
return scipy.ndimage.map_coordinates(self, np.vstack((x,y)))
else:
logging.warning("unkown interpolation: {!s} using nearest".format(interpolation))
return self[y.astype(np.int),x.astype(np.int)]
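#Example profile extraction (hypothetical numbers; start and end point in cm):
#
#    prof = dose.profile(0.5, 0.5, 3.5, 3.5, interpolation="linear")
#
#Internally the cm coordinates are converted to pixel indices via DPC = DPI/2.54
#before the values along the line are sampled.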
# for testing
if __name__ == '__main__':
#load some calibrations
calibs = load_calibrations(os.path.join(os.path.abspath(os.path.curdir),"calibrations"))
#create a simple scan (~4x4 cm)
scan = np.zeros((470,470,3),dtype="uint8")
#create a coordinate grid and a 2D gaussian with noise
x,y = np.meshgrid(np.linspace(0,470,470),np.linspace(0,470,470))
scan[:,:,0] = (-120.0*np.exp(-np.divide(np.square(x-235)+np.square(y-235),
2*50**2))
+200+np.random.rand(470,470)*5.0)
doseDistribution = DoseArray(300.,calibs["example"],scan,255)
doseDistribution.rectangle_stats(1.0,1.0,0.5,0.5,0.,True)
mask = doseDistribution.rectangle_mask(0.4,0.5,0.2,0.1,0.0)
print (doseDistribution[mask].shape)
summed, avg, std, minimum, maximum = doseDistribution.rectangle_stats(0.4,0.5,0.2,0.1,45.0,False)
print (summed, avg, std, minimum, maximum)
doseDistribution.ellipse_stats(2.0,1.0,0.5,0.2,0.0,False)
x0 = 0.5
x1 = 3.5
y0 = 0.5
y1 = 3.5
profile1 = doseDistribution.profile(x0,y0,x1,y1,)
profile2 = doseDistribution.profile(x0,y0,x1,y1,interpolation="spline")
profile3 = doseDistribution.profile(x0,y0,x1,y1,interpolation="linear")
import matplotlib.pyplot as plt
fig1, ax = plt.subplots(nrows = 2)
ax[0].imshow(doseDistribution,extent=[0,470.*2.54/300.,470.*2.54/300.,0],
interpolation="nearest",cmap="inferno")
x = np.linspace(0,np.hypot(x1-x0,y1-y0),len(profile1))
ax[1].plot(x,profile1,label="nearest")
ax[1].plot(x,profile2,label="spline")
ax[1].plot(x,profile3,label="linear")
ax[1].legend()
plt.show()
| mit |