repo_name (string, 7-79 chars) | path (string, 4-179 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 959-798k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
bsipocz/statsmodels | statsmodels/tools/testing.py | 23 | 1443 | """assert functions from numpy and pandas testing
"""
import re
from distutils.version import StrictVersion
import numpy as np
import numpy.testing as npt
import pandas
import pandas.util.testing as pdt
# for pandas version check
def strip_rc(version):
return re.sub(r"rc\d+$", "", version)
def is_pandas_min_version(min_version):
'''check whether pandas is at least min_version
'''
from pandas.version import short_version as pversion
return StrictVersion(strip_rc(pversion)) >= min_version
# local copies, all unchanged
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_equal, assert_array_less,
assert_array_max_ulp, assert_raises, assert_string_equal, assert_warns)
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
if not is_pandas_min_version('0.14.1'):
        npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
else:
if isinstance(desired, pandas.Index):
pdt.assert_index_equal(actual, desired)
elif isinstance(desired, pandas.Series):
pdt.assert_series_equal(actual, desired, **kwds)
elif isinstance(desired, pandas.DataFrame):
pdt.assert_frame_equal(actual, desired, **kwds)
else:
            npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
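# Editorial usage sketch (not part of the original module): the adjusted
# assert_equal dispatches on the type of ``desired``, so pandas containers are
# compared with the pandas-aware helpers, e.g.
#
#     assert_equal(result_index, pandas.Index([0, 1, 2]))
#     assert_equal(result_series, pandas.Series([1.0, 2.0]))
#     assert_equal(result_frame, pandas.DataFrame({'a': [1, 2]}))
#
# where the ``result_*`` names stand for hypothetical objects produced by the
# code under test.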
| bsd-3-clause |
czhengsci/pymatgen | pymatgen/command_line/critic2_caller.py | 5 | 22763 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import os
import subprocess
import warnings
import numpy as np
import glob
import itertools
import matplotlib.pyplot as plt
from scipy.spatial import KDTree
from pymatgen.core import Structure
from pymatgen.io.vasp.outputs import Chgcar
from pymatgen.io.vasp.inputs import Potcar
from pymatgen.symmetry.groups import PointGroup
from pymatgen.analysis.graphs import StructureGraph
from monty.os.path import which
from monty.dev import requires
from monty.json import MSONable
from monty.tempfile import ScratchDir
from enum import Enum
from textwrap import dedent
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
"""
This module implements an interface to the critic2 Bader analysis code.
For most Bader analysis purposes, users are referred to
pymatgen.command_line.bader_caller instead; this module is for advanced
usage requiring identification of critical points in the charge density.
This module depends on a compiled critic2 executable available in the path.
Please follow the instructions at https://github.com/aoterodelaroza/critic2
to compile or, if using macOS and homebrew, use `brew tap homebrew/science`
followed by `brew install critic2`.
New users are *strongly* recommended to read the critic2 manual first.
In brief,
* critic2 searches for critical points in charge density
* a critical point can be one of four types: nucleus, bond, ring
or cage
* it does this by seeding locations for likely critical points
and then searching in these regions
* there are two lists of critical points in the output, a list
of non-equivalent points (with in-depth information about the
field at those points), and a full list of points generated
by the appropriate symmetry operations
* connectivity between these points is also provided when
appropriate (e.g. the two nucleus critical points linked to
a bond critical point)
* critic2 can do many other things besides
If you use this module, please cite the following:
A. Otero-de-la-Roza, E. R. Johnson and V. Luaña,
Comput. Phys. Commun. 185, 1007-1018 (2014)
(http://dx.doi.org/10.1016/j.cpc.2013.10.026)
A. Otero-de-la-Roza, M. A. Blanco, A. Martín Pendás and
V. Luaña, Comput. Phys. Commun. 180, 157–166 (2009)
(http://dx.doi.org/10.1016/j.cpc.2008.07.018)
"""
__author__ = "Matthew Horton"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "July 2017"
class Critic2Caller:
@requires(which("critic2"), "Critic2Caller requires the executable critic to be in the path. "
"Please follow the instructions at https://github.com/aoterodelaroza/critic2.")
def __init__(self, structure, chgcar=None, chgcar_ref=None,
user_input_settings=None, write_cml=False):
"""
Run Critic2 in automatic mode on a supplied structure, charge
density (chgcar) and reference charge density (chgcar_ref).
The reason for a separate reference field is that in
VASP, the CHGCAR charge density only contains valence
electrons and may be missing substantial charge at
nuclei leading to misleading results. Thus, a reference
field is commonly constructed from the sum of AECCAR0
and AECCAR2 which is the total charge density, but then
the valence charge density is used for the final analysis.
If chgcar_ref is not supplied, chgcar will be used as the
reference field. If chgcar is not supplied, the promolecular
charge density will be used as the reference field -- this can
often still give useful results if only topological information
is wanted.
User settings is a dictionary that can contain:
* GRADEPS, float (field units), gradient norm threshold
* CPEPS, float (Bohr units in crystals), minimum distance between
critical points for them to be equivalent
* NUCEPS, same as CPEPS but specifically for nucleus critical
          points (critic2 default is dependent on grid dimensions)
* NUCEPSH, same as NUCEPS but specifically for hydrogen nuclei
since associated charge density can be significantly displaced
from hydrogen nucleus
* EPSDEGEN, float (field units), discard critical point if any
element of the diagonal of the Hessian is below this value,
useful for discarding points in vacuum regions
* DISCARD, float (field units), discard critical points with field
value below this value, useful for discarding points in vacuum
regions
* SEED, list of strings, strategies for seeding points, default
is ['WS 1', 'PAIR 10'] which seeds critical points by
sub-dividing the Wigner-Seitz cell and between every atom pair
closer than 10 Bohr, see critic2 manual for more options
:param structure: Structure to analyze
:param chgcar: Charge density to use for analysis. If None, will
use promolecular density.
:param chgcar_ref: Reference charge density. If None, will use
chgcar as reference.
        :param user_input_settings (dict): optional settings to override the defaults described above
:param write_cml (bool): Useful for debug, if True will write all
critical points to a file 'table.cml' in the working directory
useful for visualization
"""
settings = {'CPEPS': 0.1, 'SEED': ["WS", "PAIR DIST 10"]}
if user_input_settings:
settings.update(user_input_settings)
# Load crystal structure
input_script = ["crystal POSCAR"]
# Load data to use as reference field
if chgcar_ref:
input_script += ["load VASPCHG CHGCAR_ref id chgcar_ref",
"reference chgcar_ref"]
# Load data to use for analysis
if chgcar:
input_script += ["load VASPCHG CHGCAR_int id chgcar",
"integrable chgcar"]
# Command to run automatic analysis
auto = "auto "
for k, v in settings.items():
if isinstance(v, list):
for item in v:
auto += '{} {} '.format(k, item)
else:
auto += '{} {} '.format(k, v)
input_script += [auto]
if write_cml:
input_script += ["cpreport ../table.cml cell border graph"]
input_script = "\n".join(input_script)
with ScratchDir(".") as temp_dir:
os.chdir(temp_dir)
with open('input_script.cri', 'w') as f:
f.write(input_script)
structure.to(filename='POSCAR')
if chgcar:
chgcar.write_file('CHGCAR_int')
if chgcar_ref:
chgcar_ref.write_file('CHGCAR_ref')
args = ["critic2", "input_script.cri"]
            rs = subprocess.Popen(args,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  stdin=subprocess.PIPE, close_fds=True)
stdout, stderr = rs.communicate()
stdout = stdout.decode()
if stderr:
stderr = stderr.decode()
warnings.warn(stderr)
if rs.returncode != 0:
raise RuntimeError("critic2 exited with return code {}.".format(rs.returncode))
self._stdout = stdout
self._stderr = stderr
self.output = Critic2Output(structure, stdout)
@classmethod
def from_path(cls, path, suffix=''):
"""
Convenience method to run critic2 analysis on a folder containing
typical VASP output files.
This method will:
        1. Look for files CHGCAR, AECCAR0, AECCAR2, POTCAR or their gzipped
counterparts.
2. If AECCAR* files are present, constructs a temporary reference
file as AECCAR0 + AECCAR2
3. Runs critic2 analysis twice: once for charge, and a second time
for the charge difference (magnetization density).
:param path: path to folder to search in
:param suffix: specific suffix to look for (e.g. '.relax1' for
'CHGCAR.relax1.gz')
:return:
"""
def _get_filepath(filename, warning, path=path, suffix=suffix):
paths = glob.glob(os.path.join(path, filename + suffix + '*'))
if not paths:
warnings.warn(warning)
return None
if len(paths) > 1:
# using reverse=True because, if multiple files are present,
# they likely have suffixes 'static', 'relax', 'relax2', etc.
# and this would give 'static' over 'relax2' over 'relax'
# however, better to use 'suffix' kwarg to avoid this!
paths.sort(reverse=True)
                warnings.warn('Multiple files detected, using {}'.format(os.path.basename(paths[0])))
path = paths[0]
return path
chgcar_path = _get_filepath('CHGCAR', 'Could not find CHGCAR!')
chgcar = Chgcar.from_file(chgcar_path)
aeccar0_path = _get_filepath('AECCAR0', 'Could not find AECCAR0, interpret Bader results with caution.')
aeccar0 = Chgcar.from_file(aeccar0_path) if aeccar0_path else None
aeccar2_path = _get_filepath('AECCAR2', 'Could not find AECCAR2, interpret Bader results with caution.')
aeccar2 = Chgcar.from_file(aeccar2_path) if aeccar2_path else None
chgcar_ref = aeccar0.linear_add(aeccar2) if (aeccar0 and aeccar2) else None
return cls(chgcar.structure, chgcar, chgcar_ref)
class CriticalPointType(Enum):
nucleus = "nucleus" # (3, -3)
bond = "bond" # (3, -1)
ring = "ring" # (3, 1)
cage = "cage" # (3, 3)
class CriticalPoint(MSONable):
def __init__(self, index, type, frac_coords, point_group,
multiplicity, field, field_gradient,
coords=None, field_hessian=None):
"""
Class to characterise a critical point from a topological
analysis of electron charge density.
Note this class is usually associated with a Structure, so
has information on multiplicity/point group symmetry.
:param index: index of point
:param type: type of point, given as a string
:param coords: Cartesian co-ordinates in Angstroms
:param frac_coords: fractional co-ordinates
:param point_group: point group associated with critical point
:param multiplicity: number of equivalent critical points
:param field: value of field at point (f)
:param field_gradient: gradient of field at point (grad f)
:param field_hessian: hessian of field at point (del^2 f)
"""
self.index = index
self._type = type
self.coords = coords
self.frac_coords = frac_coords
self.point_group = point_group
self.multiplicity = multiplicity
self.field = field
self.field_gradient = field_gradient
self.field_hessian = field_hessian
@property
def type(self):
return CriticalPointType(self._type)
def __str__(self):
return "Critical Point: {} ({})".format(self.type.name, self.frac_coords)
@property
def laplacian(self):
return np.trace(self.field_hessian)
@property
def ellipticity(self):
'''
Most meaningful for bond critical points,
can be physically interpreted as e.g. degree
of pi-bonding in organic molecules. Consult
literature for more information.
:return:
'''
        eig = np.linalg.eigvals(self.field_hessian)
        eig.sort()
        return eig[0] / eig[1] - 1
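# Editorial note (not in the original source): at a bond critical point the
# field Hessian has eigenvalues lambda_1 <= lambda_2 < 0 < lambda_3, so after
# the ascending sort above the returned ellipticity is
#
#     epsilon = lambda_1 / lambda_2 - 1
#
# which is >= 0 and vanishes for a cylindrically symmetric bond.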
class Critic2Output(MSONable):
def __init__(self, structure, critic2_stdout):
"""
This class is used to store results from the Critic2Caller.
To explore the bond graph, use the "structure_graph"
method, which returns a user-friendly StructureGraph
class with bonding information. By default, this returns
a StructureGraph with edge weights as bond lengths, but
can optionally return a graph with edge weights as any
property supported by the `CriticalPoint` class, such as
bond ellipticity.
This class also provides an interface to explore just the
non-symmetrically-equivalent critical points via the
`critical_points` attribute, and also all critical
points (via nodes dict) and connections between them
(via edges dict). The user should be familiar with critic2
before trying to understand these.
Indexes of nucleus critical points in the nodes dict are the
same as the corresponding sites in structure, with indices of
other critical points arbitrarily assigned.
:param structure: associated Structure
:param critic2_stdout: stdout from running critic2 in automatic
mode
"""
self.structure = structure
self._critic2_stdout = critic2_stdout
self.nodes = {}
self.edges = {}
self._parse_stdout(critic2_stdout)
def structure_graph(self, edge_weight="bond_length", edge_weight_units="Å"):
"""
A StructureGraph object describing bonding information
in the crystal. Lazily constructed.
:param edge_weight: a value to store on the Graph edges,
by default this is "bond_length" but other supported
values are any of the attributes of CriticalPoint
:return:
"""
sg = StructureGraph.with_empty_graph(self.structure, name="bonds",
edge_weight_name=edge_weight,
edge_weight_units=edge_weight_units)
edges = self.edges.copy()
idx_to_delete = []
# check for duplicate bonds
for idx, edge in edges.items():
unique_idx = self.nodes[idx]['unique_idx']
# only check edges representing bonds, not rings
if self.critical_points[unique_idx].type == CriticalPointType.bond:
if idx not in idx_to_delete:
for idx2, edge2 in edges.items():
if idx != idx2 and edge == edge2:
idx_to_delete.append(idx2)
warnings.warn("Duplicate edge detected, try re-running "
"critic2 with custom parameters to fix this. "
"Mostly harmless unless user is also "
"interested in rings/cages.")
logger.debug("Duplicate edge between points {} (unique point {})"
"and {} ({}).".format(idx, self.nodes[idx]['unique_idx'],
idx2, self.nodes[idx2]['unique_idx']))
# and remove any duplicate bonds present
for idx in idx_to_delete:
del edges[idx]
for idx, edge in edges.items():
unique_idx = self.nodes[idx]['unique_idx']
# only add edges representing bonds, not rings
if self.critical_points[unique_idx].type == CriticalPointType.bond:
from_idx = edge['from_idx']
to_idx = edge['to_idx']
from_lvec = edge['from_lvec']
to_lvec = edge['to_lvec']
if edge_weight == "bond_length":
weight = self.structure.get_distance(from_idx, to_idx)
else:
weight = getattr(self.critical_points[unique_idx],
edge_weight, None)
sg.add_edge(from_idx, to_idx,
from_jimage=from_lvec, to_jimage=to_lvec,
weight=weight)
return sg
def get_critical_point_for_site(self, n):
return self.critical_points[self.nodes[n]['unique_idx']]
def _parse_stdout(self, stdout):
stdout = stdout.split("\n")
# NOTE WE ARE USING 0-BASED INDEXING:
# This is different from critic2 which
# uses 1-based indexing, so all parsed
# indices have 1 subtracted.
# Parsing happens in two stages:
# 1. We construct a list of unique critical points
# (i.e. non-equivalent by the symmetry of the crystal)
# and the properties of the field at those points
# 2. We construct a list of nodes and edges describing
# all critical points in the crystal
        # Steps 1. and 2. are essentially independent, except
# that the critical points in 2. have a pointer to their
# associated unique critical point in 1. so that more
# information on that point can be retrieved if necessary.
unique_critical_points = []
# parse unique critical points
for i, line in enumerate(stdout):
if "* Critical point list, final report (non-equivalent cps)" in line:
start_i = i + 4
elif "* Analysis of system bonds" in line:
end_i = i - 2
# if start_i and end_i haven't been found, we
# need to re-evaluate assumptions in this parser!
for i, line in enumerate(stdout):
if i >= start_i and i <= end_i:
l = line.replace("(", "").replace(")", "").split()
unique_idx = int(l[0]) - 1
point_group = l[1]
# type = l[2] # type from definition of critical point e.g. (3, -3)
type = l[3] # type from name, e.g. nucleus
frac_coords = [float(l[4]), float(l[5]), float(l[6])]
multiplicity = float(l[7])
# name = float(l[8])
field = float(l[9])
field_gradient = float(l[10])
# laplacian = float(l[11])
point = CriticalPoint(unique_idx, type, frac_coords, point_group,
multiplicity, field, field_gradient)
unique_critical_points.append(point)
# TODO: may be other useful information to parse here too
for i, line in enumerate(stdout):
if '+ Critical point no.' in line:
unique_idx = int(line.split()[4]) - 1
elif "Hessian:" in line:
l1 = list(map(float, stdout[i + 1].split()))
l2 = list(map(float, stdout[i + 2].split()))
l3 = list(map(float, stdout[i + 3].split()))
hessian = [[l1[0], l1[1], l1[2]],
[l2[0], l2[1], l2[2]],
[l3[0], l3[1], l3[2]]]
unique_critical_points[unique_idx].field_hessian = hessian
self.critical_points = unique_critical_points
# parse graph connecting critical points
for i, line in enumerate(stdout):
if "* Complete CP list, bcp and rcp connectivity table" in line:
start_i = i + 3
elif "* Attractor connectivity matrix" in line:
end_i = i - 2
# if start_i and end_i haven't been found, we
# need to re-evaluate assumptions in this parser!
# Order of nuclei provided by critic2 doesn't
# necessarily match order of sites in Structure.
# We perform a mapping from one to the other,
# and re-index all nodes accordingly.
node_mapping = {} # critic2_index:structure_index
# ensure frac coords are in [0,1] range
frac_coords = np.array(self.structure.frac_coords) % 1
kd = KDTree(frac_coords)
for i, line in enumerate(stdout):
if i >= start_i and i <= end_i:
l = line.split()
if l[2] == "n":
critic2_idx = int(l[0]) - 1
frac_coord = np.array([float(l[3]), float(l[4]), float(l[5])]) % 1
node_mapping[critic2_idx] = kd.query(frac_coord)[1]
if len(node_mapping) != len(self.structure):
warnings.warn("Check that all sites in input structure have "
"been detected by critic2.")
def _remap(critic2_idx):
return node_mapping.get(critic2_idx, critic2_idx)
for i, line in enumerate(stdout):
if i >= start_i and i <= end_i:
l = line.replace("(", "").replace(")", "").split()
idx = _remap(int(l[0]) - 1)
unique_idx = int(l[1]) - 1
frac_coords = [float(l[3]), float(l[4]), float(l[5])]
self._add_node(idx, unique_idx, frac_coords)
if len(l) > 6:
from_idx = _remap(int(l[6]) - 1)
to_idx = _remap(int(l[10]) - 1)
self._add_edge(idx, from_idx=from_idx, from_lvec=(int(l[7]), int(l[8]), int(l[9])),
to_idx=to_idx, to_lvec=(int(l[11]), int(l[12]), int(l[13])))
self._map = node_mapping
def _add_node(self, idx, unique_idx, frac_coords):
"""
Add information about a node describing a critical point.
:param idx: unique index
:param unique_idx: index of unique CriticalPoint,
used to look up more information of point (field etc.)
        :param frac_coords: fractional co-ordinates of point
:return:
"""
self.nodes[idx] = {'unique_idx': unique_idx, 'frac_coords': frac_coords}
def _add_edge(self, idx, from_idx, from_lvec, to_idx, to_lvec):
"""
Add information about an edge linking two critical points.
This actually describes two edges:
from_idx ------ idx ------ to_idx
However, in practice, from_idx and to_idx will typically be
atom nuclei, with the center node (idx) referring to a bond
critical point. Thus, it will be more convenient to model
this as a single edge linking nuclei with the properties
of the bond critical point stored as an edge attribute.
:param idx: index of node
:param from_idx: from index of node
:param from_lvec: vector of lattice image the from node is in
as tuple of ints
:param to_idx: to index of node
:param to_lvec: vector of lattice image the to node is in as
tuple of ints
:return:
"""
self.edges[idx] = {'from_idx': from_idx, 'from_lvec': from_lvec,
'to_idx': to_idx, 'to_lvec': to_lvec}
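# Editorial exploration sketch (not part of the original module), assuming
# ``output`` is a Critic2Output obtained as in the sketch near the top of
# this module:
#
#     sg = output.structure_graph()               # bonds as a StructureGraph
#     bond_cps = [cp for cp in output.critical_points
#                 if cp.type == CriticalPointType.bond]
#     eps = [cp.ellipticity for cp in bond_cps]   # bond ellipticities
#     cp0 = output.get_critical_point_for_site(0) # nucleus CP for site 0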
| mit |
TinyOS-Camp/DDEA-DEV | Archive/[14_09_12] DDEA_example_code/df_data_analysis_gsbc.py | 1 | 17508 | # coding: utf-8
"""
======================================================================
Learning and Visualizing the BMS sensor-time-weather data structure
======================================================================
This example employs several unsupervised learning techniques to extract
the energy data structure from variations in Building Automation System (BAS)
and historical weather data.
The fundamental timelet for analysis is 15 min, referred to as Q.
** currently uses H (Hour) as the fundamental timelet, needs to change later **
The following analysis steps are designed to be executed.
Data Pre-processing
--------------------------
- Data Retrieval and Standardization
- Outlier Detection
- Interpolation
Data Summarization
--------------------------
- Data Transformation
- Sensor Clustering
Model Discovery Bayesian Network
--------------------------
- Automatic State Classification
- Structure Discovery and Analysis
"""
#print(__doc__)
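# Editorial sketch (not part of the original script): the docstring above maps
# roughly onto the calls used further down in this file,
#
#   data_dict, purge_list = construct_data_dict(input_files, ANS_START_T,
#                                               ANS_END_T, TIMELET_INV, ...)  # pre-processing
#   bldg_out = data_summerization(bldg_key, data_dict, PROC_AVG=True)         # summarization
#   anal_out = bn_prob_analysis(bldg_obj, sig_tag_='avg')                     # BN model discovery
#
# with all names taken from this file; arguments elided here are filled in below.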
# Author: Deokwooo Jung [email protected]
##################################################################
# General Moduels
from __future__ import division # To force floating-point division
import os
import sys
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
import uuid
import pylab as pl
from scipy import signal
from scipy import stats
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from multiprocessing import Pool
#from datetime import datetime
import datetime as dt
from dateutil import tz
import shlex, subprocess
import mytool as mt
import time
import retrieve_weather as rw
import itertools
import calendar
import random
from matplotlib.collections import LineCollection
#from stackedBarGraph import StackedBarGrapher
import pprint
#import radar_chart
# Custom library
from data_tools import *
from data_retrieval import *
from pack_cluster import *
from data_preprocess import *
from shared_constants import *
from pre_bn_state_processing import *
from data_summerization import *
##################################################################
# Interactive mode for plotting
plt.ion()
##################################################################
# Processing Configuration Settings
##################################################################
# Analysis buildings set
# Main building x where x is 1-16
# Conference bldg
# Machine Room
# All Power Measurements
IS_USING_SAVED_DICT=-1
print 'Extract a common time range...'
##################################################################
# List buildings and substation names
gsbc_bgid_dict=mt.loadObjectBinaryFast('gsbc_bgid_dict.bin')
PRE_BN_STAGE=0
if PRE_BN_STAGE==0:
bldg_key_set=[]
print 'skip PRE_BN_STAGE....'
else:
bldg_key_set=gsbc_bgid_dict.keys()
#########################################
# 1. Electricity Room and Machine Room - 'elec_machine_room_bldg'
#########################################
#########################################
# 2. Conference Building - 'conference_bldg'
#########################################
#########################################
# 3. Main Building - 'main_bldg_x'
#########################################
for bldg_key in bldg_key_set:
print '###############################################################################'
print '###############################################################################'
print 'Processing '+ bldg_key+'.....'
print '###############################################################################'
print '###############################################################################'
bldg_id=[key_val[1] for key_val in gsbc_bgid_dict.items() if key_val[0]==bldg_key][0]
temp=''
for bldg_id_temp in bldg_id:
temp=temp+subprocess.check_output('ls '+DATA_DIR+'*'+bldg_id_temp+'*.bin', shell=True)
input_files_temp =shlex.split(temp)
# Get rid of duplicated files
input_files_temp=list(set(input_files_temp))
input_files=input_files_temp
#input_files=['../gvalley/Binfiles/'+temp for temp in input_files_temp]
IS_USING_SAVED_DICT=0
print 'Extract a common time range...'
# Analysis period
ANS_START_T=dt.datetime(2013,6,1,0)
ANS_END_T=dt.datetime(2013,11,15,0)
# Interval of timelet, currently set to 1 Hour
TIMELET_INV=dt.timedelta(minutes=30)
print TIMELET_INV, 'time slot interval is set for this data set !!'
print '-------------------------------------------------------------------'
PROC_AVG=True
PROC_DIFF=False
###############################################################################
# This directly searches files from bin file name
print '###############################################################################'
print '# Data Pre-Processing'
print '###############################################################################'
# define input_files to be read
if IS_USING_SAVED_DICT==0:
ANS_START_T,ANS_END_T,input_file_to_be_included=\
time_range_check(input_files,ANS_START_T,ANS_END_T,TIMELET_INV)
print 'time range readjusted to (' ,ANS_START_T, ', ', ANS_END_T,')'
start__dictproc_t=time.time()
if IS_SAVING_INDIVIDUAL==True:
data_dict=construct_data_dict_2\
(input_files,ANS_START_T,ANS_END_T,TIMELET_INV,binfilename='data_dict', \
IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
else:
data_dict,purge_list=\
construct_data_dict(input_file_to_be_included,ANS_START_T,ANS_END_T,TIMELET_INV,\
binfilename='data_dict',IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
end__dictproc_t=time.time()
print 'the time of construct data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
elif IS_USING_SAVED_DICT==1:
print 'Loading data dictionary......'
start__dictproc_t=time.time()
data_dict = mt.loadObjectBinaryFast('data_dict.bin')
end__dictproc_t=time.time()
print 'the time of loading data dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
else:
print 'Skip data dict'
CHECK_DATA_FORMAT=0
if CHECK_DATA_FORMAT==1:
if IS_SAVING_INDIVIDUAL==True:
list_of_wrong_data_format=verify_data_format_2(data_used,data_dict,time_slots)
else:
list_of_wrong_data_format=verify_data_format(data_used,data_dict,time_slots)
if len(list_of_wrong_data_format)>0:
print 'Measurement list below'
print '----------------------------------------'
print list_of_wrong_data_format
raise NameError('Errors in data format')
Data_Summarization=1
if Data_Summarization==1:
bldg_out=data_summerization(bldg_key,data_dict,PROC_AVG=True,PROC_DIFF=False)
print '###############################################################################'
print '# Model_Discovery'
print '###############################################################################'
gsbc_key_dict=mt.loadObjectBinaryFast('./gsbc_key_dict_all.bin')
# Analysis of BN network result
def convert_gsbc_name(id_labels):
if isinstance(id_labels,list)==False:
id_labels=[id_labels]
out_name=[gsbc_key_dict[key_label_[2:]] if key_label_[2:] \
in gsbc_key_dict else key_label_ for key_label_ in id_labels ]
return out_name
Model_Discovery=1
if Model_Discovery==1:
pwr_key='30......$';dict_dir='./GSBC/'
LOAD_BLDG_OBJ=1
if LOAD_BLDG_OBJ==1:
print 'not yet ready'
bldg_=mt.loadObjectBinaryFast(PROC_OUT_DIR+'gsbc_bldg_obj.bin')
else:
bldg_dict={}
for bldg_load_key in gsbc_bgid_dict.keys():
print 'Building for ',bldg_load_key, '....'
try:
bldg_tag='gsbc_'+bldg_load_key
bldg_load_out=mt.loadObjectBinaryFast(dict_dir+bldg_load_key+'_out.bin')
except:
print 'not found, skip....'
pass
mt.saveObjectBinaryFast(bldg_load_out['data_dict'],dict_dir+'data_dict.bin')
if 'avgdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['avgdata_dict'],dict_dir+'avgdata_dict.bin')
if 'diffdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['avgdata_dict'],dict_dir+'diffdata_dict.bin')
pname_key= pwr_key
bldg_dict.update({bldg_tag:create_bldg_obj(dict_dir,bldg_tag,pname_key)})
bldg_=obj(bldg_dict)
#cmd_str='bldg_.'+bldg_tag+'.data_out=obj(bldg_load_out)'
#exec(cmd_str)
cmd_str='bldg_obj=bldg_.'+bldg_tag
exec(cmd_str)
anal_out={}
if 'avgdata_dict' in bldg_load_out.keys():
anal_out.update({'avg':bn_prob_analysis(bldg_obj,sig_tag_='avg')})
if 'diffdata_dict' in bldg_load_out.keys():
anal_out.update({'diff':bn_prob_analysis(bldg_obj,sig_tag_='diff')})
cmd_str='bldg_.'+bldg_tag+'.anal_out=obj(anal_out)'
exec(cmd_str)
cmd_str='bldg_.'+'convert_name=convert_gsbc_name'
exec(cmd_str)
mt.saveObjectBinaryFast(bldg_ ,PROC_OUT_DIR+'gsbc_bldg_obj.bin')
mt.saveObjectBinaryFast('LOAD_BLDG_OBJ' ,PROC_OUT_DIR+'gsbc_bldg_obj_is_done.txt')
#######################################################################################
# Analysis For GSBC
#######################################################################################
# Analysis of BN network result
BN_ANAL=1
if BN_ANAL==1:
# Plotting individual LHs
PLOTTING_LH=0
if PLOTTING_LH==1:
plotting_bldg_lh(bldg_,attr_class='sensor',num_picks=30)
plotting_bldg_lh(bldg_,attr_class='time',num_picks=30)
plotting_bldg_lh(bldg_,attr_class='weather',num_picks=30)
PLOTTING_BN=1
if PLOTTING_BN==1:
plotting_bldg_bn(bldg_)
More_BN_ANAL=0
if More_BN_ANAL==1:
#######################################################################################
# Analysis For GSBC
#######################################################################################
#bldg_obj=bldg_.GSBC_main_bldg_power_machine_room
bldg_obj=bldg_.GSBC_main_bldg_power_machine_room
bldg_.GSBC_main_bldg_power_machine_room.anal_out=bn_prob_analysis(bldg_obj,sig_tag_='avg')
bldg_obj=bldg_.GSBC_main_bldg_1
bldg_.GSBC_main_bldg_1.anal_out=bn_prob_analysis(bldg_obj,sig_tag_='avg')
import pdb;pdb.set_trace()
#--------------------------------------------------------------------------
# Analysis Display
#--------------------------------------------------------------------------
# Data set 1 - GSBC_main_bldg_power_machine_room
p_name_sets_1=bldg_.GSBC_main_bldg_power_machine_room.anal_out.__dict__.keys()
bn_out_sets_1=bldg_.GSBC_main_bldg_power_machine_room.anal_out.__dict__
# Data set 2 - GSBC_main_bldg_1
p_name_sets_2=bldg_.GSBC_main_bldg_1.anal_out.__dict__.keys()
bn_out_sets_2=bldg_.GSBC_main_bldg_1.anal_out.__dict__
# Data set 2 Analysis
print 'List power meters for analysis'
print '------------------------------------'
pprint.pprint(np.array([p_name_sets_1,convert_gsbc_name(p_name_sets_1)]).T)
print '------------------------------------'
p_name=p_name_sets_1[3]
bn_out=bn_out_sets_1[p_name]
fig_name='BN for Sensors '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
col_name=[str(np.array([[lab1],[remove_dot(lab2)]])) \
for lab1,lab2 in zip(bn_out.s_labels, convert_gsbc_name(bn_out.s_labels))]
rbn.nx_plot(bn_out.s_hc,col_name,graph_layout='spring',node_text_size=15)
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN for Time '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.t_hc,convert_gsbc_name(bn_out.t_labels),graph_layout='spring',node_text_size=12)
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN for Weather '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.w_hc,convert_gsbc_name(bn_out.w_labels),graph_layout='spring',node_text_size=12)
png_name=fig_name+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN for Sensor-Time-Weather '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.all_hc,convert_gsbc_name(bn_out.all_labels),graph_layout='spring',node_text_size=20)
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN PEAK LH Analysis for Sensor-Time-Weather '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name, figsize=(30.0,30.0))
subplot(2,1,1)
plot(bn_out.all_cause_symbol_xtick,bn_out.high_peak_prob,'-^')
plot(bn_out.all_cause_symbol_xtick,bn_out.low_peak_prob,'-v')
plt.ylabel('Likelihood',fontsize='large')
plt.xticks(bn_out.all_cause_symbol_xtick,bn_out.all_cause_symbol_xlabel,rotation=270, fontsize=10)
plt.tick_params(labelsize='large')
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='large')
plt.grid();plt.ylim([-0.05,1.05])
plt.title('Likelihood of '+ str(remove_dot(convert_gsbc_name(p_name)))+\
' given '+'\n'+str(remove_dot(convert_gsbc_name(bn_out.all_cause_label))))
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Compare with the raw data
#-------------------------------------------
start_t=datetime.datetime(2013, 8, 9, 0, 0, 0)
end_t=datetime.datetime(2013, 8, 13, 0, 0, 0)
data_x=get_data_set([label_[2:] for label_ in bn_out.all_cause_label]+[p_name[2:]],start_t,end_t)
png_namex=plot_data_x(data_x,stype='raw',smark='-^')
png_namex=plot_data_x(data_x,stype='diff',smark='-^')
name_list_out=[[p_name]+bn_out.all_cause_label,convert_gsbc_name([p_name]+bn_out.all_cause_label)]
pprint.pprint(np.array(name_list_out).T)
pprint.pprint(name_list_out)
start_t=datetime.datetime(2013, 7, 1, 0, 0, 0)
end_t=datetime.datetime(2013, 12, 31, 0, 0, 0)
data_x=get_data_set([label_[2:] for label_ in bn_out.s_labels],start_t,end_t)
png_namex=plot_data_x(data_x,stype='raw',smark='-^',fontsize='small',xpos=0.00)
png_namex=plot_data_x(data_x,stype='diff',smark='-^')
"""
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
print '----------------------------------------'
print 'Likelihoods '
print '----------------------------------------'
print cause_label+['Low Peak','High Peak']
print '----------------------------------------'
print np.vstack((np.int0(peak_state).T,np.int0(100*lowpeak_prob).T,np.int0(100*peak_prob).T)).T
print '----------------------------------------'
s_val_set=set(peak_state[:,0])
m_val_set=set(peak_state[:,1])
Z_peak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((peak_state[:,0]==s_val)&(peak_state[:,1]==m_val))[0][0]
Z_peak[i,j]=peak_prob[idx]
s_val_set=set(lowpeak_state[:,0])
m_val_set=set(lowpeak_state[:,1])
Z_lowpeak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((lowpeak_state[:,0]==s_val)&(lowpeak_state[:,1]==m_val))[0][0]
Z_lowpeak[i,j]=lowpeak_prob[idx]
Z_lowpeak=lowpeak_prob.reshape((len(s_val_set),len(m_val_set)))
Z_peak=peak_prob.reshape((len(s_val_set),len(m_val_set)))
fig1=figure()
im = plt.imshow(Z_peak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of High-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig2=figure()
im = plt.imshow(Z_lowpeak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of Low-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig2.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
"""
print '**************************** End of Program ****************************'
| gpl-2.0 |
gdiminin/HaSAPPy | HaSAPPy/IIDefinition.py | 1 | 6622 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 08:33:32 2016
@author: GDM
"""
##### Importing modules #####
import HTSeq
import cPickle as pickle
import pandas as pd
from collections import Counter
import os
from HaSAPPy.HaSAPPY_time import *
import itertools
############################################################
#1) Defining the Class Library
class Library():
"""...defintion..."""
def __init__(self,exp,input_):
self.name = exp
self.input = input_
self.informations = {'Total':'nd','Aligned':'nd','Unique_reads':'nd','Insertions':'nd','I.I.':'nd'}
self.raw = pd.Series()
#2) Function library_generation
def library_generation (exp, Info):
#Generation of the Class Library specific for "exp"
library = Library(exp,Info.IIDefinition.input_files[Info.IIDefinition.lib_names.index(exp)])
print library.name
    string ='\n\n***\tIndependent Insertions (I.I.) definition\t***\n\n- Input file: %s\n- Pair ends: %s\n- Alignment cutoff: %s\n- Remove duplicates: %s\n- Insertion cutoff: %i' %(library.input, Info.General.pair_ends,Info.IIDefinition.fidelity_limit,Info.IIDefinition.reads_duplicate,Info.IIDefinition.ins_iv)
Info.print_save(exp,string)
startTime = getCurrTime()
string = '\tSelection of Insertions (I.): %s' %startTime
Info.print_save(exp,string)
aligned_file = HTSeq.SAM_Reader(library.input)
#aligned_file = [seq for seq in itertools.islice(aligned_file,100000)]
insertions_counts = Counter()
count_aligned = 0
count_GoodQualityAlignment = 0
count_total = 0
for algnt in aligned_file:
if algnt.aligned:
if algnt.iv.chrom.startswith('chr'):
chromosome_style = ''
else:
chromosome_style = 'chr'
break
if Info.General.pair_ends: #Pair ends library
for bundle in HTSeq.pair_SAM_alignments(aligned_file, bundle=True):
if len(bundle) != 1:
continue # Skip multiple alignments
first_almnt, second_almnt = bundle[0] # extract pair
if first_almnt.aligned and second_almnt.aligned:
if first_almnt.aQual >= Info.IIDefinition.fidelity_limit:
ins = HTSeq.GenomicPosition('%s%s' %(chromosome_style,str(first_almnt.iv.chrom)),first_almnt.iv.start_d,first_almnt.iv.strand)
insertions_counts[ins] +=1
count_GoodQualityAlignment +=1
count_aligned +=1
count_total +=1
else: #Single ends library
for algnt in aligned_file:
if algnt.aligned:
if algnt.aQual >= Info.IIDefinition.fidelity_limit:
ins = HTSeq.GenomicPosition('%s%s' %(chromosome_style,str(algnt.iv.chrom)),algnt.iv.start_d,algnt.iv.strand)
insertions_counts[ins] +=1
count_GoodQualityAlignment +=1
count_aligned +=1
count_total +=1
del aligned_file
string = '\t-Total reads: %i\n\t-Aligned reads: %i\n\t-Aligned Reads trusted: %i\n\t-Insertions identified: %i' %(count_total,count_aligned,count_GoodQualityAlignment, len(insertions_counts.keys()))
Info.print_save(exp,string)
string = '\tRunTime: %s' % computeRunTime(startTime, getCurrTime())
Info.print_save(exp,string)
### To collapse insertions in insertion array that are in the same interval (4bps)
startTime = getCurrTime()
string = 'Define Independent Insertions\n\tStarted: %s' %startTime
Info.print_save(exp,string)
insertions_series = pd.Series(insertions_counts, index = insertions_counts.keys())
del insertions_counts
insertions_order = insertions_series.copy()
insertions_order.sort_values(ascending = False)
insertions_genomicarray = HTSeq.GenomicArray("auto",stranded = True)
count_indipendent_insertions = 0
count_indipendent_insertions_aborted = 0
insertions_tuple = zip(insertions_order.index,insertions_order.values)
del insertions_order
del insertions_series
for ins in insertions_tuple:
insertions_genomicarray[ins[0]] = ins[1]
insertions_collapsed = {}
for n in insertions_tuple:
i = n[0]
if insertions_genomicarray[i]>0:
counted = 0
iv_i = HTSeq.GenomicInterval(i.chrom,i.start-2,i.start+2,i.strand)
for i_2 in iv_i.xrange(step=1):
try:
counted += insertions_genomicarray[i_2]
insertions_genomicarray[i_2] = 0
except IndexError:
string = "\t!!!Skipped from analysis: %s" % i_2
Info.print_save(exp,string)
continue
if counted >= Info.IIDefinition.ins_iv:
if insertions_collapsed.has_key(i):
insertions_collapsed[i] += counted
else:
insertions_collapsed[i] = counted
count_indipendent_insertions +=1
else:
count_indipendent_insertions_aborted +=1
string = '\t-Total insertions: %i\n\t-Independent Insertions (I.I.): %i' % ((count_indipendent_insertions + count_indipendent_insertions_aborted), count_indipendent_insertions)
Info.print_save(exp,string)
string = '\tRunTime: %s' % computeRunTime(startTime, getCurrTime())
Info.print_save(exp,string)
###Storing data in library class that will be returned modifed as result of the function
library.informations['Total'] = count_total
library.informations['Aligned'] = count_aligned
library.informations['Insertions'] = count_indipendent_insertions
library.informations['II'] = count_indipendent_insertions
if Info.IIDefinition.reads_duplicate:
library.informations['Unique_reads'] = count_reads
library.raw = pd.Series(insertions_collapsed, index = insertions_collapsed.keys())
#####Store the class!!!!!#####
location = os.path.join(Info.General.storing_loc,exp + '_' +Info.General.date,'raw',exp + '_IIRawdata.pkl')
with open (location,'wb') as saving:
pickle.dump(library,saving)
#####END the program#####
    string = 'Information stored in %s\n***\tEND of Independent Insertions (I.I.) definition\t***' % location
Info.print_save(exp,string)
return library
def load(Info):
for exp in Info.IIDefinition.lib_names:
library_generation(exp,Info)
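# Editorial usage sketch (not part of the original module): ``load`` expects the
# HaSAPPy ``Info`` configuration object used throughout the package (carrying
# IIDefinition.lib_names, IIDefinition.input_files, General.pair_ends, ...)
# and simply runs library_generation for every listed library, e.g.
#
#     import HaSAPPy.IIDefinition as IIDef
#     IIDef.load(Info)   # Info is built elsewhere by the HaSAPPy workflow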
| mit |
clemkoa/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 30 | 10322 | """Unsupervised evaluation metrics."""
# Authors: Robert Layton <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, optional (default=None)
The generator used to randomly select a subset of samples. If int,
random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If
None, the random number generator is the RandomState instance used by
`np.random`. Used when ``sample_size is not None``.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
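# Editorial usage sketch (not in the original source); the labels below are
# illustrative and would normally come from a clustering estimator:
#
#     import numpy as np
#     X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
#     labels = np.array([0, 0, 1, 1])
#     score = silhouette_score(X, labels)   # close to 1 for well-separated blobs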
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
check_number_of_labels(len(le.classes_), X.shape[0])
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf + intra_clust_dists
for curr_label in range(len(unique_labels)):
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = n_samples_per_label[curr_label] - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in range(len(unique_labels)):
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# score 0 for clusters of size 1, according to the paper
sil_samples[n_samples_per_label.take(labels) == 1] = 0
return sil_samples
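# Editorial worked example (not in the original source): for a sample with mean
# intra-cluster distance a = 0.5 and mean nearest-cluster distance b = 2.0, the
# coefficient computed above is (b - a) / max(a, b) = 1.5 / 2.0 = 0.75.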
def calinski_harabaz_score(X, labels):
"""Compute the Calinski and Harabaz score.
    The score is defined as the ratio of the between-cluster dispersion to
    the within-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabaz_index>`.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabaz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
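# Editorial note (not in the original source): in formula form the value
# returned above is
#
#     CH = (extra_disp / (n_labels - 1)) / (intra_disp / (n_samples - n_labels))
#
# i.e. between-cluster dispersion per degree of freedom divided by
# within-cluster dispersion per degree of freedom.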
| bsd-3-clause |
sunny94/temp | sympy/physics/quantum/tests/test_circuitplot.py | 24 | 2071 | from sympy.physics.quantum.circuitplot import labeller, render_label, Mz, CreateOneQubitGate,\
CreateCGate
from sympy.physics.quantum.gate import CNOT, H, X, Z, SWAP, CGate, S, T
from sympy.external import import_module
from sympy.utilities.pytest import skip
mpl = import_module('matplotlib')
def test_render_label():
assert render_label('q0') == r'$|q0\rangle$'
assert render_label('q0', {'q0': '0'}) == r'$|q0\rangle=|0\rangle$'
def test_Mz():
assert str(Mz(0)) == 'Mz(0)'
def test_create1():
Qgate = CreateOneQubitGate('Q')
assert str(Qgate(0)) == 'Q(0)'
def test_createc():
Qgate = CreateCGate('Q')
assert str(Qgate([1],0)) == 'C((1),Q(0))'
def test_labeller():
"""Test the labeller utility"""
assert labeller(2) == ['q_1', 'q_0']
assert labeller(3,'j') == ['j_2', 'j_1', 'j_0']
def test_cnot():
"""Test a simple cnot circuit. Right now this only makes sure the code doesn't
raise an exception, and some simple properties
"""
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
c = CircuitPlot(CNOT(1,0),2)
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == []
def test_ex1():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0)*H(1),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
def test_ex4():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(SWAP(0,2)*H(0)* CGate((0,),S(1)) *H(1)*CGate((0,),T(2))\
*CGate((1,),S(2))*H(2),3,labels=labeller(3,'j'))
assert c.ngates == 7
assert c.nqubits == 3
assert c.labels == ['j_2', 'j_1', 'j_0']
| bsd-3-clause |
0asa/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
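# Editorial note (not part of the original example): the parallelism mentioned
# in the docstring is exposed through the ``n_jobs`` parameter of the forest
# classifiers, e.g. RandomForestClassifier(n_estimators=30, n_jobs=-1) or
# ExtraTreesClassifier(n_estimators=30, n_jobs=-1); AdaBoostClassifier has no
# such parameter because its estimators are fitted sequentially.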
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
aetilley/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
    at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
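    Examples
    --------
    An illustrative sketch added in editing (not from the original module);
    the output width is n_features * (2 * sample_steps - 1):
    >>> import numpy as np
    >>> from sklearn.kernel_approximation import AdditiveChi2Sampler
    >>> X = np.random.RandomState(0).rand(10, 4)
    >>> AdditiveChi2Sampler(sample_steps=2).fit_transform(X).shape
    (10, 12)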
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
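if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not part of this module): feed
    # two of the approximate feature maps defined above into a linear model on
    # a tiny random problem. All numbers and parameter values are illustrative.
    from sklearn.linear_model import SGDClassifier

    rng = np.random.RandomState(0)
    X_demo = rng.rand(20, 5)
    y_demo = rng.randint(0, 2, 20)
    print("AdditiveChi2Sampler output width: %d"
          % AdditiveChi2Sampler(sample_steps=2).fit_transform(X_demo).shape[1])
    for approx in (RBFSampler(gamma=1., n_components=50, random_state=0),
                   Nystroem(kernel="rbf", gamma=1., n_components=10,
                            random_state=0)):
        mapped = approx.fit_transform(X_demo)
        clf = SGDClassifier(random_state=0).fit(mapped, y_demo)
        print("%s training accuracy: %.2f"
              % (approx.__class__.__name__, clf.score(mapped, y_demo)))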
| bsd-3-clause |
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam | PyFoam/Applications/ConvertToCSV.py | 1 | 13416 | """
Application-class that implements pyFoamConvertToCSV.py
"""
from optparse import OptionGroup
from .PyFoamApplication import PyFoamApplication
from .CommonReadWriteCSV import CommonReadWriteCSV
from PyFoam.Basics.SpreadsheetData import SpreadsheetData
from PyFoam.ThirdParty.six import print_
from os import path,listdir
from copy import deepcopy
from glob import glob
hasXlsxWriter=False
try:
import xlsxwriter
hasXlsxWriter=True
except ImportError:
pass
class ConvertToCSV(PyFoamApplication,
CommonReadWriteCSV):
def __init__(self,
args=None,
**kwargs):
description="""\
Takes a plain file with column-oriented data and converts it to a
csv-file. If more than one file is specified, they are joined
according to the first column.
Note: the first file determines the resolution of the time-axis
"""
if not hasXlsxWriter:
description+="""
Warning: The module 'xlsxwriter' is not installed. Therefore no addition of formulas
to excel files is possible"""
CommonReadWriteCSV.__init__(self)
PyFoamApplication.__init__(self,
args=args,
description=description,
usage="%prog <source> ... <dest.csv>",
interspersed=True,
changeVersion=False,
nr=2,
exactNr=False,
**kwargs)
def addOptions(self):
CommonReadWriteCSV.addOptions(self)
inp=OptionGroup(self.parser,
"Input",
"Manipulating the input data")
self.parser.add_option_group(inp)
inp.add_option("--strip-characters",
action="store",
dest="stripCharacters",
default=None,
help="A string with the characters that should be stripped from the input file before it is processed. For instance '()'")
inp.add_option("--replace-first-line",
action="store",
dest="replaceFirstLine",
default=None,
help="Replace the first line of the input with this string")
how=OptionGroup(self.parser,
"How",
"How the data should be joined")
self.parser.add_option_group(how)
how.add_option("--force",
action="store_true",
dest="force",
default=False,
help="Overwrite the destination csv if it already exists")
how.add_option("--extend-data",
action="store_true",
dest="extendData",
default=False,
help="Extend the time range if other files exceed the range of the first file")
how.add_option("--names-from-filename",
action="store_true",
dest="namesFromFilename",
default=False,
help="Read the value names from the file-name (assuming that names are split by _ and the names are in the tail - front is the general filename)")
how.add_option("--add-times",
action="store_true",
dest="addTimes",
default=False,
help="Actually add the times from the second file instead of interpolating")
how.add_option("--interpolate-new-times",
action="store_true",
dest="interpolateNewTime",
default=False,
help="Interpolate data if new times are added")
how.add_option("--new-data-no-interpolate",
action="store_false",
dest="newDataInterpolate",
default=True,
help="Don't interpolate new data fields to the existing times")
excel=OptionGroup(self.parser,
"Excel",
"Stuff for excel file output")
self.parser.add_option_group(excel)
excel.add_option("--add-sheets",
action="store_true",
dest="addSheets",
default=False,
help="Add the input data in unmodified form as additional sheets to the excel file")
if hasXlsxWriter:
excel.add_option("--add-formula-to-sheet",
action="append",
dest="addFormulas",
default=[],
help="Add columns with formulas calculated from other data. This only works when writing XLSX-files. The formula is 'nane:::ExcelFormula'. In the ExcelFormula the written column names can be used. These have to be enclosed in '' (this is necessary to allow names with spaces and digits). Can be used more than once")
def run(self):
dest=self.parser.getArgs()[-1]
if path.exists(dest) and not self.opts.force:
self.error("CSV-file",dest,"exists already. Use --force to overwrite")
sources=[]
for s in self.parser.getArgs()[0:-1]:
if s.find("/*lastTime*/")>=0:
front,back=s.split("/*lastTime*/",1)
for d in glob(front):
lastTime=None
for f in listdir(d):
if path.exists(path.join(d,f,back)):
try:
t=float(f)
if lastTime:
if t>float(lastTime):
lastTime=f
else:
lastTime=f
except ValueError:
pass
if lastTime:
sources.append(path.join(d,lastTime,back))
else:
sources.append(s)
diffs=[None]
if len(sources)>1:
# find differing parts
commonStart=1e4
commonEnd=1e4
for s in sources[1:]:
a=path.abspath(sources[0])
b=path.abspath(s)
start=0
end=0
for i in range(min(len(a),len(b))):
start=i
if a[i]!=b[i]:
break
commonStart=min(commonStart,start)
for i in range(min(len(a),len(b))):
end=i
if a[-(i+1)]!=b[-(i+1)]:
break
commonEnd=min(commonEnd,end)
diffs=[]
for s in sources:
b=path.abspath(s)
if commonEnd>0:
diffs.append(b[commonStart:-(commonEnd)])
else:
diffs.append(b[commonStart:])
names=self.names
title=path.splitext(path.basename(sources[0]))[0]
if self.opts.namesFromFilename:
if not names is None:
self.error("Names already specified as",names,". Can't calc from filename")
names=path.splitext(path.basename(sources[0]))[0].split("_")
title=None
data=SpreadsheetData(names=names,
timeName=self.opts.time,
validData=self.opts.columns,
skip_header=self.opts.skipHeaderLines,
stripCharacters=self.opts.stripCharacters,
replaceFirstLine=self.opts.replaceFirstLine,
validMatchRegexp=self.opts.columnsRegexp,
title=title,
**self.dataFormatOptions(sources[0]))
rawData=[deepcopy(data)]
self.printColumns(sources[0],data)
self.recalcColumns(data)
self.rawAddColumns(data)
if self.opts.time==None:
self.opts.time=data.timeName()
if not diffs[0] is None:
data.rename(lambda c:diffs[0]+" "+c)
for i,s in enumerate(sources[1:]):
names=None
title=path.splitext(path.basename(s))[0]
if self.opts.namesFromFilename:
names=title.split("_")
title=None
sData=SpreadsheetData(names=names,
skip_header=self.opts.skipHeaderLines,
stripCharacters=self.opts.stripCharacters,
replaceFirstLine=self.opts.replaceFirstLine,
timeName=self.opts.time,
validData=self.opts.columns,
validMatchRegexp=self.opts.columnsRegexp,
title=title,
**self.dataFormatOptions(s))
rawData.append(sData)
self.printColumns(s,sData)
self.recalcColumns(sData)
self.rawAddColumns(sData)
if self.opts.addTimes:
data.addTimes(time=self.opts.time,
times=sData.data[self.opts.time],
interpolate=self.opts.interpolateNewTime)
for n in sData.names():
if n!=self.opts.time and (self.opts.columns==[] or data.validName(n,self.opts.columns,True)):
d=data.resample(sData,
n,
time=self.opts.time,
extendData=self.opts.extendData,
noInterpolation=not self.opts.newDataInterpolate)
data.append(diffs[i+1]+" "+n,d)
self.joinedAddColumns(data)
data.rename(self.processName,renameTime=True)
data.rename(lambda c:c.strip())
data.eliminatedNames=None
if len(sources)>1:
self.printColumns("written data",data)
if self.opts.automaticFormat:
if self.getDataFormat(dest)=="excel":
self.opts.writeExcel=True
if self.opts.writeExcel:
from pandas import ExcelWriter
with ExcelWriter(dest) as writer:
data.getData().to_excel(writer,sheet_name="Data")
if self.opts.addSheets:
for n,d in enumerate(rawData):
d.getData().to_excel(writer,
sheet_name="Original file %d" % n)
if hasXlsxWriter:
if len(self.opts.addFormulas)>0:
from xlsxwriter.utility import xl_rowcol_to_cell as rowCol2Cell
rows=len(data.getData())
sheet=writer.sheets["Data"]
cols={}
for i,n in enumerate(data.names()):
cols[n]=i
newC=i
for f in self.opts.addFormulas:
newC+=1
name,formula=f.split(":::")
sheet.write(0,newC,name)
cols[name]=newC
splitted=[]
ind=0
while ind>=0:
if ind>=len(formula):
break
nInd=formula.find("'",ind)
if nInd<0:
splitted.append(formula[ind:])
ind=nInd
elif nInd!=ind:
splitted.append(formula[ind:nInd])
ind=nInd
else:
nInd=formula.find("'",ind+1)
if nInd<0:
self.error("No closing ' in formula",formula)
name=formula[ind+1:nInd]
if name not in cols:
self.error("Name",name,"not in column names",cols.keys())
splitted.append(cols[name])
ind=nInd+1
for row in range(rows):
cellFormula="="
for s in splitted:
if type(s)==int:
cellFormula+=rowCol2Cell(row+1,s)
else:
cellFormula+=s
sheet.write(row+1,newC,cellFormula)
print_("Formulas written. In LibreOffice recalculate with Ctrl+Shift+F9")
else:
data.writeCSV(dest,
delimiter=self.opts.delimiter)
# Should work with Python3 and Python2
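if __name__ == "__main__":
    # Hedged usage sketch (editorial addition): the pyFoamConvertToCSV.py
    # wrapper script is assumed to do little more than instantiate this class,
    # which parses the given argument list and runs. The file names and the
    # --force flag below are illustrative only.
    ConvertToCSV(args=["forces.dat", "forces.csv", "--force"])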
| gpl-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/spines.py | 8 | 19263 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib
import matplotlib.artist as martist
from matplotlib.artist import allow_rasterization
from matplotlib import docstring
import matplotlib.transforms as mtransforms
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.path as mpath
import matplotlib.cbook as cbook
import numpy as np
import warnings
rcParams = matplotlib.rcParams
class Spine(mpatches.Patch):
"""an axis spine -- the line noting the data area boundaries
Spines are the lines connecting the axis tick marks and noting the
boundaries of the data area. They can be placed at arbitrary
positions. See function:`~matplotlib.spines.Spine.set_position`
for more information.
The default position is ``('outward',0)``.
Spines are subclasses of class:`~matplotlib.patches.Patch`, and
inherit much of their behavior.
Spines draw a line or a circle, depending if
function:`~matplotlib.spines.Spine.set_patch_line` or
function:`~matplotlib.spines.Spine.set_patch_circle` has been
called. Line-like is the default.
"""
def __str__(self):
return "Spine"
@docstring.dedent_interpd
def __init__(self, axes, spine_type, path, **kwargs):
"""
- *axes* : the Axes instance containing the spine
- *spine_type* : a string specifying the spine type
- *path* : the path instance used to draw the spine
Valid kwargs are:
%(Patch)s
"""
super(Spine, self).__init__(**kwargs)
self.axes = axes
self.set_figure(self.axes.figure)
self.spine_type = spine_type
self.set_facecolor('none')
self.set_edgecolor(rcParams['axes.edgecolor'])
self.set_linewidth(rcParams['axes.linewidth'])
self.set_capstyle('projecting')
self.axis = None
self.set_zorder(2.5)
self.set_transform(self.axes.transData) # default transform
self._bounds = None # default bounds
self._smart_bounds = False
# Defer initial position determination. (Not much support for
# non-rectangular axes is currently implemented, and this lets
# them pass through the spines machinery without errors.)
self._position = None
if not isinstance(path, matplotlib.path.Path):
msg = "'path' must be an instance of 'matplotlib.path.Path'"
raise ValueError(msg)
self._path = path
# To support drawing both linear and circular spines, this
# class implements Patch behavior two ways. If
# self._patch_type == 'line', behave like a mpatches.PathPatch
# instance. If self._patch_type == 'circle', behave like a
# mpatches.Ellipse instance.
self._patch_type = 'line'
# Behavior copied from mpatches.Ellipse:
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = mtransforms.IdentityTransform()
def set_smart_bounds(self, value):
"""set the spine and associated axis to have smart bounds"""
self._smart_bounds = value
# also set the axis if possible
if self.spine_type in ('left', 'right'):
self.axes.yaxis.set_smart_bounds(value)
elif self.spine_type in ('top', 'bottom'):
self.axes.xaxis.set_smart_bounds(value)
self.stale = True
def get_smart_bounds(self):
"""get whether the spine has smart bounds"""
return self._smart_bounds
def set_patch_circle(self, center, radius):
"""set the spine to be circular"""
self._patch_type = 'circle'
self._center = center
self._width = radius * 2
self._height = radius * 2
self._angle = 0
# circle drawn on axes transform
self.set_transform(self.axes.transAxes)
self.stale = True
def set_patch_line(self):
"""set the spine to be linear"""
self._patch_type = 'line'
self.stale = True
# Behavior copied from mpatches.Ellipse:
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
                 makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
assert self._patch_type == 'circle'
center = (self.convert_xunits(self._center[0]),
self.convert_yunits(self._center[1]))
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
self._patch_transform = mtransforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self._angle) \
.translate(*center)
def get_patch_transform(self):
if self._patch_type == 'circle':
self._recompute_transform()
return self._patch_transform
else:
return super(Spine, self).get_patch_transform()
def get_path(self):
return self._path
def _ensure_position_is_set(self):
if self._position is None:
# default position
self._position = ('outward', 0.0) # in points
self.set_position(self._position)
def register_axis(self, axis):
"""register an axis
An axis should be registered with its corresponding spine from
the Axes instance. This allows the spine to clear any axis
properties when needed.
"""
self.axis = axis
if self.axis is not None:
self.axis.cla()
self.stale = True
def cla(self):
"""Clear the current spine"""
self._position = None # clear position
if self.axis is not None:
self.axis.cla()
def is_frame_like(self):
"""return True if directly on axes frame
This is useful for determining if a spine is the edge of an
old style MPL plot. If so, this function will return True.
"""
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
if len(position) != 2:
raise ValueError("position should be 2-tuple")
position_type, amount = position
if position_type == 'outward' and amount == 0:
return True
else:
return False
def _adjust_location(self):
"""automatically set spine bounds to the view interval"""
if self.spine_type == 'circle':
return
if self._bounds is None:
if self.spine_type in ('left', 'right'):
low, high = self.axes.viewLim.intervaly
elif self.spine_type in ('top', 'bottom'):
low, high = self.axes.viewLim.intervalx
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if self._smart_bounds:
# attempt to set bounds in sophisticated way
if low > high:
# handle inverted limits
low, high = high, low
viewlim_low = low
viewlim_high = high
del low, high
if self.spine_type in ('left', 'right'):
datalim_low, datalim_high = self.axes.dataLim.intervaly
ticks = self.axes.get_yticks()
elif self.spine_type in ('top', 'bottom'):
datalim_low, datalim_high = self.axes.dataLim.intervalx
ticks = self.axes.get_xticks()
# handle inverted limits
ticks = list(ticks)
ticks.sort()
ticks = np.array(ticks)
if datalim_low > datalim_high:
datalim_low, datalim_high = datalim_high, datalim_low
if datalim_low < viewlim_low:
# Data extends past view. Clip line to view.
low = viewlim_low
else:
# Data ends before view ends.
cond = (ticks <= datalim_low) & (ticks >= viewlim_low)
tickvals = ticks[cond]
if len(tickvals):
# A tick is less than or equal to lowest data point.
low = tickvals[-1]
else:
# No tick is available
low = datalim_low
low = max(low, viewlim_low)
if datalim_high > viewlim_high:
# Data extends past view. Clip line to view.
high = viewlim_high
else:
# Data ends before view ends.
cond = (ticks >= datalim_high) & (ticks <= viewlim_high)
tickvals = ticks[cond]
if len(tickvals):
# A tick is greater than or equal to highest data
# point.
high = tickvals[0]
else:
# No tick is available
high = datalim_high
high = min(high, viewlim_high)
else:
low, high = self._bounds
v1 = self._path.vertices
assert v1.shape == (2, 2), 'unexpected vertices shape'
if self.spine_type in ['left', 'right']:
v1[0, 1] = low
v1[1, 1] = high
elif self.spine_type in ['bottom', 'top']:
v1[0, 0] = low
v1[1, 0] = high
else:
raise ValueError('unable to set bounds for spine "%s"' %
self.spine_type)
@allow_rasterization
def draw(self, renderer):
self._adjust_location()
ret = super(Spine, self).draw(renderer)
self.stale = False
return ret
def _calc_offset_transform(self):
"""calculate the offset transform performed by the spine"""
self._ensure_position_is_set()
position = self._position
if cbook.is_string_like(position):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
assert len(position) == 2, "position should be 2-tuple"
position_type, amount = position
assert position_type in ('axes', 'outward', 'data')
if position_type == 'outward':
if amount == 0:
# short circuit commonest case
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif self.spine_type in ['left', 'right', 'top', 'bottom']:
offset_vec = {'left': (-1, 0),
'right': (1, 0),
'bottom': (0, -1),
'top': (0, 1),
}[self.spine_type]
# calculate x and y offset in dots
offset_x = amount * offset_vec[0] / 72.0
offset_y = amount * offset_vec[1] / 72.0
self._spine_transform = ('post',
mtransforms.ScaledTranslation(
offset_x,
offset_y,
self.figure.dpi_scale_trans))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'axes':
if self.spine_type in ('left', 'right'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
# keep y unchanged, fix x at
# amount
0, 0, 0, 1, amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('pre',
mtransforms.Affine2D.from_values(
# keep x unchanged, fix y at
# amount
1, 0, 0, 0, 0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
elif position_type == 'data':
if self.spine_type in ('right', 'top'):
# The right and top spines have a default position of 1 in
# axes coordinates. When specifying the position in data
# coordinates, we need to calculate the position relative to 0.
amount -= 1
if self.spine_type in ('left', 'right'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
amount, 0))
elif self.spine_type in ('bottom', 'top'):
self._spine_transform = ('data',
mtransforms.Affine2D().translate(
0, amount))
else:
warnings.warn('unknown spine type "%s": no spine '
'offset performed' % self.spine_type)
self._spine_transform = ('identity',
mtransforms.IdentityTransform())
def set_position(self, position):
"""set the position of the spine
Spine position is specified by a 2 tuple of (position type,
amount). The position types are:
* 'outward' : place the spine out from the data area by the
specified number of points. (Negative values specify placing the
spine inward.)
* 'axes' : place the spine at the specified Axes coordinate (from
0.0-1.0).
* 'data' : place the spine at the specified data coordinate.
        Additionally, shorthand notations define special positions:
* 'center' -> ('axes',0.5)
* 'zero' -> ('data', 0.0)
"""
if position in ('center', 'zero'):
# special positions
pass
else:
if len(position) != 2:
raise ValueError("position should be 'center' or 2-tuple")
if position[0] not in ['outward', 'axes', 'data']:
msg = ("position[0] should be in [ 'outward' | 'axes' |"
" 'data' ]")
raise ValueError(msg)
self._position = position
self._calc_offset_transform()
self.set_transform(self.get_spine_transform())
if self.axis is not None:
self.axis.reset_ticks()
self.stale = True
def get_position(self):
"""get the spine position"""
self._ensure_position_is_set()
return self._position
def get_spine_transform(self):
"""get the spine transform"""
self._ensure_position_is_set()
what, how = self._spine_transform
if what == 'data':
# special case data based spine locations
data_xform = self.axes.transScale + \
(how + self.axes.transLimits + self.axes.transAxes)
if self.spine_type in ['left', 'right']:
result = mtransforms.blended_transform_factory(
data_xform, self.axes.transData)
elif self.spine_type in ['top', 'bottom']:
result = mtransforms.blended_transform_factory(
self.axes.transData, data_xform)
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
return result
if self.spine_type in ['left', 'right']:
base_transform = self.axes.get_yaxis_transform(which='grid')
elif self.spine_type in ['top', 'bottom']:
base_transform = self.axes.get_xaxis_transform(which='grid')
else:
raise ValueError('unknown spine spine_type: %s' %
self.spine_type)
if what == 'identity':
return base_transform
elif what == 'post':
return base_transform + how
elif what == 'pre':
return how + base_transform
else:
raise ValueError("unknown spine_transform type: %s" % what)
def set_bounds(self, low, high):
"""Set the bounds of the spine."""
if self.spine_type == 'circle':
raise ValueError(
'set_bounds() method incompatible with circular spines')
self._bounds = (low, high)
self.stale = True
def get_bounds(self):
"""Get the bounds of the spine."""
return self._bounds
@classmethod
def linear_spine(cls, axes, spine_type, **kwargs):
"""
        (classmethod) Returns a linear :class:`Spine`.
"""
# all values of 13 get replaced upon call to set_bounds()
if spine_type == 'left':
path = mpath.Path([(0.0, 13), (0.0, 13)])
elif spine_type == 'right':
path = mpath.Path([(1.0, 13), (1.0, 13)])
elif spine_type == 'bottom':
path = mpath.Path([(13, 0.0), (13, 0.0)])
elif spine_type == 'top':
path = mpath.Path([(13, 1.0), (13, 1.0)])
else:
raise ValueError('unable to make path for spine "%s"' % spine_type)
result = cls(axes, spine_type, path, **kwargs)
result.set_visible(rcParams['axes.spines.{0}'.format(spine_type)])
return result
@classmethod
def circular_spine(cls, axes, center, radius, **kwargs):
"""
        (classmethod) Returns a circular :class:`Spine`.
"""
path = mpath.Path.unit_circle()
spine_type = 'circle'
result = cls(axes, spine_type, path, **kwargs)
result.set_patch_circle(center, radius)
return result
def set_color(self, c):
"""
Set the edgecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
# The facecolor of a spine is always 'none' by default -- let
# the user change it manually if desired.
self.set_edgecolor(c)
self.stale = True
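if __name__ == "__main__":
    # Hedged usage sketch (editorial addition, not part of this module): the
    # usual way spines are manipulated from user code, through ``ax.spines``.
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot(range(10))
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_color('none')
    ax.spines['left'].set_position(('outward', 10))
    ax.spines['bottom'].set_bounds(2, 7)
    plt.show()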
| mit |
juliusbierk/scikit-image | doc/examples/plot_glcm.py | 26 | 3307 | """
=====================
GLCM Texture Features
=====================
This example illustrates texture classification using grey level
co-occurrence matrices (GLCMs).
A GLCM is a histogram of co-occurring greyscale values at a given
offset over an image.
In this example, samples of two different textures are extracted from
an image: grassy areas and sky areas. For each patch, a GLCM with
a horizontal offset of 5 is computed. Next, two features of the
GLCM matrices are computed: dissimilarity and correlation. These are
plotted to illustrate that the classes form clusters in feature space.
In a typical classification problem, the final step (not included in
this example) would be to train a classifier, such as logistic
regression, to label image patches from new images.
"""
import matplotlib.pyplot as plt
from skimage.feature import greycomatrix, greycoprops
from skimage import data
PATCH_SIZE = 21
# open the camera image
image = data.camera()
# select some patches from grassy areas of the image
grass_locations = [(474, 291), (440, 433), (466, 18), (462, 236)]
grass_patches = []
for loc in grass_locations:
grass_patches.append(image[loc[0]:loc[0] + PATCH_SIZE,
loc[1]:loc[1] + PATCH_SIZE])
# select some patches from sky areas of the image
sky_locations = [(54, 48), (21, 233), (90, 380), (195, 330)]
sky_patches = []
for loc in sky_locations:
sky_patches.append(image[loc[0]:loc[0] + PATCH_SIZE,
loc[1]:loc[1] + PATCH_SIZE])
# compute some GLCM properties for each patch
xs = []
ys = []
for patch in (grass_patches + sky_patches):
glcm = greycomatrix(patch, [5], [0], 256, symmetric=True, normed=True)
xs.append(greycoprops(glcm, 'dissimilarity')[0, 0])
ys.append(greycoprops(glcm, 'correlation')[0, 0])
# create the figure
fig = plt.figure(figsize=(8, 8))
# display original image with locations of patches
ax = fig.add_subplot(3, 2, 1)
ax.imshow(image, cmap=plt.cm.gray, interpolation='nearest',
vmin=0, vmax=255)
for (y, x) in grass_locations:
ax.plot(x + PATCH_SIZE / 2, y + PATCH_SIZE / 2, 'gs')
for (y, x) in sky_locations:
ax.plot(x + PATCH_SIZE / 2, y + PATCH_SIZE / 2, 'bs')
ax.set_xlabel('Original Image')
ax.set_xticks([])
ax.set_yticks([])
ax.axis('image')
# for each patch, plot (dissimilarity, correlation)
ax = fig.add_subplot(3, 2, 2)
ax.plot(xs[:len(grass_patches)], ys[:len(grass_patches)], 'go',
label='Grass')
ax.plot(xs[len(grass_patches):], ys[len(grass_patches):], 'bo',
label='Sky')
ax.set_xlabel('GLCM Dissimilarity')
ax.set_ylabel('GLCM Correlation')
ax.legend()
# display the image patches
for i, patch in enumerate(grass_patches):
ax = fig.add_subplot(3, len(grass_patches), len(grass_patches)*1 + i + 1)
ax.imshow(patch, cmap=plt.cm.gray, interpolation='nearest',
vmin=0, vmax=255)
ax.set_xlabel('Grass %d' % (i + 1))
for i, patch in enumerate(sky_patches):
ax = fig.add_subplot(3, len(sky_patches), len(sky_patches)*2 + i + 1)
ax.imshow(patch, cmap=plt.cm.gray, interpolation='nearest',
vmin=0, vmax=255)
ax.set_xlabel('Sky %d' % (i + 1))
# display the patches and plot
fig.suptitle('Grey level co-occurrence matrix features', fontsize=14)
plt.show()
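# Hedged sketch (editorial addition, not part of the original example): the
# classification step mentioned in the docstring, trained on the
# (dissimilarity, correlation) features computed above. LogisticRegression is
# an illustrative choice of classifier.
import numpy as np
from sklearn.linear_model import LogisticRegression

features = np.column_stack([xs, ys])
labels = np.array([0] * len(grass_patches) + [1] * len(sky_patches))
clf = LogisticRegression().fit(features, labels)
print('training accuracy of the GLCM-feature classifier: %.2f'
      % clf.score(features, labels))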
| bsd-3-clause |
devlinmr/contrib | mungegithub/issue-labeler/simple_app.py | 20 | 4662 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import simplejson
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
from flask import Flask, request
from sklearn.feature_extraction import FeatureHasher
from sklearn.externals import joblib
from sklearn.linear_model import SGDClassifier
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
app = Flask(__name__)
#Parameters
team_fn= "./models/trained_teams_model.pkl"
component_fn= "./models/trained_components_model.pkl"
logFile = "/tmp/issue-labeler.log"
logSize = 1024*1024*100
numFeatures = 262144
myLoss = 'hinge'
myAlpha = .1
myPenalty = 'l2'
myHasher = FeatureHasher(input_type="string", n_features= numFeatures, non_negative=True)
myStemmer = PorterStemmer()
tokenizer = RegexpTokenizer(r'\w+')
try:
    stop_fn = "./stopwords.txt"
    with open(stop_fn, 'r') as f:
        stopwords = set(word.strip() for word in f)
except IOError:
    # no stopwords file available: don't remove any stopwords
    stopwords = set()
@app.errorhandler(500)
def internal_error(exception):
return str(exception), 500
@app.route("/", methods = ["POST"])
def get_labels():
"""
The request should contain 2 form-urlencoded parameters
1) title : title of the issue
2) body: body of the issue
It returns a team/<label> and a component/<label>
"""
title = request.form.get('title', "")
body = request.form.get('body', "")
tokens = tokenize_stem_stop(" ".join([title, body]))
team_mod = joblib.load(team_fn)
comp_mod = joblib.load(component_fn)
vec = myHasher.transform([tokens])
tlabel = team_mod.predict(vec)[0]
clabel = comp_mod.predict(vec)[0]
return ",".join([tlabel, clabel])
def tokenize_stem_stop(inputString):
inputString = inputString.encode('utf-8')
curTitleBody = tokenizer.tokenize(inputString.decode('utf-8').lower())
return map(myStemmer.stem, filter(lambda x: x not in stopwords, curTitleBody))
@app.route("/update_models", methods = ["PUT"])
def update_model():
"""
data should contain three fields
titles: list of titles
bodies: list of bodies
labels: list of list of labels
"""
data = request.json
titles = data.get('titles')
bodies = data.get('bodies')
labels = data.get('labels')
tTokens = []
cTokens = []
team_labels = []
component_labels = []
for (title, body, label_list) in zip(titles, bodies, labels):
tLabel = filter(lambda x: x.startswith('team'), label_list)
cLabel = filter(lambda x: x.startswith('component'), label_list)
tokens = tokenize_stem_stop(" ".join([title, body]))
if tLabel:
team_labels += tLabel
tTokens += [tokens]
if cLabel:
component_labels += cLabel
cTokens += [tokens]
tVec = myHasher.transform(tTokens)
cVec = myHasher.transform(cTokens)
if team_labels:
if os.path.isfile(team_fn):
team_model = joblib.load(team_fn)
team_model.partial_fit(tVec, np.array(team_labels))
else:
#no team model stored so build a new one
team_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
team_model.fit(tVec, np.array(team_labels))
if component_labels:
if os.path.isfile(component_fn):
component_model = joblib.load(component_fn)
component_model.partial_fit(cVec, np.array(component_labels))
else:
#no comp model stored so build a new one
component_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
component_model.fit(cVec, np.array(component_labels))
joblib.dump(team_model, team_fn)
joblib.dump(component_model, component_fn)
return ""
def configure_logger():
FORMAT = '%(asctime)-20s %(levelname)-10s %(message)s'
file_handler = RotatingFileHandler(logFile, maxBytes=logSize, backupCount=3)
formatter = logging.Formatter(FORMAT)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
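def example_client_calls(host="http://localhost:5000"):
    # Hedged client sketch (editorial addition, not part of the original app):
    # shows the request shapes the two endpoints above expect. Requires the
    # 'requests' package; the titles, bodies and labels are made up.
    import requests
    labels = requests.post(host + "/",
                           data={"title": "kubelet crashes on start",
                                 "body": "stack trace attached"}).text
    requests.put(host + "/update_models",
                 json={"titles": ["kubelet crashes on start"],
                       "bodies": ["stack trace attached"],
                       "labels": [["team/node", "component/kubelet"]]})
    return labels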
if __name__ == "__main__":
configure_logger()
app.run(host="0.0.0.0")
| apache-2.0 |
philipan/paparazzi | sw/airborne/test/math/compare_utm_enu.py | 77 | 2714 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np
# Origin at ENAC
UTM_EAST0 = 377349 # in m
UTM_NORTH0 = 4824583 # in m
UTM_ZONE0 = 31
ALT0 = 147.000 # in m
utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)
lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)
# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius
UTM_DELTA_EAST = 500000.
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))
# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()
# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
utm = UtmCoor_d()
utm.north = i * dist_points + utm_origin.north
utm.east = i * dist_points+ utm_origin.east
utm.alt = utm_origin.alt
utm.zone = utm_origin.zone
#print(utm)
utm_res[i, 0] = utm.east - utm_origin.east
utm_res[i, 1] = utm.north - utm_origin.north
lla = utm.to_lla()
#print(lla)
ecef = lla.to_ecef()
enu = ecef.to_enu(ltp_origin)
enu_res[i, 0] = enu.x
enu_res[i, 1] = enu.y
enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
enu_res_c[i, 0] = enu_c.x
enu_res_c[i, 1] = enu_c.y
#print(enu)
dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)
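# Hedged back-of-the-envelope check (editorial addition): for a small
# convergence angle the UTM-vs-ENU discrepancy should grow roughly like
# distance * angle, so compare that estimate with the measured error.
approx_error = dist * abs(conv)
print("max observed error: %.2f m, small-angle estimate: %.2f m"
      % (error.max(), approx_error.max()))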
plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
| gpl-2.0 |
akshaybabloo/Car-ND | Term_1/advanced_lane_finding_10/direction_gradient_10_6.py | 1 | 1218 | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in an image
image = mpimg.imread('curved-lane.jpg')
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
# Grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Calculate the x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Take the absolute value of the gradient direction,
# apply a threshold, and create a binary image result
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
binary_output = np.zeros_like(absgraddir)
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
# Return the binary image
return binary_output
# Run the function
dir_binary = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))
# Plot the result
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(dir_binary, cmap='gray')
ax2.set_title('Thresholded Grad. Dir.', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.show()
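# Hedged follow-up sketch (editorial addition): in practice this direction
# threshold is combined with a gradient-magnitude threshold. The mag_thresh()
# helper and the threshold values below are illustrative assumptions, not part
# of the original snippet.
def mag_thresh(img, sobel_kernel=3, thresh=(0, 255)):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(sobelx ** 2 + sobely ** 2)
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    binary_output = np.zeros_like(scaled)
    binary_output[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return binary_output

mag_binary = mag_thresh(image, sobel_kernel=15, thresh=(30, 100))
combined = np.zeros_like(dir_binary)
combined[(mag_binary == 1) & (dir_binary == 1)] = 1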
| mit |
Djabbz/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
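def minimal_out_of_core_sketch(batches, classes):
    """Hedged editorial sketch (not part of the original example).

    Condenses the pattern used in the main loop below: hash each text batch
    into a fixed feature space and update a linear model with ``partial_fit``.
    ``batches`` is assumed to yield (list_of_texts, label_array) pairs.
    """
    hasher = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                               non_negative=True)
    clf = SGDClassifier()
    for texts, y in batches:
        clf.partial_fit(hasher.transform(texts), y, classes=classes)
    return clf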
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
phobson/bokeh | bokeh/core/compat/mplexporter/exporter.py | 8 | 12413 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
Parameters
        ----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
if fig.canvas is None:
fig.canvas = FigureCanvasAgg(fig)
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
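    # Added illustrative sketch: for a line already drawn in data coordinates,
    #   code, xy = Exporter.process_transform(line.get_transform(), ax,
    #                                         line.get_xydata())
    # is expected to return code == "data" with xy unchanged, whereas passing
    # force_trans=ax.transAxes would first map the points into axes coordinates.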
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
warnings.warn("Legend element %s not implemented" % child)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not implemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not implemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
| bsd-3-clause |
PKU-Dragon-Team/Datalab-Utilities | mobile_cluster/Hierachical.py | 1 | 5485 | import numpy as np
import typing as tg
import sklearn.cluster as sklc
import scipy.spatial.distance as spsd
import scipy.signal as spsn
import heapq as hq
import matplotlib.pyplot as plt
def SimpleHierachicalCluster(X: np.ndarray, weight: tg.Optional[np.ndarray]=None) -> np.ndarray:
"""My version of Hierachical Clustering, processing small amount of samples and give easily judgement of the hierachical tree
Running time: O(N^2*D), N is n_samples, D is n_features
"""
n_samples, n_features = X.shape
if weight is None:
weights = np.ones(n_samples, dtype=int)
else:
weights = weight.copy()
hierachical = np.zeros((n_samples - 1, 3), dtype=int) # each row: (index1: int, index2:int, index_new:int)
distances = np.zeros(n_samples - 1)
output_index = 0
nodes = X.copy()
remaining_indexes = set(range(n_samples))
distance_heap = []
calculated = set()
for i in remaining_indexes:
calculated.add(i)
for j in remaining_indexes - calculated:
hq.heappush(distance_heap, (spsd.euclidean(nodes[i], nodes[j]), i, j))
# now go with clustering
while len(remaining_indexes) > 1:
# drop merged ones
min_d, index1, index2 = hq.heappop(distance_heap)
while not (index1 in remaining_indexes and index2 in remaining_indexes):
min_d, index1, index2 = hq.heappop(distance_heap)
centroid = (weights[index1] * nodes[index1] + weights[index2] * nodes[index2]) / (weights[index1] + weights[index2])
# now new centroid comes, drop i and j, and calculate new distances
remaining_indexes.remove(index1)
remaining_indexes.remove(index2)
index_new = nodes.shape[0]
for i in remaining_indexes:
hq.heappush(distance_heap, (spsd.euclidean(nodes[i], centroid), i, index_new))
remaining_indexes.add(index_new)
weights = np.hstack((weights, weights[index1] + weights[index2]))
nodes = np.vstack((nodes, centroid))
# forming output
hierachical[output_index] = (index1, index2, index_new)
distances[output_index] = min_d
output_index += 1
return hierachical, distances, nodes, weights
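# Added illustrative sketch (not part of the original module); it is only
# defined here, never called at import time.
def _example_simple_hierachical_cluster():
    """Cluster a tiny random data set and inspect the merge order and merge
    distances returned by SimpleHierachicalCluster."""
    rng = np.random.RandomState(0)
    X = rng.rand(5, 2)
    hierachical, distances, nodes, weights = SimpleHierachicalCluster(X)
    # Each row of `hierachical` records one merge as (index1, index2, index_new)
    # and distances[i] is the distance between the two merged nodes.
    for (i1, i2, inew), d in zip(hierachical, distances):
        print('merged %d and %d into %d at distance %.3f' % (i1, i2, inew, d))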
def LastLocalMinimumCluster(X: np.ndarray, weight: tg.Optional[np.ndarray]=None) -> tg.Tuple[np.ndarray, np.ndarray]:
"""Hierachical Cluster that pick the last local minimum of distance and cut the cluster tree into several clusters
"""
n_samples, n_features = X.shape
hierachical, distances, nodes, weights = SimpleHierachicalCluster(X, weight)
# find local minimums
extrema = spsn.argrelmin(distances) # type: np.ndarray
try:
last_local_minimum = extrema[0][len(extrema[0]) - 1]
except IndexError: # no local_minimum, return all clustered nodes
        return nodes[n_samples:], weights[n_samples:]
merged_nodes = set(hierachical[:last_local_minimum + 1, 0:2].flat)
post_cluster_nodes = set(hierachical[last_local_minimum + 1:, 2].flat)
total_nodes = set(range(len(nodes)))
    cluster_centers = total_nodes - post_cluster_nodes - merged_nodes  # nodes that are not merged will be cluster centers
return nodes[list(cluster_centers)], weights[list(cluster_centers)]
def PercentageCluster(X: np.ndarray, weight: tg.Optional[np.ndarray]=None, percentage: float=0.5) -> tg.Tuple[np.ndarray, np.ndarray]:
"""Hierachical Cluster that cut the cluster tree into several clusters when distance is higher than the require percentage
"""
n_samples, n_features = X.shape
hierachical, distances, nodes, weights = SimpleHierachicalCluster(X, weight)
# find cutting point
max_d = np.max(distances)
extrema = distances < percentage * max_d
for i, x in enumerate(np.flipud(extrema)):
if x:
break
last_less = len(extrema) - 1 - i
merged_nodes = set(hierachical[:last_less + 1, 0:2].flat)
post_cluster_nodes = set(hierachical[last_less + 1:, 2].flat)
total_nodes = set(range(len(nodes)))
    cluster_centers = total_nodes - post_cluster_nodes - merged_nodes  # nodes that are not merged will be cluster centers
return nodes[list(cluster_centers)], weights[list(cluster_centers)]
def MeanLogWardTree(X: np.ndarray, weight: tg.Optional[np.ndarray]=None) -> np.ndarray:
"""Hierachical Cluster that pick the mean of log of ward_tree distance and cut the cluster tree into several clusters
"""
if weight is None:
connect = None
else:
connect = np.outer(weight, weight)
n_samples, n_features = X.shape
children, n_components, n_leaves, parents, distances = sklc.ward_tree(X, connectivity=connect, return_distance=True)
c = children.shape[0]
hierachical = np.hstack([children, np.arange(n_samples, n_samples + c).reshape(c, 1)])
log_distance = np.log(np.sqrt(distances**2 / n_features))
mean_log_distance = np.mean(log_distance)
distance_mask = log_distance < mean_log_distance
post_distance_mask = log_distance >= mean_log_distance
merged_nodes = set(hierachical[distance_mask, 0:2].flat)
post_cluster_nodes = set(hierachical[post_distance_mask, 2].flat)
total_nodes = set(range(n_samples, c + n_samples))
    cluster_centers = total_nodes - post_cluster_nodes - merged_nodes  # nodes that are not merged will be cluster centers
print(merged_nodes)
print(post_cluster_nodes)
print(total_nodes)
return hierachical[np.asarray(list(cluster_centers)) - n_samples]
| mit |
hdmetor/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances closely reflect the distances in the original
high-dimensional space. Unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem closely matches
that of representing a flat map of the Earth, as with
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
jwyang/JULE-Caffe | python/detect.py | 23 | 5743 | #!/usr/bin/env python
"""
detector.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserved order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="selective_search",
choices=CROP_MODES,
help="How to generate windows for detection."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--context_pad",
type=int,
default='16',
help="Amount of surrounding context to collect in input window."
)
args = parser.parse_args()
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if mean.shape[1:] != (1, 1):
mean = mean.mean(1).mean(1)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap,
context_pad=args.context_pad)
# Load input.
t = time.time()
print("Loading input...")
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = [
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
]
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
        # Enumerate the class probabilities (one column per output class).
        feats = np.vstack(df['feat'])
        class_cols = ['class{}'.format(x) for x in range(feats.shape[1])]
        df[class_cols] = pd.DataFrame(
            data=feats, index=df.index, columns=class_cols)
df.to_csv(args.output_file, cols=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
| mit |
eickenberg/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ioshchepkov/SHTOOLS | examples/python/GravMag/TestGrav.py | 1 | 5451 | #!/usr/bin/env python
"""
This script tests the gravity and magnetics routines.
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
from pyshtools import gravmag
from pyshtools import spectralanalysis
from pyshtools import shio
from pyshtools import constant
sys.path.append(os.path.join(os.path.dirname(__file__), "../Common"))
from FigStyle import style_shtools
# set shtools plot style:
mpl.rcParams.update(style_shtools)
# ==== MAIN FUNCTION ====
def main():
TestMakeGravGrid()
TestNormalGravity()
TestGravGrad()
TestFilter()
TestMakeMagGrid()
# ==== TEST FUNCTIONS ====
def TestMakeGravGrid():
infile = '../../ExampleDataFiles/jgmro_110b_sha.tab'
clm, lmax, header = shio.shread(infile, header=True)
r0 = float(header[0]) * 1.e3
gm = float(header[1]) * 1.e9
clm[0, 0, 0] = 1.0
print(gm, r0)
geoid = gravmag.MakeGeoidGridDH(clm, r0, gm, constant.w0_mars,
a=constant.a_mars, f=constant.f_mars,
omega=constant.omega_mars)
geoid = geoid / 1.e3 # convert to meters
fig_map = plt.figure()
plt.imshow(geoid)
fig_map.savefig('MarsGeoid.png')
rad, theta, phi, total, pot = gravmag.MakeGravGridDH(
clm, gm, r0, lmax=719, a=constant.a_mars, f=constant.f_mars,
lmax_calc=85, omega=constant.omega_mars, normal_gravity=1)
fig, axes = plt.subplots(2, 2)
for num, vv, s in ((0, rad, "$g_{r}$"), (1, theta, "$g_{\\theta}$"),
(2, phi, "$g_{\phi}$"),
(3, total, "Gravity disturbance")):
if (num == 3):
axes.flat[num].imshow(vv * 1.e5, vmin=-400, vmax=550)
# Convert to mGals
else:
axes.flat[num].imshow(vv)
axes.flat[num].set_title(s)
axes.flat[num].set_xticks(())
axes.flat[num].set_yticks(())
fig.savefig('Mars_Grav.png')
def TestNormalGravity():
gm = constant.gm_mars
omega = constant.omega_mars
a = constant.a_mars
b = constant.b_mars
lat = np.arange(-90., 90., 1.)
ng = np.array([gravmag.NormalGravity(x, gm, omega, a, b) for x in lat])
fig = plt.figure()
plt.plot(lat, ng, '-')
plt.xlim(-90, 90)
plt.xlabel('latitude')
plt.ylabel('$g, m s^{-2}$')
fig.savefig('Mars_normalgravity.png')
def TestGravGrad():
# ---- input parameters ----
lmax = 100
clm = np.zeros((2, lmax + 1, lmax + 1), dtype=float)
clm[0, 2, 2] = 1.0
gm = 1.0
r0 = 1.0
a = 1.0
f = 0.0
vxx, vyy, vzz, vxy, vxz, vyz = gravmag.MakeGravGradGridDH(clm, gm, r0,
a=a, f=f)
print("Maximum Trace(Vxx+Vyy+Vzz) = ", np.max(vxx + vyy + vzz))
print("Minimum Trace(Vxx+Vyy+Vzz) = ", np.min(vxx + vyy + vzz))
fig, axes = plt.subplots(2, 3)
fig.suptitle("Gravity gradient tensor", fontsize=10)
for num, vv, s in ((0, vxx, "$V_{xx}$"), (1, vyy, "$V_{yy}$"),
(2, vzz, "$V_{zz}$"), (3, vxy, "$V_{xy}$"),
(4, vxz, "$V_{xz}$"), (5, vyz, "$V_{yz}$")):
axes.flat[num].imshow(vv, vmin=-5, vmax=5)
axes.flat[num].set_title(s)
axes.flat[num].set_xticks(())
axes.flat[num].set_yticks(())
fig.savefig('GravGrad_C22.png')
def TestFilter():
half = 80
r = constant.r_moon
d = r - 40.e3
deglist = np.arange(1, 200, 1)
wl = np.zeros(len(deglist) + 1)
wlcurv = np.zeros(len(deglist) + 1)
for l in deglist:
wl[l] = gravmag.DownContFilterMA(l, half, r, d)
wlcurv[l] = gravmag.DownContFilterMC(l, half, r, d)
fig = plt.figure()
plt.plot(deglist, wl[1:], 'b-', label='Minimum amplitude')
plt.plot(deglist, wlcurv[1:], 'r-', label='Minimum curvature')
plt.xlabel('degree, l')
plt.ylabel('W')
plt.legend()
fig.savefig('Filter.png')
def TestMakeMagGrid():
infile = '../../ExampleDataFiles/FSU_mars90.sh'
clm, lmax, header = shio.shread(infile, header=True, skip=1)
r0 = float(header[0]) * 1.e3
a = constant.r_mars + 145.0e3 # radius to evaluate the field
rad, theta, phi, total = gravmag.MakeMagGridDH(clm, r0, lmax=719, a=a,
f=constant.f_mars,
lmax_calc=90)
fig, axes = plt.subplots(2, 2)
for num, vv, s in ((0, rad, "$B_{r}$"), (1, theta, "$B_{\\theta}$"),
(2, phi, "$B_{\phi}$"), (3, total, "$|B|$")):
if (num == 3):
axes.flat[num].imshow(vv, vmin=0, vmax=700)
else:
axes.flat[num].imshow(vv)
axes.flat[num].set_title(s)
axes.flat[num].set_xticks(())
axes.flat[num].set_yticks(())
fig.savefig('Mars_Mag.png')
ls = np.arange(lmax + 1)
pspectrum = gravmag.mag_spectrum(clm, r0, r0)
fig_spectrum, ax = plt.subplots(1, 1)
ax.set_xscale('linear')
ax.set_yscale('log')
ax.set_xlabel('degree, l')
ax.set_ylabel('Power')
ax.grid(True, which='both')
ax.plot(ls[1:], pspectrum[1:], label='Magnetic power spectrum')
ax.legend()
fig_spectrum.savefig('Mars_MagPowerSpectrum.png')
# ==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| bsd-3-clause |
cdeboever3/cdpybio | cdpybio/analysis.py | 1 | 43419 | import pandas as pd
chrom_sizes = pd.Series(
{1: 249250621,
10: 135534747,
11: 135006516,
12: 133851895,
13: 115169878,
14: 107349540,
15: 102531392,
16: 90354753,
17: 81195210,
18: 78077248,
19: 59128983,
2: 243199373,
20: 63025520,
21: 48129895,
22: 51304566,
3: 198022430,
4: 191154276,
5: 180915260,
6: 171115067,
7: 159138663,
8: 146364022,
9: 141213431,
}
)
chrom_sizes_norm = chrom_sizes / chrom_sizes.max()
def _make_tableau20():
# tableau20 from # http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib
# accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
return tableau20
tableau20 = _make_tableau20()
def generate_null_snvs(df, snvs, num_null_sets=5):
"""
Generate a set of null SNVs based on an input list of SNVs and categorical
annotations.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe where each column is a categorization of SNPs.
The index should be SNPs of the form chrom:pos.
snvs : list
List of input SNVs in the format chrom:pos. Entries that aren't in
the index of df will be dropped.
num_null_sets : int
Number of sets of null SNVs to generate.
Returns
-------
null_sets : pandas.Dataframe
Pandas dataframe with input SNVs as first column and null SNVs as
following columns.
"""
import numpy as np
import random
random.seed(20151007)
input_snvs = list(set(df.index) & set(snvs))
sig = df.ix[input_snvs]
not_sig = df.ix[set(df.index) - set(snvs)]
sig['group'] = sig.apply(lambda x: '::'.join(x), axis=1)
not_sig['group'] = not_sig.apply(lambda x: '::'.join(x), axis=1)
null_sets = []
vc = sig.group.value_counts()
bins = {c:sorted(list(df[c].value_counts().index)) for c in df.columns}
ordered_inputs = []
for i in vc.index:
ordered_inputs += list(sig[sig.group == i].index)
tdf = not_sig[not_sig.group == i]
count = vc[i]
for n in xrange(num_null_sets):
if tdf.shape[0] == 0:
groups = [i]
while tdf.shape[0] == 0:
# If there are no potential null SNVs in this group, we'll
# expand the group randomly.
g = groups[-1]
# Choose random bin.
cols = list(not_sig.columns)
cols.remove('group')
b = random.choice(cols)
# Get possibilities for that bin.
t = bins[b]
# Get last set of bin values and the value for the bin we
# want to change.
d = dict(zip(not_sig.columns, g.split('::')))
cat = d[b]
# Randomly walk away from bin value.
ind = t.index(cat)
if ind == 0:
ind += 1
elif ind == len(t) - 1:
ind -= 1
else:
ind += random.choice([-1, 1])
d[b] = t[ind]
groups.append('::'.join(pd.Series(d)[not_sig.columns].astype(str)))
tdf = not_sig[not_sig.group.apply(lambda x: x in groups)]
if count <= tdf.shape[0]:
ind = random.sample(tdf.index, count)
else:
ind = list(np.random.choice(tdf.index, size=count, replace=True))
if i == vc.index[0]:
null_sets.append(ind)
else:
null_sets[n] += ind
null_sets = pd.DataFrame(null_sets).T
null_sets.columns = ['null_{}'.format(x) for x in null_sets.columns]
cs = list(null_sets.columns)
null_sets['input'] = ordered_inputs
null_sets = null_sets[['input'] + cs]
return null_sets
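# Added illustrative sketch; `annot` and the SNV IDs below are hypothetical:
#   annot = pd.DataFrame({'maf_bin': [...], 'tss_bin': [...]},
#                        index=['chr1:1000', 'chr1:2000', ...])
#   null_sets = generate_null_snvs(annot, ['chr1:1000'], num_null_sets=10)
# The returned frame has the input SNVs in its first column and one column per
# set of matched null SNVs.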
def make_grasp_phenotype_file(fn, pheno, out):
"""
Subset the GRASP database on a specific phenotype.
Parameters
----------
fn : str
Path to GRASP database file.
pheno : str
Phenotype to extract from database.
    out : str
Path to output file for subset of GRASP database.
"""
import subprocess
c = 'awk -F "\\t" \'NR == 1 || $12 == "{}" \' {} > {}'.format(
pheno.replace("'", '\\x27'), fn, out)
subprocess.check_call(c, shell=True)
def parse_grasp_gwas(fn):
"""
Read GRASP database and filter for unique hits.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
"""
df = pd.read_table(fn, low_memory=False)
df = df[df.Pvalue < 1e-5]
df = df.sort(columns=['chr(hg19)', 'pos(hg19)', 'Pvalue'])
df = df.drop_duplicates(subset=['chr(hg19)', 'pos(hg19)'])
df = df[df.Pvalue < 1e-5]
df['chrom'] = 'chr' + df['chr(hg19)'].astype(str)
df['end'] = df['pos(hg19)']
df['start'] = df.end - 1
df['rsid'] = df['SNPid(in paper)']
df['pvalue'] = df['Pvalue']
df = df[['chrom', 'start', 'end', 'rsid', 'pvalue']]
df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
return df
def parse_roadmap_gwas(fn):
"""
Read Roadmap GWAS file and filter for unique, significant (p < 1e-5)
SNPs.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
"""
df = pd.read_table(fn, low_memory=False,
names=['chrom', 'start', 'end', 'rsid', 'pvalue'])
df = df[df.pvalue < 1e-5]
df = df.sort(columns=['chrom', 'start', 'pvalue'])
df = df.drop_duplicates(subset=['chrom', 'start'])
df = df[df['chrom'] != 'chrY']
df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
return df
def ld_prune(df, ld_beds, snvs=None):
"""
Prune set of GWAS based on LD and significance. A graph of all SNVs is
constructed with edges for LD >= 0.8 and the most significant SNV per
connected component is kept.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe with unique SNVs. The index is of the form chrom:pos
where pos is the one-based position of the SNV. The columns must include
chrom, start, end, and pvalue. chrom, start, end make a zero-based bed
file with the SNV coordinates.
ld_beds : dict
Dict whose keys are chromosomes and whose values are filenames of
tabixed LD bed files. An LD bed file looks like "chr1 11007 11008
11008:11012:1" where the first three columns are the zero-based
half-open coordinate of the SNV and the fourth column has the one-based
        coordinate of the SNV followed by the one-based coordinate of a
different SNV and the LD between them. In this example, the variants are
in perfect LD. The bed file should also contain the reciprocal line for
this LD relationship: "chr1 11011 11012 11012:11008:1".
snvs : list
List of SNVs to filter against. If a SNV is not in this list, it will
not be included. If you are working with GWAS SNPs, this is useful for
filtering out SNVs that aren't in the SNPsnap database for instance.
Returns
-------
out : pandas.DataFrame
Pandas dataframe in the same format as the input dataframe but with only
independent SNVs.
"""
import networkx as nx
import tabix
if snvs:
df = df.ix[set(df.index) & set(snvs)]
keep = set()
for chrom in ld_beds.keys():
tdf = df[df['chrom'].astype(str) == chrom]
if tdf.shape[0] > 0:
f = tabix.open(ld_beds[chrom])
# Make a dict where each key is a SNP and the values are all of the
# other SNPs in LD with the key.
ld_d = {}
for j in tdf.index:
p = tdf.ix[j, 'end']
ld_d[p] = []
try:
r = f.query(chrom, p - 1, p)
while True:
try:
n = r.next()
p1, p2, r2 = n[-1].split(':')
if float(r2) >= 0.8:
ld_d[p].append(int(p2))
except StopIteration:
break
                except tabix.TabixError:
continue
# Make adjacency matrix for LD.
cols = sorted(list(set(
[item for sublist in ld_d.values() for item in sublist])))
t = pd.DataFrame(0, index=ld_d.keys(), columns=cols)
for k in ld_d.keys():
t.ix[k, ld_d[k]] = 1
t.index = ['{}:{}'.format(chrom, x) for x in t.index]
t.columns = ['{}:{}'.format(chrom, x) for x in t.columns]
# Keep all SNPs not in LD with any others. These will be in the index
# but not in the columns.
keep |= set(t.index) - set(t.columns)
# Filter so we only have SNPs that are in LD with at least one other
# SNP.
ind = list(set(t.columns) & set(t.index))
# Keep one most sig. SNP per connected subgraph.
t = t.ix[ind, ind]
g = nx.Graph(t.values)
c = nx.connected_components(g)
while True:
try:
sg = c.next()
s = tdf.ix[t.index[list(sg)]]
keep.add(s[s.pvalue == s.pvalue.min()].index[0])
except StopIteration:
break
out = df.ix[keep]
return out
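# Added illustrative sketch; the LD bed paths below are hypothetical:
#   ld_beds = {'chr{}'.format(i): '/path/to/ld/chr{}.bed.gz'.format(i)
#              for i in range(1, 23)}
#   independent = ld_prune(gwas_df, ld_beds)
# This keeps only the most significant SNV per connected component of the
# r^2 >= 0.8 LD graph.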
def ld_expand(df, ld_beds):
"""
Expand a set of SNVs into all SNVs with LD >= 0.8 and return a BedTool of
the expanded SNPs.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe with SNVs. The index is of the form chrom:pos where pos
is the one-based position of the SNV. The columns are chrom, start, end.
chrom, start, end make a zero-based bed file with the SNV coordinates.
ld_beds : dict
Dict whose keys are chromosomes and whose values are filenames of
tabixed LD bed files. The LD bed files should be formatted like this:
chr1 14463 14464 14464:51479:0.254183
where the the first three columns indicate the zero-based coordinates of
a SNV and the the fourth column has the one-based coordinate of that
SNV, the one-based coordinate of another SNV on the same chromosome, and
the LD between these SNVs (all separated by colons).
Returns
-------
bt : pybedtools.BedTool
        BedTool with the input SNVs and all SNVs they are in LD with.
"""
import pybedtools as pbt
import tabix
out_snps = []
for chrom in ld_beds.keys():
t = tabix.open(ld_beds[chrom])
tdf = df[df['chrom'].astype(str) == chrom]
for ind in tdf.index:
p = tdf.ix[ind, 'end']
out_snps.append('{}\t{}\t{}\t{}\n'.format(chrom, p - 1, p, ind))
try:
r = t.query('{}'.format(chrom), p - 1, p)
while True:
try:
n = r.next()
p1, p2, r2 = n[-1].split(':')
if float(r2) >= 0.8:
out_snps.append('{}\t{}\t{}\t{}\n'.format(
n[0], int(p2) - 1, int(p2), ind))
except StopIteration:
break
except tabix.TabixError:
continue
bt = pbt.BedTool(''.join(out_snps), from_string=True)
bt = bt.sort()
return bt
def liftover_bed(
bed,
chain,
mapped=None,
unmapped=None,
liftOver_path='liftOver',
):
"""
Lift over a bed file using a given chain file.
Parameters
----------
bed : str or pybedtools.BedTool
Coordinates to lift over.
chain : str
Path to chain file to use for lift over.
mapped : str
Path for bed file with coordinates that are lifted over correctly.
unmapped : str
Path for text file to store coordinates that did not lift over
correctly. If this is not provided, these are discarded.
liftOver_path : str
Path to liftOver executable if not in path.
Returns
-------
new_coords : pandas.DataFrame
Pandas data frame with lift over results. Index is old coordinates in
the form chrom:start-end and columns are chrom, start, end and loc
(chrom:start-end) in new coordinate system.
"""
import subprocess
import pybedtools as pbt
if mapped == None:
import tempfile
mapped = tempfile.NamedTemporaryFile()
mname = mapped.name
else:
mname = mapped
if unmapped == None:
import tempfile
unmapped = tempfile.NamedTemporaryFile()
uname = unmapped.name
else:
uname = unmapped
if type(bed) == str:
bt = pbt.BedTool(bed)
elif type(bed) == pbt.bedtool.BedTool:
bt = bed
    else:
        raise ValueError('bed must be a file path or a pybedtools.BedTool.')
bt = bt.sort()
c = '{} {} {} {} {}'.format(liftOver_path, bt.fn, chain, mname, uname)
subprocess.check_call(c, shell=True)
with open(uname) as f:
missing = pbt.BedTool(''.join([x for x in f.readlines()[1::2]]),
from_string=True)
bt = bt.subtract(missing)
bt_mapped = pbt.BedTool(mname)
old_loc = []
for r in bt:
old_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_loc = []
new_chrom = []
new_start = []
new_end = []
for r in bt_mapped:
new_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_chrom.append(r.chrom)
new_start.append(r.start)
new_end.append(r.end)
new_coords = pd.DataFrame({'loc':new_loc, 'chrom': new_chrom,
'start': new_start, 'end': new_end},
index=old_loc)
for f in [mapped, unmapped]:
try:
f.close()
except AttributeError:
continue
return new_coords
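# Added illustrative sketch; the bed and chain file paths are hypothetical:
#   new_coords = liftover_bed('snvs_hg19.bed', 'hg19ToHg38.over.chain.gz')
#   new_coords.ix['chr1:1000-1001']  # chrom, start, end, loc in the new assembly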
def deseq2_size_factors(counts, meta, design):
"""
Get size factors for counts using DESeq2.
Parameters
----------
counts : pandas.DataFrame
Counts to pass to DESeq2.
meta : pandas.DataFrame
Pandas dataframe whose index matches the columns of counts. This is
passed to DESeq2's colData.
design : str
Design like ~subject_id that will be passed to DESeq2. The design
variables should match columns in meta.
Returns
-------
sf : pandas.Series
Series whose index matches the columns of counts and whose values are
the size factors from DESeq2. Divide each column by its size factor to
obtain normalized counts.
"""
import rpy2.robjects as r
from rpy2.robjects import pandas2ri
pandas2ri.activate()
r.r('suppressMessages(library(DESeq2))')
r.globalenv['counts'] = counts
r.globalenv['meta'] = meta
r.r('dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, '
'design={})'.format(design))
r.r('dds = estimateSizeFactors(dds)')
r.r('sf = sizeFactors(dds)')
sf = r.globalenv['sf']
return pd.Series(sf, index=counts.columns)
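# Added illustrative sketch: dividing each column of the count matrix by its
# size factor gives normalized counts.
#   sf = deseq2_size_factors(counts, meta, '~subject_id')
#   norm_counts = counts / sf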
def goseq_gene_enrichment(genes, sig, plot_fn=None, length_correct=True):
"""
Perform goseq enrichment for an Ensembl gene set.
Parameters
----------
genes : list
List of all genes as Ensembl IDs.
sig : list
List of boolean values indicating whether each gene is significant or
not.
plot_fn : str
Path to save length bias plot to. If not provided, the plot is deleted.
length_correct : bool
Correct for length bias.
Returns
-------
go_results : pandas.DataFrame
        Dataframe with goseq results as well as Benjamini-Hochberg corrected
p-values.
"""
import os
import readline
import statsmodels.stats.multitest as smm
import rpy2.robjects as r
genes = list(genes)
sig = [bool(x) for x in sig]
r.r('suppressMessages(library(goseq))')
r.globalenv['genes'] = list(genes)
r.globalenv['group'] = list(sig)
r.r('group = as.logical(group)')
r.r('names(group) = genes')
r.r('pwf = nullp(group, "hg19", "ensGene")')
if length_correct:
r.r('wall = goseq(pwf, "hg19", "ensGene")')
else:
r.r('wall = goseq(pwf, "hg19", "ensGene", method="Hypergeometric")')
r.r('t = as.data.frame(wall)')
t = r.globalenv['t']
go_results = pd.DataFrame(columns=list(t.colnames))
for i, c in enumerate(go_results.columns):
go_results[c] = list(t[i])
    reject, pvals_corr, alpha_sidak, alpha_bonf = smm.multipletests(
        go_results.over_represented_pvalue, alpha=0.05, method='fdr_bh')
    go_results['over_represented_pvalue_bh'] = pvals_corr
    reject, pvals_corr, alpha_sidak, alpha_bonf = smm.multipletests(
        go_results.under_represented_pvalue, alpha=0.05, method='fdr_bh')
    go_results['under_represented_pvalue_bh'] = pvals_corr
go_results.index = go_results.category
go_results = go_results.drop('category', axis=1)
if plot_fn and os.path.exists('Rplots.pdf'):
from os import rename
rename('Rplots.pdf', plot_fn)
elif os.path.exists('Rplots.pdf'):
from os import remove
remove('Rplots.pdf')
return go_results
def categories_to_colors(cats, colormap=None):
"""
Map categorical data to colors.
Parameters
----------
cats : pandas.Series or list
Categorical data as a list or in a Series.
colormap : list
List of RGB triples. If not provided, the tableau20 colormap defined in
this module will be used.
Returns
-------
legend : pd.Series
Series whose values are colors and whose index are the original
categories that correspond to those colors.
"""
if colormap is None:
colormap = tableau20
if type(cats) != pd.Series:
cats = pd.Series(cats)
legend = pd.Series(dict(zip(set(cats), colormap)))
# colors = pd.Series([legend[x] for x in cats.values], index=cats.index)
# I've removed this output:
# colors : pd.Series
# Series whose values are the colors for each category. If cats was a
# Series, then out will have the same index as cats.
return(legend)
def plot_color_legend(legend, horizontal=False, ax=None):
"""
Plot a pandas Series with labels and colors.
Parameters
----------
legend : pandas.Series
Pandas Series whose values are RGB triples and whose index contains
categorical labels.
horizontal : bool
If True, plot horizontally.
ax : matplotlib.axis
Axis to plot on.
Returns
-------
ax : matplotlib.axis
Plot axis.
"""
import matplotlib.pyplot as plt
import numpy as np
t = np.array([np.array([x for x in legend])])
if ax is None:
fig, ax = plt.subplots(1, 1)
if horizontal:
ax.imshow(t, interpolation='none')
ax.set_yticks([])
ax.set_xticks(np.arange(0, legend.shape[0]))
t = ax.set_xticklabels(legend.index)
else:
t = t.reshape([legend.shape[0], 1, 3])
ax.imshow(t, interpolation='none')
ax.set_xticks([])
ax.set_yticks(np.arange(0, legend.shape[0]))
t = ax.set_yticklabels(legend.index)
return ax
def make_color_legend_rects(colors, labels=None):
"""
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
        rectangles to your axis using ax.add_patch(r) for r in out, then create a
        legend whose handles are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
"""
from matplotlib.pyplot import Rectangle
if labels:
d = dict(zip(labels, colors))
se = pd.Series(d)
else:
se = colors
rects = []
for i in se.index:
r = Rectangle((0, 0), 0, 0, fc=se[i])
rects.append(r)
out = pd.Series(rects, index=se.index)
return out
class SVD:
def __init__(self, df, mean_center=True, scale_variance=False, full_matrices=False):
"""
Perform SVD for data matrix using scipy.linalg.svd. Note that this is currently inefficient
for large matrices due to some of the pandas operations.
Parameters
----------
df : pandas.DataFrame
Pandas data frame with data.
mean_center : bool
If True, mean center the rows. This should be done if not already
done.
scale_variance : bool
If True, scale the variance of each row to be one. Combined with
mean centering, this will transform your data into z-scores.
full_matrices : bool
Passed to scipy.linalg.svd. If True, U and Vh are of shape (M, M), (N, N). If False, the
shapes are (M, K) and (K, N), where K = min(M, N).
"""
import copy
self.data_orig = copy.deepcopy(df)
self.data = copy.deepcopy(df)
if mean_center:
self.data = (self.data.T - self.data.mean(axis=1)).T
if scale_variance:
self.data = (self.data.T / self.data.std(axis=1)).T
self._perform_svd(full_matrices)
def _perform_svd(self, full_matrices):
from scipy.linalg import svd
u, s, vh = svd(self.data, full_matrices=full_matrices)
self.u_orig = u
self.s_orig = s
self.vh_orig = vh
self.u = pd.DataFrame(
u,
index=self.data.index,
columns=['PC{}'.format(x) for x in range(1, u.shape[1] + 1)],
)
self.v = pd.DataFrame(
vh.T,
index=self.data.columns,
columns=['PC{}'.format(x) for x in range(1, vh.shape[0] + 1)],
)
index = ['PC{}'.format(x) for x in range(1, len(s) + 1)]
self.s_norm = pd.Series(s / s.sum(), index=index)
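    # Added illustrative usage sketch; `exp` is a hypothetical genes-by-samples
    # expression DataFrame:
    #   svd = SVD(exp, mean_center=True, scale_variance=True)
    #   svd.plot_variance_explained(cumulative=True)
    #   svd.plot_pc_scatter('PC1', 'PC2')  # plots the columns of `exp` via v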
def plot_variance_explained(self, cumulative=False, xtick_start=1,
xtick_spacing=1, num_pc=None):
"""
Plot amount of variance explained by each principal component.
Parameters
----------
num_pc : int
Number of principal components to plot. If None, plot all.
cumulative : bool
If True, include cumulative variance.
xtick_start : int
The first principal component to label on the x-axis.
xtick_spacing : int
The spacing between labels on the x-axis.
"""
import matplotlib.pyplot as plt
from numpy import arange
if num_pc:
s_norm = self.s_norm[0:num_pc]
else:
s_norm = self.s_norm
if cumulative:
s_cumsum = s_norm.cumsum()
plt.bar(range(s_cumsum.shape[0]), s_cumsum.values,
label='Cumulative', color=(0.17254901960784313,
0.6274509803921569,
0.17254901960784313))
plt.bar(range(s_norm.shape[0]), s_norm.values, label='Per PC',
color=(0.12156862745098039, 0.4666666666666667,
0.7058823529411765))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('Variance')
else:
plt.bar(range(s_norm.shape[0]), s_norm.values,
color=(0.12156862745098039, 0.4666666666666667,
0.7058823529411765))
plt.ylabel('Proportion variance explained')
plt.xlabel('PC')
plt.xlim(0, s_norm.shape[0])
tick_locs = arange(xtick_start - 1, s_norm.shape[0],
step=xtick_spacing)
# 0.8 is the width of the bars.
tick_locs = tick_locs + 0.4
plt.xticks(tick_locs,
arange(xtick_start, s_norm.shape[0] + 1, xtick_spacing))
def plot_pc_scatter(self, pc1, pc2, v=True, subset=None, ax=None,
color=None, s=None, marker=None, color_name=None,
s_name=None, marker_name=None):
"""
Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points.
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
        fig : matplotlib.figure.Figure
            Figure containing the scatter plot.
        ax : matplotlib.axes._subplots.AxesSubplot
            Scatter plot axis.
TODO: Add ability to label points.
"""
import matplotlib.pyplot as plt
import seaborn as sns
        if s is not None:
            assert len(set(s)) <= 7, 'Error: too many values for "s"'
if v:
df = self.v
else:
df = self.u
if color is not None:
if color.unique().shape[0] <= 10:
colormap = pd.Series(dict(zip(set(color.values),
tableau20[0:2 * len(set(color)):2])))
else:
colormap = pd.Series(dict(zip(set(color.values),
sns.color_palette('husl', len(set(color))))))
color = pd.Series([colormap[x] for x in color.values],
index=color.index)
color_legend = True
if not color_name:
color_name = color.index.name
else:
color = pd.Series([tableau20[0]] * df.shape[0], index=df.index)
color_legend = False
if s is not None:
smap = pd.Series(dict(zip(
set(s.values), range(30, 351)[0::50][0:len(set(s)) + 1])))
s = pd.Series([smap[x] for x in s.values],
index=s.index)
s_legend = True
if not s_name:
s_name = s.index.name
else:
s = pd.Series(30, index=df.index)
s_legend = False
markers = ['o', '*', 's', 'v', '+', 'x', 'd',
'p', '2', '<', '|', '>', '_', 'h',
'1', '2', '3', '4', '8', '^', 'D']
if marker is not None:
markermap = pd.Series(dict(zip(set(marker.values), markers)))
marker = pd.Series([markermap[x] for x in marker.values],
index=marker.index)
marker_legend = True
if not marker_name:
marker_name = marker.index.name
else:
marker = pd.Series('o', index=df.index)
marker_legend = False
        if ax is None:
            fig, ax = plt.subplots(1, 1)
        else:
            fig = ax.get_figure()
for m in set(marker.values):
mse = marker[marker == m]
cse = color[mse.index]
sse = s[mse.index]
            ax.scatter(df.loc[mse.index, pc1], df.loc[mse.index, pc2],
                       s=sse.values, color=list(cse.values), marker=m,
                       alpha=0.8)
ax.set_title('{} vs. {}'.format(pc1, pc2))
ax.set_xlabel(pc1)
ax.set_ylabel(pc2)
        lgd = None
        if color_legend:
legend_rects = make_color_legend_rects(colormap)
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index,
title=color_name,
loc='upper left',
bbox_to_anchor=(1, 1))
if s_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in smap.index:
ax.scatter([xb + 1], [yb + 1], marker='o',
s=smap[i], color='black', label=i)
lgd = ax.legend(title=s_name, loc='center left',
bbox_to_anchor=(1, 0.5))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
if marker_legend:
if lgd:
lgd = ax.add_artist(lgd)
xa, xb = ax.get_xlim()
ya, yb = ax.get_ylim()
for i in markermap.index:
t = ax.scatter([xb + 1], [yb + 1], marker=markermap[i],
s=sse.min(), color='black', label=i)
handles, labels = ax.get_legend_handles_labels()
if s_legend:
handles = handles[len(smap):]
labels = labels[len(smap):]
lgd = ax.legend(handles, labels, title=marker_name,
loc='lower left', bbox_to_anchor=(1, 0))
ax.set_xlim(xa, xb)
ax.set_ylim(ya, yb)
# fig.tight_layout()
return fig, ax
def pc_correlation(self, covariates, num_pc=5):
"""
        Calculate the correlation between the first num_pc principal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
corr : pandas.Panel
Panel with correlation values and p-values.
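        Examples
        --------
        A hypothetical sketch; ``decomp`` stands in for an instance of this
        class and ``covariates`` for a DataFrame indexed like ``decomp.v``.
        >>> corr = decomp.pc_correlation(covariates, num_pc=5)  # doctest: +SKIP
        >>> corr['pvalue']  # doctest: +SKIP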
"""
from scipy.stats import spearmanr
if (covariates.shape[0] == self.u.shape[0] and
len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
mat = self.u
elif (covariates.shape[0] == self.v.shape[0] and
len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
mat = self.v
else:
import sys
sys.stderr.write('Covariates differ in size from input data.\n')
sys.exit(1)
corr = pd.Panel(items=['rho', 'pvalue'],
major_axis=covariates.columns,
minor_axis=mat.columns[0:num_pc])
for i in corr.major_axis:
for j in corr.minor_axis:
rho, p = spearmanr(covariates[i], mat[j])
corr.ix['rho', i, j] = rho
corr.ix['pvalue', i, j] = p
return corr
def pc_anova(self, covariates, num_pc=5):
"""
        Calculate one-way ANOVA between the first num_pc principal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
anova : pandas.Panel
Panel with F-values and p-values.
"""
from scipy.stats import f_oneway
if (covariates.shape[0] == self.u.shape[0] and
len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
mat = self.u
        elif (covariates.shape[0] == self.v.shape[0] and
              len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
            mat = self.v
        else:
            import sys
            sys.stderr.write('Covariates differ in size from input data.\n')
            sys.exit(1)
anova = pd.Panel(items=['fvalue', 'pvalue'],
major_axis=covariates.columns,
minor_axis=mat.columns[0:num_pc])
for i in anova.major_axis:
for j in anova.minor_axis:
t = [mat[j][covariates[i] == x] for x in set(covariates[i])]
f, p = f_oneway(*t)
anova.ix['fvalue', i, j] = f
anova.ix['pvalue', i, j] = p
return anova
def manhattan_plot(
res,
ax,
p_filter=1,
p_cutoff=None,
marker_size=10,
font_size=8,
chrom_labels=range(1, 23)[0::2],
label_column=None,
category_order=None,
legend=True,
):
"""
Make Manhattan plot for GWAS results. Currently only support autosomes.
Parameters
----------
res : pandas.DataFrame
        GWAS results. The following columns are required - chrom (chromosome,
int), pos (genomic position, int), P (GWAS p-value, float).
ax : matplotlib.axis
Matplotlib axis to make Manhattan plot on.
p_filter : float
Only plot p-values smaller than this cutoff. This is useful for testing
because filtering on p-values speeds up the plotting.
p_cutoff : float
Plot horizontal line at this p-value.
marker_size : int
Size of Manhattan markers.
font_size : int
Font size for plots.
chrom_labels : list
        List of ints indicating which chromosomes to label. You may want to
        adjust this based on the size of the plot. Currently only integers
1-22 are supported.
label_column : str
String with column name from res. This column should contain a
categorical annotation for each variant. These will be indicated by
colors.
category_order : list
If label_column is not None, you can provide a list of the categories
that are contained in the label_column. This will be used to assign the
color palette and will specify the z-order of the categories.
legend : boolean
If True and label_column is not None, plot a legend.
Returns
-------
res : pandas.Dataframe
GWAS results. The results will have additional columns that were used
for plotting.
ax : matplotlib.axis
Axis with the Manhattan plot.
colors : pd.Series or None
If label_column is None, this will be None. Otherwise, if a label_column
is specified, this will be a series with a mapping between the labels
and the colors for each label.
"""
# TODO: It might make sense to allow a variable that specifies the z-order
# of labels in label_column. If there are many labels and points in the same
# place, certain annotations will be preferentially shown.
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Filter results based on p-value.
if p_filter < 1:
res = res[res['P'] < p_filter]
# Assign x coordinates for each association.
res['xpos'] = np.nan
chrom_vc = res['chrom'].value_counts()
    # total_length is arbitrary, but it's a little easier to work with than the
    # normalized chromosome sizes because it avoids very small numbers.
total_length = 1000
right = chrom_sizes_norm.cumsum()
right = right / right[22] * total_length
left = chrom_sizes_norm.cumsum() - chrom_sizes_norm[1]
left = pd.Series(0, range(1, 23))
left[1:23] = right[0:21].values
for chrom in range(1, 23):
if chrom in res['chrom'].values:
res.loc[res['chrom'] == chrom, 'xpos'] = np.linspace(
left[chrom], right[chrom], chrom_vc[chrom])
# Assign colors.
grey = mpl.colors.to_rgb('grey')
light_grey = (0.9, 0.9, 0.9)
middle_grey = (0.8, 0.8, 0.8)
# I first set everything to black, but in the end everything should be
# changed to one of the greys (or other colors if there is an annotation
# column). If there are black points on the plot, that indicates a problem.
res['color'] = 'black'
for chrom in range(1, 23)[0::2]:
if chrom in res['chrom'].values:
ind = res[res.chrom == chrom].index
res.loc[ind, 'color'] = pd.Series([grey for x in ind], index=ind)
for chrom in range(1, 23)[1::2]:
if chrom in res['chrom'].values:
ind = res[res.chrom == chrom].index
res.loc[ind, 'color'] = pd.Series([middle_grey for x in ind], index=ind)
if label_column is not None:
if category_order is not None:
assert set(category_order) == set(res[label_column].dropna())
categories = category_order
else:
categories = list(set(res[label_column].dropna()))
colors = categories_to_colors(
categories,
colormap=sns.color_palette('colorblind'),
)
for cat in categories:
ind = res[res[label_column] == cat].index
res.loc[ind, 'color'] = pd.Series([colors[cat] for x in ind],
index=ind)
# Plot
if label_column is not None:
ind = res[res[label_column].isnull()].index
ax.scatter(
res.loc[ind, 'xpos'],
-np.log10(res.loc[ind, 'P']),
color=res.loc[ind, 'color'],
s=marker_size,
alpha=0.75,
rasterized=True,
label=None,
)
for cat in reversed(categories):
ind = res[res[label_column] == cat].index
ax.scatter(
res.loc[ind, 'xpos'],
-np.log10(res.loc[ind, 'P']),
color=res.loc[ind, 'color'],
s=marker_size,
alpha=0.75,
rasterized=True,
label=None,
)
else:
ax.scatter(
res['xpos'],
-np.log10(res['P']),
color=res['color'],
s=marker_size,
alpha=0.75,
rasterized=True,
label=None,
)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
ax.grid(axis='x')
ax.grid(axis='y')
ax.grid(axis='y', alpha=0.5, ls='-', lw=0.6)
if p_cutoff is not None:
ax.hlines(
-np.log10(p_cutoff),
-5,
total_length + 5,
color='red',
linestyles='--',
lw=0.8,
alpha=0.5,
)
# These next two lines add background shading. I may add back in as option.
# for chrom in range(1, 23)[0::2]:
# ax.axvspan(left[chrom], right[chrom], facecolor=(0.4, 0.4, 0.4), alpha=0.2, lw=0)
ax.set_xlim(-5, total_length + 5)
ax.set_ylim(0, ymax)
# Set chromosome labels
# ind = range(1, 23)[0::2]
# if skip19:
# ind = [x for x in ind if x != 19]
ind = [x for x in chrom_labels if x in range(1, 23)]
ax.set_xticks(left[ind] + (right[ind] - left[ind]) / 2)
ax.set_xticklabels(ind, fontsize=font_size)
    ax.set_ylabel(r'$-\log_{10} p$ value', fontsize=font_size)
for t in ax.get_xticklabels() + ax.get_yticklabels():
t.set_fontsize(font_size)
if label_column is not None and legend:
for cat in categories:
ax.scatter(
-100,
-100,
s=marker_size,
color=colors[cat],
label=cat,
)
if legend:
ax.legend(
            fontsize=font_size - 1,
framealpha=0.5,
frameon=True,
facecolor='white',
)
# TODO: eventually, it would be better to be smarter about the x-axis
# limits. Depending on the size of the markers and plot, some of the markers
# might be cut off.
ax.set_xlim(-5, total_length + 5)
# TODO: eventually, it would be better to be smarter about the y-axis
# limits. Depending on the size of the markers and plot, some of the markers
# might be cut off. Matplotlib doesn't know anything about the size of the
# markers, so it might set the y-limit too low.
ax.set_ylim(-1 * np.log10(p_filter), ymax)
if label_column is None:
colors = None
return(res, ax, colors)
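# A minimal, hypothetical demo of manhattan_plot on synthetic data. It assumes
# this module's own top-level imports and helpers (e.g. pandas as pd and
# chrom_sizes_norm, both used inside the function above) are available; the
# column names follow the docstring (chrom, pos, P).
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    n = 5000
    demo = pd.DataFrame({
        'chrom': rng.randint(1, 23, size=n),
        'pos': rng.randint(1, int(2.5e8), size=n),
        'P': rng.uniform(size=n),
    })
    fig, ax = plt.subplots(figsize=(8, 3))
    demo, ax, colors = manhattan_plot(demo, ax, p_cutoff=5e-8)
    plt.show()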
| mit |
zingale/pyro2 | swe/problems/logo.py | 2 | 2049 | from __future__ import print_function
import sys
import mesh.patch as patch
import numpy as np
from util import msg
import matplotlib.pyplot as plt
def init_data(my_data, rp):
""" initialize the sedov problem """
msg.bold("initializing the logo problem...")
# make sure that we are passed a valid patch object
if not isinstance(my_data, patch.CellCenterData2d):
print("ERROR: patch invalid in sedov.py")
print(my_data.__class__)
sys.exit()
# create the logo
myg = my_data.grid
fig = plt.figure(2, (0.64, 0.64), dpi=100*myg.nx/64)
fig.add_subplot(111)
fig.text(0.5, 0.5, "pyro", transform=fig.transFigure, fontsize="16",
horizontalalignment="center", verticalalignment="center")
plt.axis("off")
fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
logo = np.rot90(np.rot90(np.rot90((256-data[:, :, 1])/255.0)))
# get the height, momenta as separate variables
h = my_data.get_var("height")
xmom = my_data.get_var("x-momentum")
ymom = my_data.get_var("y-momentum")
X = my_data.get_var("fuel")
myg = my_data.grid
# initialize the components
h[:, :] = 1.0
xmom[:, :] = 0.0
ymom[:, :] = 0.0
# set the height in the logo zones to be really large
logo_h = 2
h.v()[:, :] = logo[:, :] * logo_h
X.v()[:, :] = logo[:, :]
corner_height = 2
# explosion
h[myg.ilo, myg.jlo] = corner_height
h[myg.ilo, myg.jhi] = corner_height
h[myg.ihi, myg.jlo] = corner_height
h[myg.ihi, myg.jhi] = corner_height
v = 1
xmom[myg.ilo, myg.jlo] = v
xmom[myg.ilo, myg.jhi] = v
xmom[myg.ihi, myg.jlo] = -v
xmom[myg.ihi, myg.jhi] = -v
ymom[myg.ilo, myg.jlo] = v
ymom[myg.ilo, myg.jhi] = -v
ymom[myg.ihi, myg.jlo] = v
ymom[myg.ihi, myg.jhi] = -v
X[:, :] *= h
def finalize():
""" print out any information to the user at the end of the run """
| bsd-3-clause |
kubeflow/code-intelligence | py/code_intelligence/github_bigquery.py | 1 | 2756 | """This module contains code to get issue data from BigQuery."""
import dateutil
import json
from pandas.io import gbq
import re
def get_issues(login, project, max_age_days=None):
"""Get issue data from bigquery.
Args:
login: Which GitHub organization to query for
project: GCP project to charge BigQuery to
    max_age_days: (Optional) If present, only fetch issues that were created
      less than max_age_days ago
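  Example:
    A hypothetical call (the org and project names below are made up):
      issues = get_issues("kubeflow", "my-gcp-project", max_age_days=30)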
"""
query = f"""SELECT
JSON_EXTRACT(payload, '$.issue.html_url') as html_url,
JSON_EXTRACT(payload, '$.issue.title') as title,
JSON_EXTRACT(payload, '$.issue.body') as body,
JSON_EXTRACT(payload, "$.issue.labels") as labels,
JSON_EXTRACT(payload, "$.issue.created_at") as created_at,
JSON_EXTRACT(payload, "$.issue.updated_at") as updated_at,
org.login,
type,
FROM `githubarchive.month.20*`
WHERE (type="IssuesEvent" or type="IssueCommentEvent") and org.login = '{login}'
and _TABLE_SUFFIX >= '1800' """
if max_age_days:
# We need to convert the created_at field to a timestamp.
# JSON_EXTRACT returns a json string meaning it is quoted and we need
# to remove the quotes
query += f""" and DATETIME_DIFF(CURRENT_DATETIME(), PARSE_DATETIME(
"\\"%Y-%m-%dT%TZ\\"", JSON_EXTRACT(payload,
"$.issue.created_at")), DAY)
<= {max_age_days} """
issues_and_pulls=gbq.read_gbq(query, dialect='standard', project_id=project)
# pull request comments also get included so we need to filter those out
  pattern = re.compile(r".*issues/[\d]+")
issues_index = issues_and_pulls["html_url"].apply(lambda x: pattern.match(x) is not None)
issues = issues_and_pulls[issues_index]
# We need to group the events by issue and then select the most recent event for each
# issue as that should have the most up to date labels for each issue.
# TODO(jlewi): Should we be converting updated_at to a datetime before doing the sort?
latest_issues = issues.groupby("html_url", as_index=False).apply(lambda x: x.sort_values(["updated_at"]).iloc[-1])
# we need to deserialize the json strings to remove escaping
for f in ["html_url", "title", "body", "created_at", "updated_at"]:
latest_issues[f] = latest_issues[f].apply(lambda x : json.loads(x))
# Parse timestamps
for f in ["created_at", "updated_at"]:
latest_issues[f] = latest_issues[f].apply(lambda x : dateutil.parser.parse(x))
# Parse labels
def get_labels(x):
d = json.loads(x)
return [i["name"] for i in d]
latest_issues["parsed_labels"] = latest_issues["labels"].apply(get_labels)
  return latest_issues
| mit |
poolio/thunder | thunder/viz/colorize.py | 6 | 17783 | from numpy import arctan2, sqrt, pi, abs, dstack, clip, transpose, inf, \
random, zeros, ones, asarray, corrcoef, allclose, maximum, add, multiply, \
nan_to_num, copy, ndarray, around, ceil, rollaxis
class Colorize(object):
"""
Class for turning numerical data into colors.
Supports a set of custom conversions (rgb, hsv, polar, and indexed)
as well as conversions to standard matplotlib colormaps through
either a passed colormap or a string specification.
If vmax and vmin are not specified, numerical data will be automatically
scaled by its maximum and minimum values.
Supports two-dimensional and three-dimensional data.
Attributes
----------
cmap : string, optional, default = rainbow
The colormap to convert to, can be one of a special set of conversions
(rgb, hsv, hv, polar, angle, indexed), a matplotlib colormap object,
or a string specification of a matplotlib colormap
scale : float, optional, default = 1
        How to scale amplitude during color conversion, controls brightness
colors : list, optional, default = None
List of colors for 'indexed' option
vmin : scalar, optional, default = None
Numerical value to set to 0 during normalization, values below will be clipped
vmax : scalar, optional, default = None
Numerical value to set to 1 during normalization, values above will be clipped.
"""
def __init__(self, cmap='rainbow', scale=1, colors=None, vmin=None, vmax=None):
self.cmap = cmap
self.scale = scale
self.colors = colors
if self.cmap == 'index' and self.colors is None:
raise Exception("Must specify colors for indexed conversion")
self.vmin = vmin
self.vmax = vmax
@staticmethod
def tile(imgs, cmap='gray', bar=False, nans=True, clim=None, grid=None, size=9, axis=0):
"""
Display a collection of images as a grid of tiles
Parameters
----------
img : list or ndarray (2D or 3D)
The images to display. Can be a list of either 2D, 3D,
or a mix of 2D and 3D numpy arrays. Can also be a single
numpy array, in which case the axis parameter will be assumed
to index the image list, e.g. a (10, 512, 512) array with axis=0
will be treated as 10 different (512,512) images, and the same
array with axis=1 would be treated as 512 (10,512) images.
cmap : str or Colormap or list, optional, default = 'gray'
A colormap to use, for non RGB images, a list can be used to
specify a different colormap for each image
bar : boolean, optional, default = False
Whether to append a colorbar to each image
        nans : boolean, optional, default = True
Whether to replace NaNs, if True, will replace with 0s
clim : tuple or list of tuples, optional, default = None
Limits for scaling image, a list can be used to
specify different limits for each image
grid : tuple, optional, default = None
Dimensions of image tile grid, if None, will use a square grid
large enough to include all images
        size : scalar, optional, default = 9
Size of the figure
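        Example
        -------
        A hypothetical sketch: tile a stack of ten 64 x 64 images.
        >>> from numpy import random
        >>> stack = random.rand(10, 64, 64)
        >>> Colorize.tile(stack, cmap='gray', grid=(2, 5))  # doctest: +SKIP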
"""
from matplotlib.pyplot import figure, colorbar
from mpl_toolkits.axes_grid1 import ImageGrid
if not isinstance(imgs, list):
if isinstance(imgs, ndarray):
if (axis < 0) | (axis >= imgs.ndim):
raise ValueError("Must specify a valid axis to index the images")
imgs = list(rollaxis(imgs, axis, 0))
else:
raise ValueError("Must provide a list of images, or an ndarray")
imgs = [asarray(im) for im in imgs]
if (nans is True) and (imgs[0].dtype != bool):
imgs = [nan_to_num(im) for im in imgs]
fig = figure(figsize=(size, size))
if bar is True:
axes_pad = 0.4
if sum([im.ndim == 3 for im in imgs]):
raise ValueError("Cannot show meaningful colorbar for RGB image")
cbar_mode = "each"
else:
axes_pad = 0.2
cbar_mode = None
nimgs = len(imgs)
if not isinstance(cmap, list):
cmap = [cmap for _ in range(nimgs)]
if not isinstance(clim, list):
clim = [clim for _ in range(nimgs)]
if len(clim) < nimgs:
raise ValueError("Number of clim specifications %g too small for number of images %g"
% (len(clim), nimgs))
if len(cmap) < nimgs:
raise ValueError("Number of cmap specifications %g too small for number of images %g"
% (len(cmap), nimgs))
if grid is None:
c = int(ceil(sqrt(nimgs)))
grid = (c, c)
ngrid = grid[0] * grid[1]
if ngrid < nimgs:
raise ValueError("Total grid count %g too small for number of images %g" % (ngrid, nimgs))
g = ImageGrid(fig, 111, nrows_ncols=grid, axes_pad=axes_pad,
cbar_mode=cbar_mode, cbar_size="5%", cbar_pad="5%")
for i, im in enumerate(imgs):
ax = g[i].imshow(im, cmap=cmap[i], interpolation='none', clim=clim[i])
g[i].axis('off')
if bar:
cb = colorbar(ax, g[i].cax)
rng = abs(cb.vmax - cb.vmin) * 0.05
cb.set_ticks([around(cb.vmin + rng, 1), around(cb.vmax - rng, 1)])
cb.outline.set_visible(False)
if nimgs < ngrid:
for i in range(nimgs, ngrid):
g[i].axis('off')
g[i].cax.axis('off')
@staticmethod
def image(img, cmap='gray', bar=False, nans=True, clim=None, size=7, ax=None):
"""
Streamlined display of images using matplotlib.
Parameters
----------
img : ndarray, 2D or 3D
The image to display
cmap : str or Colormap, optional, default = 'gray'
A colormap to use, for non RGB images
bar : boolean, optional, default = False
Whether to append a colorbar
        nans : boolean, optional, default = True
Whether to replace NaNs, if True, will replace with 0s
clim : tuple, optional, default = None
Limits for scaling image
        size : scalar, optional, default = 7
Size of the figure
ax : matplotlib axis, optional, default = None
An existing axis to plot into
"""
from matplotlib.pyplot import axis, colorbar, figure, gca
img = asarray(img)
if (nans is True) and (img.dtype != bool):
img = nan_to_num(img)
if ax is None:
f = figure(figsize=(size, size))
ax = gca()
if img.ndim == 3:
if bar:
raise ValueError("Cannot show meaningful colorbar for RGB images")
if img.shape[2] != 3:
raise ValueError("Size of third dimension must be 3 for RGB images, got %g" % img.shape[2])
mn = img.min()
mx = img.max()
if mn < 0.0 or mx > 1.0:
raise ValueError("Values must be between 0.0 and 1.0 for RGB images, got range (%g, %g)" % (mn, mx))
im = ax.imshow(img, interpolation='none', clim=clim)
else:
im = ax.imshow(img, cmap=cmap, interpolation='none', clim=clim)
if bar is True:
cb = colorbar(im, fraction=0.046, pad=0.04)
rng = abs(cb.vmax - cb.vmin) * 0.05
cb.set_ticks([around(cb.vmin + rng, 1), around(cb.vmax - rng, 1)])
cb.outline.set_visible(False)
axis('off')
def transform(self, img, mask=None, background=None, mixing=1.0):
"""
Colorize numerical image data.
Input can either be a single array or a list of arrays.
Depending on the colorization option, each array must either be
2 or 3 dimensional, see parameters for details.
Parameters
----------
img : array
The image(s) to colorize. For rgb, hsv, polar, and indexed conversions,
must be of shape (c, x, y, z) or (c, x, y), where c is the dimension
containing the information for colorizing. For colormap conversions,
must be of shape (x, y, z) or (x, y).
mask : array
An additional image to mask the luminance channel of the first one.
Must be of shape (x, y, z) or (x, y), and must match dimensions of images.
Must be strictly positive (and will be clipped below at 0).
background : array
An additional image to display as a grayscale background.
Must be of shape (x, y, z) or (x, y), and must match dimensions of images.
mixing : scalar
If adding a background image, mixing controls the relative scale.
Values larger than 1.0 will emphasize the background more.
Returns
-------
Arrays with RGB values, with shape (x, y, z, 3) or (x, y, 3)
"""
from matplotlib.cm import get_cmap
from matplotlib.colors import ListedColormap, LinearSegmentedColormap, hsv_to_rgb, Normalize
img = asarray(img)
dims = img.shape
self._checkDims(dims)
if self.cmap not in ['polar', 'angle']:
if self.cmap in ['rgb', 'hv', 'hsv', 'indexed']:
img = copy(img)
for i, im in enumerate(img):
norm = Normalize(vmin=self.vmin, vmax=self.vmax, clip=True)
img[i] = norm(im)
if isinstance(self.cmap, ListedColormap) or isinstance(self.cmap, str):
norm = Normalize(vmin=self.vmin, vmax=self.vmax, clip=True)
img = norm(copy(img))
if mask is not None:
mask = self._prepareMask(mask)
self._checkMixedDims(mask.shape, dims)
if background is not None:
background = self._prepareBackground(background, mixing)
self._checkMixedDims(background.shape, dims)
if self.cmap == 'rgb':
if img.ndim == 4:
out = transpose(img, [1, 2, 3, 0])
if img.ndim == 3:
out = transpose(img, [1, 2, 0])
elif self.cmap == 'hv':
saturation = ones((dims[1], dims[2])) * 0.8
if img.ndim == 4:
out = zeros((dims[1], dims[2], dims[3], 3))
for i in range(0, dims[3]):
out[:, :, i, :] = hsv_to_rgb(dstack((img[0][:, :, i], saturation, img[1][:, :, i])))
if img.ndim == 3:
out = hsv_to_rgb(dstack((img[0], saturation, img[1])))
elif self.cmap == 'hsv':
if img.ndim == 4:
out = zeros((dims[1], dims[2], dims[3], 3))
for i in range(0, dims[3]):
out[:, :, i, :] = hsv_to_rgb(dstack((img[0][:, :, i], img[1][:, :, i], img[2][:, :, i])))
if img.ndim == 3:
out = hsv_to_rgb(dstack((img[0], img[1], img[2])))
elif self.cmap == 'angle':
theta = ((arctan2(-img[0], -img[1]) + pi/2) % (pi*2)) / (2 * pi)
saturation = ones((dims[1], dims[2])) * 0.8
rho = ones((dims[1], dims[2]))
if img.ndim == 4:
out = zeros((dims[1], dims[2], dims[3], 3))
for i in range(0, dims[3]):
out[:, :, i, :] = hsv_to_rgb(dstack((theta[:, :, i], saturation, rho)))
if img.ndim == 3:
out = hsv_to_rgb(dstack((theta, saturation, rho)))
elif self.cmap == 'polar':
theta = ((arctan2(-img[0], -img[1]) + pi/2) % (pi*2)) / (2 * pi)
rho = sqrt(img[0]**2 + img[1]**2)
saturation = ones((dims[1], dims[2]))
if img.ndim == 4:
out = zeros((dims[1], dims[2], dims[3], 3))
for i in range(0, dims[3]):
out[:, :, i, :] = hsv_to_rgb(dstack((theta[:, :, i], saturation, self.scale * rho[:, :, i])))
if img.ndim == 3:
out = hsv_to_rgb(dstack((theta, saturation, self.scale * rho)))
elif self.cmap == 'indexed':
if img.ndim == 4:
out = zeros((dims[1], dims[2], dims[3], 3))
if img.ndim == 3:
out = zeros((dims[1], dims[2], 3))
for ix, clr in enumerate(self.colors):
cmap = LinearSegmentedColormap.from_list('blend', [[0, 0, 0], clr])
tmp = cmap(img[ix])
if img.ndim == 4:
tmp = tmp[:, :, :, 0:3]
if img.ndim == 3:
tmp = tmp[:, :, 0:3]
out = maximum(out, clip(tmp, 0, 1))
elif isinstance(self.cmap, ListedColormap):
if img.ndim == 3:
out = self.cmap(img)
out = out[:, :, :, 0:3]
if img.ndim == 2:
out = self.cmap(img)
out = out[:, :, 0:3]
elif isinstance(self.cmap, str):
func = lambda x: get_cmap(self.cmap, 256)(x)
out = func(img)
if img.ndim == 3:
out = out[:, :, :, 0:3]
if img.ndim == 2:
out = out[:, :, 0:3]
else:
raise Exception('Colorization method not understood')
out = clip(out * self.scale, 0, 1)
if mask is not None:
out = self.blend(out, mask, multiply)
if background is not None:
out = self.blend(out, background, add)
return clip(out, 0, 1)
@staticmethod
def blend(img, mask, op=add):
"""
Blend two images together using the specified operator.
Parameters
----------
img : array-like
First image to blend
mask : array-like
Second image to blend
op : func, optional, default = add
Operator to use for combining images
"""
if mask.ndim == 3:
for i in range(0, 3):
img[:, :, :, i] = op(img[:, :, :, i], mask)
else:
for i in range(0, 3):
img[:, :, i] = op(img[:, :, i], mask)
return img
def _checkDims(self, dims):
from matplotlib.colors import ListedColormap
if self.cmap in ['rgb', 'hsv', 'hv', 'polar', 'angle', 'indexed']:
if self.cmap in ['rgb', 'hsv']:
if dims[0] != 3:
raise Exception('First dimension must be 3 for %s conversion' % self.cmap)
if self.cmap in ['polar', 'angle', 'hv']:
if dims[0] != 2:
raise Exception('First dimension must be 2 for %s conversion' % self.cmap)
if self.cmap in ['indexed']:
if dims[0] != len(self.colors):
raise Exception('First dimension must be %g for %s conversion with list %s'
% (len(self.colors), self.cmap, self.colors))
elif isinstance(self.cmap, ListedColormap) or isinstance(self.cmap, str):
if len(dims) not in [2, 3]:
raise Exception('Number of dimensions must be 2 or 3 for %s conversion' % self.cmap)
def _checkMixedDims(self, dims1, dims2):
from matplotlib.colors import ListedColormap
if self.cmap in ['rgb', 'hsv', 'hv', 'polar', 'angle', 'indexed']:
if not allclose(dims1, dims2[1:]):
                raise Exception('Mask or background dimensions do not match image dimensions')
elif isinstance(self.cmap, ListedColormap) or isinstance(self.cmap, str):
if not allclose(dims1, dims2):
                raise Exception('Mask or background dimensions do not match image dimensions')
@staticmethod
def _prepareMask(mask):
mask = asarray(mask)
mask = clip(mask, 0, inf)
return mask / mask.max()
@staticmethod
def _prepareBackground(background, mixing):
from matplotlib.colors import Normalize
background = asarray(background)
background = Normalize()(background)
return background * mixing
@classmethod
def optimize(cls, mat, asCmap=False):
"""
Optimal colors based on array data similarity.
Given an (n, m) data array with n m-dimensional data points,
tries to find an optimal set of n colors such that the similarity
between colors in 3-dimensional space is well-matched to the similarity
between the data points in m-dimensional space.
Parameters
----------
mat : array-like
Array of data points to use for estimating similarity.
asCmap : boolean, optional, default = False
Whether to return a matplotlib colormap, if False will
return a list of colors.
"""
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Input array must be two-dimensional')
nclrs = mat.shape[0]
from scipy.spatial.distance import pdist, squareform
from scipy.optimize import minimize
distMat = squareform(pdist(mat, metric='cosine')).flatten()
optFunc = lambda x: 1 - corrcoef(distMat, squareform(pdist(x.reshape(nclrs, 3), 'cosine')).flatten())[0, 1]
init = random.rand(nclrs*3)
bounds = [(0, 1) for _ in range(0, nclrs * 3)]
res = minimize(optFunc, init, bounds=bounds, method='L-BFGS-B')
newClrs = res.x.reshape(nclrs, 3).tolist()
from matplotlib.colors import ListedColormap
if asCmap:
newClrs = ListedColormap(newClrs, name='from_list')
return newClrs
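# A minimal, hypothetical demo (not part of thunder's documented examples):
# colorize a random two-channel field with the 'polar' conversion described in
# transform() above, using only names already imported at the top of this module.
if __name__ == "__main__":
    demo = random.rand(2, 32, 32)
    rgb = Colorize(cmap='polar').transform(demo)
    print(rgb.shape)  # expected (32, 32, 3)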
| apache-2.0 |
NelisVerhoef/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
trachelr/mne-python | tutorials/plot_cluster_methods_tutorial.py | 15 | 8431 | # doc:slow-example
"""
.. _tut_stats_cluster_methods:
======================================================
Permutation t-test on toy data with spatial clustering
======================================================
Following the illustrative example of Ridgway et al. 2012,
this demonstrates some basic ideas behind both the "hat"
variance adjustment method, as well as threshold-free
cluster enhancement (TFCE) methods in mne-python.
This toy dataset consists of a 40 x 40 square with a "signal"
present in the center (at pixel [20, 20]) with white noise
added and a 5-pixel-SD normal smoothing kernel applied.
For more information, see:
Ridgway et al. 2012, "The problem of low variance voxels in
statistical parametric mapping; a new hat avoids a 'haircut'",
NeuroImage. 2012 Feb 1;59(3):2131-41.
Smith and Nichols 2009, "Threshold-free cluster enhancement:
addressing problems of smoothing, threshold dependence, and
localisation in cluster inference", NeuroImage 44 (2009) 83-98.
The top row plots the T statistic over space, peaking toward the
center. Note that it has peaky edges. Second, with the "hat" variance
correction/regularization, the peak becomes correctly centered. Third,
the TFCE approach also corrects for these edge artifacts. Fourth, the
the two methods combined provide a tighter estimate, for better or
worse.
Now considering multiple-comparisons corrected statistics on these
variables, note that a non-cluster test (e.g., FDR or Bonferroni) would
mis-localize the peak due to sharpness in the T statistic driven by
low-variance pixels toward the edge of the plateau. Standard clustering
(first plot in the second row) identifies the correct region, but the
whole area must be declared significant, so no peak analysis can be done.
Also, the peak is broad. In this method, all significances are
family-wise error rate (FWER) corrected, and the method is
non-parametric so assumptions of Gaussian data distributions (which do
actually hold for this example) don't need to be satisfied. Adding the
"hat" technique tightens the estimate of significant activity (second
plot). The TFCE approach (third plot) allows analyzing each significant
point independently, but still has a broadened estimate. Note that
this is also FWER corrected. Finally, combining the TFCE and "hat"
methods tightens the area declared significant (again FWER corrected),
and allows for evaluation of each point independently instead of as
a single, broad cluster.
Note that this example does quite a bit of processing, so even on a
fast machine it can take a few minutes to complete.
"""
# Authors: Eric Larson <[email protected]>
# License: BSD (3-clause)
import numpy as np
from scipy import stats
from functools import partial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa; this changes hidden mpl vars
from mne.stats import (spatio_temporal_cluster_1samp_test,
bonferroni_correction, ttest_1samp_no_p)
try:
from sklearn.feature_extraction.image import grid_to_graph
except ImportError:
from scikits.learn.feature_extraction.image import grid_to_graph
print(__doc__)
###############################################################################
# Set parameters
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
threshold = -stats.distributions.t.ppf(0.05, n_subjects - 1)
threshold_tfce = dict(start=0, step=0.2)
n_permutations = 1024 # number of clustering permutations (1024 for exact)
###############################################################################
# Construct simulated data
# Make the connectivity matrix just next-neighbor spatially
n_src = width * width
connectivity = grid_to_graph(width, width)
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(42)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the dead center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# Do some statistics
# Note that X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions
X = X.reshape((n_subjects, 1, n_src))
# Now let's do some clustering using the standard method. Note that not
# specifying a connectivity matrix implies grid-like connectivity, which
# we want here:
T_obs, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
# Let's put the cluster data in a readable format
ps = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps[cl[1]] = -np.log10(p)
ps = ps.reshape((width, width))
T_obs = T_obs.reshape((width, width))
# To do a Bonferroni correction on these data is simple:
p = stats.distributions.t.sf(T_obs, n_subjects - 1)
p_bon = -np.log10(bonferroni_correction(p)[1])
# Now let's do some clustering using the standard method with "hat":
stat_fun = partial(ttest_1samp_no_p, sigma=sigma)
T_obs_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun)
# Let's put the cluster data in a readable format
ps_hat = np.zeros(width * width)
for cl, p in zip(clusters, p_values):
ps_hat[cl[1]] = -np.log10(p)
ps_hat = ps_hat.reshape((width, width))
T_obs_hat = T_obs_hat.reshape((width, width))
# Now the threshold-free cluster enhancement method (TFCE):
T_obs_tfce, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations)
T_obs_tfce = T_obs_tfce.reshape((width, width))
ps_tfce = -np.log10(p_values.reshape((width, width)))
# Now the TFCE with "hat" variance correction:
T_obs_tfce_hat, clusters, p_values, H0 = \
spatio_temporal_cluster_1samp_test(X, n_jobs=2, threshold=threshold_tfce,
connectivity=connectivity,
tail=1, n_permutations=n_permutations,
stat_fun=stat_fun)
T_obs_tfce_hat = T_obs_tfce_hat.reshape((width, width))
ps_tfce_hat = -np.log10(p_values.reshape((width, width)))
###############################################################################
# Visualize results
plt.ion()
fig = plt.figure(facecolor='w')
x, y = np.mgrid[0:width, 0:width]
kwargs = dict(rstride=1, cstride=1, linewidth=0, cmap='Greens')
Ts = [T_obs, T_obs_hat, T_obs_tfce, T_obs_tfce_hat]
titles = ['T statistic', 'T with "hat"', 'TFCE statistic', 'TFCE w/"hat" stat']
for ii, (t, title) in enumerate(zip(Ts, titles)):
ax = fig.add_subplot(2, 4, ii + 1, projection='3d')
ax.plot_surface(x, y, t, **kwargs)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
p_lims = [1.3, -np.log10(1.0 / n_permutations)]
pvals = [ps, ps_hat, ps_tfce, ps_tfce_hat]
titles = ['Standard clustering', 'Clust. w/"hat"',
'Clust. w/TFCE', 'Clust. w/TFCE+"hat"']
axs = []
for ii, (p, title) in enumerate(zip(pvals, titles)):
ax = fig.add_subplot(2, 4, 5 + ii)
plt.imshow(p, cmap='Purples', vmin=p_lims[0], vmax=p_lims[1])
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(title)
axs.append(ax)
plt.tight_layout()
for ax in axs:
cbar = plt.colorbar(ax=ax, shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025)
cbar.set_label('-log10(p)')
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p for p in p_lims])
| bsd-3-clause |
cpcloud/odo | odo/backends/sas.py | 10 | 1321 | from __future__ import absolute_import, division, print_function
import sas7bdat
from sas7bdat import SAS7BDAT
import datashape
from datashape import discover, dshape, var, Record, date_, datetime_
from collections import Iterator
import pandas as pd
from .pandas import coerce_datetimes
from ..append import append
from ..convert import convert
from ..resource import resource
@resource.register('.+\.(sas7bdat)')
def resource_sas(uri, **kwargs):
return SAS7BDAT(uri, **kwargs)
@discover.register(SAS7BDAT)
def discover_sas(f, **kwargs):
lines = f.readlines()
next(lines) # burn header
ln = next(lines)
types = map(discover, ln)
names = [col.name.decode("utf-8") for col in f.header.parent.columns]
return var * Record(list(zip(names, types)))
@convert.register(pd.DataFrame, SAS7BDAT, cost=4.0)
def sas_to_DataFrame(s, dshape=None, **kwargs):
df = s.to_data_frame()
if any(typ in (date_, datetime_) for typ in dshape.measure.types):
df = coerce_datetimes(df)
names = [col.decode('utf-8') for col in s.column_names]
df = df[names] # Reorder names to match sasfile
return df
@convert.register(Iterator, SAS7BDAT, cost=1.0)
def sas_to_iterator(s, **kwargs):
lines = s.readlines()
if not s.skip_header:
next(lines) # burn
return lines
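# A hypothetical usage sketch (the file name below is made up): resource()
# dispatches on the .sas7bdat extension, and the converters above turn the
# handle into a pandas DataFrame. Passing dshape explicitly mirrors what
# sas_to_DataFrame expects.
if __name__ == '__main__':
    sas = resource('example.sas7bdat')  # hypothetical path
    ds = discover(sas)
    df = convert(pd.DataFrame, sas, dshape=ds)
    print(df.head())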
| bsd-3-clause |
ThQ/luigi | examples/pyspark_wc.py | 56 | 3361 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
"""
This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
# py-packages: numpy, pandas
"""
driver_memory = '2g'
executor_memory = '3g'
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
def main(self, sc, *args):
sc.textFile(self.input().path) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(self.output().path)
class PySparkWordCount(SparkSubmitTask):
"""
This task is the same as :py:class:`InlinePySparkWordCount` above but uses
an external python driver file specified in :py:meth:`app`
It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
deploy-mode: client
"""
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(default=100)
name = "PySpark Word Count"
app = 'wordcount.py'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
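# Hypothetical invocation sketch (module name, paths and scheduler flags are
# assumptions, not part of this example):
#
#   luigi --module pyspark_wc InlinePySparkWordCount --local-scheduler
#
# together with a [spark] section in the luigi configuration like the ones
# shown in the docstrings above.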
| apache-2.0 |
sonnyhu/scikit-learn | sklearn/svm/tests/test_svm.py | 9 | 35008 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import six
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=unit_weight)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=random_weight)
score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
sample_weight=random_weight)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf = svm.SVC(kernel='linear', C=0.1).fit(X_train, y_train)
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
        # check that score is better when class_weight='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='macro')
<= metrics.f1_score(y, y_pred_balanced,
average='macro'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_unicode_kernel():
# Test that a unicode kernel name does not cause a TypeError on clf.fit
if six.PY2:
# Test unicode (same as str on python3)
clf = svm.SVC(kernel=unicode('linear'))
clf.fit(X, Y)
# Test ascii bytes (str is bytes in python2)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
else:
# Test unicode (str is unicode in python3)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
# Test ascii bytes (same as str on python2)
clf = svm.SVC(kernel=bytes('linear', 'ascii'))
clf.fit(X, Y)
# Test default behavior on both versions
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
svm.LinearSVC(loss="l2").fit, X, y)
# LinearSVR
# loss l1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l1", "epsilon_insensitive", "loss='l1'",
"1.0"),
svm.LinearSVR(loss="l1").fit, X, y)
# loss l2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
def test_linear_svx_uppercase_loss_penality_raises_error():
# Check if Upper case notation raises error at _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
svm.LinearSVC(loss="SQuared_hinge").fit, X, y)
assert_raise_message(ValueError, ("The combination of penalty='L2'"
" and loss='squared_hinge' is not supported"),
svm.LinearSVC(penalty="L2").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(
decision_function_shape="ovr")).fit(X, y)
assert_equal(len(clf.predict(X)), len(y))
| bsd-3-clause |
tjduigna/exatomic | exatomic/core/molecule.py | 3 | 6451 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Molecule Table
###################
"""
import numpy as np
import pandas as pd
import networkx as nx
import warnings
from networkx.algorithms.components import connected_components
from exa import DataFrame
from exatomic.base import sym2mass
from exatomic.formula import string_to_dict, dict_to_string
class Molecule(DataFrame):
"""
Description of molecules in the atomic universe.
"""
_index = 'molecule'
_categories = {'frame': np.int64, 'formula': str, 'classification': object}
#@property
#def _constructor(self):
# return Molecule
def classify(self, *classifiers):
"""
Classify molecules into arbitrary categories.
.. code-block:: Python
u.molecule.classify(('solute', 'Na'), ('solvent', 'H(2)O(1)'))
Args:
classifiers: Any number of tuples of the form ('label', 'identifier', exact) (see below)
Note:
A classifier has 3 parts, "label", e.g. "solvent", "identifier", e.g.
"H(2)O(1)", and exact (true or false). If exact is false (default),
classification is greedy and (in this example) molecules with formulas
"H(1)O(1)", "H(3)O(1)", etc. would get classified as "solvent". If,
instead, exact were set to true, those molecules would remain
unclassified.
Warning:
Classifiers are applied in the order passed; where identifiers overlap,
the latter classification is used.
See Also:
:func:`~exatomic.algorithms.nearest.compute_nearest_molecules`
"""
for c in classifiers:
n = len(c)
if n != 3 and n != 2:
                raise ValueError("Each classifier must be a tuple of length 2 or 3.")
self['classification'] = None
for classifier in classifiers:
identifier = string_to_dict(classifier[0])
classification = classifier[1]
exact = classifier[2] if len(classifier) == 3 else False
this = self
for symbol, count in identifier.items():
this = this[this[symbol] == count] if exact else this[this[symbol] >= 1]
if len(this) > 0:
                self.loc[self.index.isin(this.index), 'classification'] = classification
else:
raise KeyError('No records found for {}, with identifier {}.'.format(classification, identifier))
self['classification'] = self['classification'].astype('category')
if len(self[self['classification'].isnull()]) > 0:
warnings.warn("Unclassified molecules remaining...")
def get_atom_count(self):
"""
Compute the number of atoms per molecule.
"""
symbols = self._get_symbols()
return self[symbols].sum(axis=1)
def get_formula(self, as_map=False):
"""
Compute the string representation of the molecule.
"""
symbols = self._get_symbols()
mcules = self[symbols].to_dict(orient='index')
ret = map(dict_to_string, mcules.values())
if as_map:
return ret
return list(ret)
def _get_symbols(self):
"""
Helper method to get atom symbols.
"""
return [col for col in self if len(col) < 3 and col[0].istitle()]
def compute_molecule(universe):
"""
Cluster atoms into molecules and create the :class:`~exatomic.molecule.Molecule`
table.
Args:
universe: Atomic universe
Returns:
molecule: Molecule table
Warning:
This function modifies the universe's atom (:class:`~exatomic.atom.Atom`)
table in place!
"""
nodes = universe.atom.index.values
    bonded = universe.atom_two.loc[universe.atom_two['bond'] == True, ['atom0', 'atom1']]
edges = zip(bonded['atom0'].astype(np.int64), bonded['atom1'].astype(np.int64))
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
# generate molecule indices for the atom table
mapper = {}
i = 0
for k, v in g.degree(): # First handle single atom "molecules"
if v == 0:
mapper[k] = i
i += 1
for seht in connected_components(g): # Second handle multi atom molecules
for adx in seht:
mapper[adx] = i
i += 1
universe.atom['molecule'] = universe.atom.index.map(lambda x: mapper[x])
universe.atom['mass'] = universe.atom['symbol'].map(sym2mass)
grps = universe.atom.groupby('molecule')
molecule = grps['symbol'].value_counts().unstack().fillna(0).astype(np.int64)
molecule.columns.name = None
molecule['mass'] = grps['mass'].sum()
universe.atom['molecule'] = universe.atom['molecule'].astype('category')
del universe.atom['mass']
return molecule
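# Illustrative sketch (not part of exatomic): the clustering idea used in
# compute_molecule, shown on a hypothetical toy bond list so the node-to-molecule
# mapping is easy to follow. Only networkx is required; all indices are made up.
def _example_molecule_mapping():
    import networkx as nx
    from networkx.algorithms.components import connected_components
    g = nx.Graph()
    g.add_nodes_from(range(6))                   # six atoms, indices 0..5
    g.add_edges_from([(0, 1), (1, 2), (3, 4)])   # bonded pairs; atom 5 is unbonded
    mapper = {}
    i = 0
    for node, degree in g.degree():              # single-atom "molecules" first
        if degree == 0:
            mapper[node] = i
            i += 1
    for component in connected_components(g):    # then each connected component
        for node in component:
            mapper[node] = i
        i += 1
    # atoms 0-2 share one molecule index, atoms 3-4 another, atom 5 gets its own
    return mapper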
def compute_molecule_count(universe):
"""
"""
if 'molecule' not in universe.atom.columns:
universe.compute_molecule()
universe.atom._revert_categories()
mapper = universe.atom.drop_duplicates('molecule').set_index('molecule')['frame']
universe.atom._set_categories()
universe.molecule['frame'] = universe.molecule.index.map(lambda x: mapper[x])
molecule_count = universe.molecule.groupby('frame').size()
del universe.molecule['frame']
return molecule_count
def compute_molecule_com(universe):
"""
Compute molecules' centers of mass.
"""
if 'molecule' not in universe.atom.columns:
universe.compute_molecule()
mass = universe.atom.get_element_masses()
if universe.frame.is_periodic():
xyz = universe.atom[['x', 'y', 'z']].copy()
xyz.update(universe.visual_atom)
else:
xyz = universe.atom[['x', 'y', 'z']]
xm = xyz['x'].mul(mass)
ym = xyz['y'].mul(mass)
zm = xyz['z'].mul(mass)
#rm = xm.add(ym).add(zm)
df = pd.DataFrame.from_dict({'xm': xm, 'ym': ym, 'zm': zm, 'mass': mass,
'molecule': universe.atom['molecule']})
groups = df.groupby('molecule')
sums = groups.sum()
cx = sums['xm'].div(sums['mass'])
cy = sums['ym'].div(sums['mass'])
cz = sums['zm'].div(sums['mass'])
return cx, cy, cz
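# Illustrative sketch (not part of exatomic): the mass-weighted mean behind
# compute_molecule_com, on a hypothetical two-molecule frame with made-up masses
# and a single coordinate; the y and z columns follow the same pattern.
def _example_center_of_mass():
    import pandas as pd
    df = pd.DataFrame({'molecule': [0, 0, 1, 1],
                       'mass': [16.0, 1.0, 12.0, 12.0],
                       'x': [0.0, 1.0, 0.0, 2.0]})
    df['xm'] = df['x'].mul(df['mass'])            # mass-weighted coordinate
    sums = df.groupby('molecule')[['xm', 'mass']].sum()
    cx = sums['xm'].div(sums['mass'])             # center of mass along x per molecule
    return cx                                     # molecule 0 -> ~0.059, molecule 1 -> 1.0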
| apache-2.0 |
compops/newton-sysid2015 | samplingApproximation/para/ml_helpers.py | 1 | 2733 | ##############################################################################
##############################################################################
# Example code for Newton type maximum likelihood parameter inference
# in nonlinear state space models using particle methods.
#
# Please cite:
#
# M. Kok, J. Dahlin, T. B. Sch\"{o}n and A. Wills,
# "Newton-based maximum likelihood estimation in nonlinear state space models.
# Proceedings of the 17th IFAC Symposium on System Identification,
# Beijing, China, October 2015.
#
# (c) 2015 Johan Dahlin
# johan.dahlin (at) liu.se
#
# Distributed under the MIT license.
#
##############################################################################
##############################################################################
import numpy as np
import pandas
import os
##########################################################################
# Helper: compile the results and write to file
##########################################################################
def writeToFile_helper(ml,sm=None,fileOutName=None,noLLests=False):
# Construct the columns labels
if ( noLLests ):
columnlabels = [None]*(ml.nPars+1);
else:
columnlabels = [None]*(ml.nPars+3);
for ii in range(0,ml.nPars):
columnlabels[ii] = "th" + str(ii);
columnlabels[ml.nPars] = "step";
if ( noLLests == False ):
columnlabels[ml.nPars+1] = "diffLogLikelihood";
columnlabels[ml.nPars+2] = "logLikelihood";
# Compile the results for output
if ( noLLests ):
out = np.hstack((ml.th,ml.step));
else:
out = np.hstack((ml.th,ml.step,ml.llDiff,ml.ll));
# Write out the results to file
fileOut = pandas.DataFrame(out,columns=columnlabels);
    if ( fileOutName is None ):
        fileOutName = 'results/' + str(ml.filePrefix) + '/' + str(ml.optMethod) + '_' + sm.filterType + '_' + sm.smootherType + '_N' + str(sm.nPart) + '/' + str(ml.dataset) + '.csv';
    ensure_dir(fileOutName);
fileOut.to_csv(fileOutName);
print("writeToFile_helper: wrote results to file: " + fileOutName)
##############################################################################
# Check if dirs for outputs exists, otherwise create them
##############################################################################
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
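##############################################################################
# Illustrative usage sketch (not part of the original script): how ensure_dir
# combines with pandas.DataFrame.to_csv, the same pattern used in
# writeToFile_helper above. A temporary directory is used so nothing is
# written under the real results/ tree; all names below are made up.
##############################################################################
def example_write_results():
    import tempfile;
    out = np.hstack((np.zeros((3,2)),np.ones((3,1))));
    fname = os.path.join(tempfile.mkdtemp(),'example','dataset0.csv');
    ensure_dir(fname);
    pandas.DataFrame(out,columns=["th0","th1","step"]).to_csv(fname);
    return fname;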
##############################################################################
##############################################################################
# End of file
##############################################################################
############################################################################## | gpl-3.0 |
cgre-aachen/gempy | gempy/plot/visualization_2d.py | 1 | 35966 | """
This file is part of gempy.
gempy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
gempy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with gempy. If not, see <http://www.gnu.org/licenses/>.
Module with classes and methods to visualize structural geology data and potential fields of regional models based on
the potential field method.
Created on 23/09/2019
@author: Miguel de la Varga, Elisa Heim
"""
import warnings
import os
import copy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.ticker import FixedFormatter, FixedLocator
import matplotlib.gridspec as gridspect
import matplotlib as mpl
import scipy.spatial.distance as dd
import seaborn as sns
sns.set_context('talk')
plt.style.use(['seaborn-white', 'seaborn-talk'])
warnings.filterwarnings("ignore", message="No contour levels were found")
class Plot2D:
"""
Class with functionality to plot 2D gempy sections
Args:
model: gempy.Model object
cmap: Color map to pass to matplotlib
"""
def __init__(self, model, cmap=None, norm=None, **kwargs):
self.model = model
self._color_lot = dict(zip(self.model._surfaces.df['surface'],
self.model._surfaces.df['color']))
self.axes = list()
if cmap is None:
self.cmap = mcolors.ListedColormap(list(self.model._surfaces.df['color']))
self._custom_colormap = False
else:
self.cmap = cmap
self._custom_colormap = True
if norm is None:
self.norm = mcolors.Normalize(vmin=0.5, vmax=len(self.cmap.colors) + 0.5)
else:
self.norm = norm
def update_colot_lot(self, color_dir=None):
if color_dir is None:
color_dir = dict(zip(self.model._surfaces.df['surface'], self.model._surfaces.df['color']))
self._color_lot = color_dir
if self._custom_colormap is False:
self.cmap = mcolors.ListedColormap(list(self.model._surfaces.df['color']))
self.norm = mcolors.Normalize(vmin=0.5, vmax=len(self.cmap.colors) + 0.5)
@staticmethod
def remove(ax):
while len(ax.collections) != 0:
list(map(lambda x: x.remove(), ax.collections))
def _make_section_xylabels(self, section_name, n=5):
"""
@elisa heim
Setting the axis labels to any combination of vertical crossections
Args:
section_name: name of a defined gempy crossection. See gempy.Model().grid.section
n:
Returns:
"""
if n > 5:
n = 3 # todo I don't know why but sometimes it wants to make a lot of xticks
elif n < 0:
n = 3
j = np.where(self.model._grid.sections.names == section_name)[0][0]
startend = list(self.model._grid.sections.section_dict.values())[j]
p1, p2 = startend[0], startend[1]
xy = self.model._grid.sections.calculate_line_coordinates_2points(p1, p2, n)
if len(np.unique(xy[:, 0])) == 1:
labels = xy[:, 1].astype(int)
axname = 'Y'
elif len(np.unique(xy[:, 1])) == 1:
labels = xy[:, 0].astype(int)
axname = 'X'
else:
labels = [str(xy[:, 0].astype(int)[i]) + ',\n' + str(xy[:, 1].astype(int)[i]) for i in
range(xy[:, 0].shape[0])]
axname = 'X,Y'
return labels, axname
def _slice(self, direction, cell_number=25):
"""
Slice the 3D array (blocks or scalar field) in the specific direction selected in the plot functions
"""
_a, _b, _c = (slice(0, self.model._grid.regular_grid.resolution[0]),
slice(0, self.model._grid.regular_grid.resolution[1]),
slice(0, self.model._grid.regular_grid.resolution[2]))
if direction == "x":
cell_number = int(self.model._grid.regular_grid.resolution[0] / 2) if cell_number == 'mid' else cell_number
_a, x, y, Gx, Gy = cell_number, "Y", "Z", "G_y", "G_z"
extent_val = self.model._grid.regular_grid.extent[[2, 3, 4, 5]]
elif direction == "y":
cell_number = int(self.model._grid.regular_grid.resolution[1] / 2) if cell_number == 'mid' else cell_number
_b, x, y, Gx, Gy = cell_number, "X", "Z", "G_x", "G_z"
extent_val = self.model._grid.regular_grid.extent[[0, 1, 4, 5]]
elif direction == "z":
cell_number = int(self.model._grid.regular_grid.resolution[2] / 2) if cell_number == 'mid' else cell_number
_c, x, y, Gx, Gy = cell_number, "X", "Y", "G_x", "G_y"
extent_val = self.model._grid.regular_grid.extent[[0, 1, 2, 3]]
else:
raise AttributeError(str(direction) + "must be a cartesian direction, i.e. xyz")
return _a, _b, _c, extent_val, x, y, Gx, Gy
def create_figure(self, figsize=None, textsize=None, **kwargs):
"""
Create the figure.
Args:
figsize:
textsize:
Returns:
figure, list axes, subgrid values
"""
cols = kwargs.get('cols', 1)
rows = kwargs.get('rows', 1)
figsize, self.ax_labelsize, _, self.xt_labelsize, self.linewidth, _ = _scale_fig_size(
figsize, textsize, rows, cols)
        self.fig = plt.figure(figsize=figsize, constrained_layout=False)
self.fig.is_legend = False
# TODO make grid variable
# self.gs_0 = gridspect.GridSpec(2, 2, figure=self.fig, hspace=.9)
return self.fig, self.axes # , self.gs_0
def add_section(self, section_name=None, cell_number=None, direction='y', ax=None, ax_pos=111,
ve=1., **kwargs):
extent_val = kwargs.get('extent', None)
self.update_colot_lot()
if ax is None:
ax = self.fig.add_subplot(ax_pos)
if section_name is not None:
if section_name == 'topography':
ax.set_title('Geological map')
ax.set_xlabel('X')
ax.set_ylabel('Y')
extent_val = self.model._grid.topography.extent
else:
dist = self.model._grid.sections.df.loc[section_name, 'dist']
extent_val = [0, dist,
self.model._grid.regular_grid.extent[4], self.model._grid.regular_grid.extent[5]]
labels, axname = self._make_section_xylabels(section_name, len(ax.get_xticklabels()) - 2)
pos_list = np.linspace(0, dist, len(labels))
ax.xaxis.set_major_locator(FixedLocator(nbins=len(labels), locs=pos_list))
ax.xaxis.set_major_formatter(FixedFormatter((labels)))
ax.set(title=section_name, xlabel=axname, ylabel='Z')
elif cell_number is not None:
_a, _b, _c, extent_val, x, y = self._slice(direction, cell_number)[:-2]
ax.set_xlabel(x)
ax.set_ylabel(y)
ax.set(title='Cell Number: ' + str(cell_number) + ' Direction: ' + str(direction))
if extent_val is not None:
if extent_val[3] < extent_val[2]: # correct vertical orientation of plot
ax.invert_yaxis()
self._aspect = (extent_val[3] - extent_val[2]) / (extent_val[1] - extent_val[0]) / ve
ax.set_xlim(extent_val[0], extent_val[1])
ax.set_ylim(extent_val[2], extent_val[3])
ax.set_aspect('equal')
# Adding some properties to the axes to make easier to plot
ax.section_name = section_name
ax.cell_number = cell_number
ax.direction = direction
ax.tick_params(axis='x', labelrotation=30)
self.axes = np.append(self.axes, ax)
self.fig.tight_layout()
return ax
@staticmethod
def _check_default_section(ax, section_name, cell_number, direction):
if section_name is None:
try:
section_name = ax.section_name
except AttributeError:
pass
if cell_number is None:
try:
cell_number = ax.cell_number
direction = ax.direction
except AttributeError:
pass
return section_name, cell_number, direction
def plot_regular_grid(self, ax, section_name=None, cell_number=None, direction='y',
block: np.ndarray = None, resolution=None, **kwargs):
"""Generic function to plot all regular data
Args:
block:
section_name:
cell_number:
direction:
ax:
**kwargs: imshow kwargs
Returns:
"""
self.update_colot_lot()
extent_val = [*ax.get_xlim(), *ax.get_ylim()]
if 'cmap' in kwargs:
cmap = kwargs['cmap']
else:
cmap = self.cmap
if 'norm' in kwargs:
norm = kwargs['norm']
else:
norm = self.norm
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if section_name is not None:
if section_name == 'topography':
try:
image = self.model.solutions.geological_map[0].reshape(
self.model._grid.topography.values_2d[:, :, 2].shape)
except AttributeError:
raise AttributeError('Geological map not computed. Activate the topography grid.')
else:
assert type(section_name) == str or type(
section_name) == np.str_, 'section name must be a string of the name of the section'
assert self.model.solutions.sections is not None, 'no sections for plotting defined'
l0, l1 = self.model._grid.sections.get_section_args(section_name)
shape = self.model._grid.sections.df.loc[section_name, 'resolution']
image = self.model.solutions.sections[0][0][l0:l1].reshape(shape[0], shape[1]).T
elif cell_number is not None or block is not None:
_a, _b, _c, _, x, y = self._slice(direction, cell_number)[:-2]
if resolution is None:
resolution = self.model._grid.regular_grid.resolution
plot_block = block.reshape(self.model._grid.regular_grid.resolution)
image = plot_block[_a, _b, _c].T
else:
raise AttributeError
ax.imshow(image, origin='lower', zorder=-100,
cmap=cmap, norm=norm, extent=extent_val)
return ax
def plot_lith(self, ax, section_name=None, cell_number=None, direction='y', **kwargs):
block = self.model.solutions.lith_block
self.plot_regular_grid(ax, section_name, cell_number, direction, block=block)
def plot_values(self, ax, series_n=0, section_name=None, cell_number=None,
direction='y', **kwargs):
block = self.model.solutions.values_matrix[series_n]
self.plot_regular_grid(ax, section_name, cell_number, direction, block=block,
**kwargs)
def plot_block(self, ax, series_n=0, section_name=None, cell_number=None, direction='y',
**kwargs):
block = self.model.solutions.block_matrix[series_n]
self.plot_regular_grid(ax, section_name, cell_number, direction, block=block)
def plot_scalar_field(self, ax, section_name=None, cell_number=None, series_n=0, direction='y',
block=None, **kwargs):
"""
Plot the scalar field of a section.
Args:
ax:
section_name:
cell_number:
series_n:
direction:
block:
**kwargs:
Returns:
"""
extent_val = [*ax.get_xlim(), *ax.get_ylim()]
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if section_name is not None:
if section_name == 'topography':
try:
image = self.model.solutions.geological_map[1][series_n].reshape(
self.model._grid.topography.values_3D[:, :, 2].shape)
except AttributeError:
raise AttributeError('Geological map not computed. Activate the topography grid.')
else:
l0, l1 = self.model._grid.sections.get_section_args(section_name)
shape = self.model._grid.sections.df.loc[section_name, 'resolution']
image = self.model.solutions.sections[1][series_n][l0:l1].reshape(shape).T
elif cell_number is not None or block is not None:
_a, _b, _c, _, x, y = self._slice(direction, cell_number)[:-2]
if block is None:
_block = self.model.solutions.scalar_field_matrix[series_n]
else:
_block = block
plot_block = _block.reshape(self.model._grid.regular_grid.resolution)
image = plot_block[_a, _b, _c].T
else:
raise AttributeError
ax.contour(image, cmap='autumn', extent=extent_val, zorder=8, **kwargs)
if 'N' in kwargs:
kwargs.pop('N')
ax.contourf(image, cmap='autumn', extent=extent_val, zorder=7, alpha=.8,
**kwargs)
def plot_data(self, ax, section_name=None, cell_number=None, direction='y',
legend=True,
projection_distance=None, **kwargs):
"""
Plot data--i.e. surface_points and orientations--of a section.
Args:
ax:
section_name:
cell_number:
direction:
legend: bool or 'force'
projection_distance:
**kwargs:
Returns:
"""
if projection_distance is None:
projection_distance = 0.2 * self.model._rescaling.df['rescaling factor'].values[0]
self.update_colot_lot()
points = self.model._surface_points.df.copy()
orientations = self.model._orientations.df.copy()
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if section_name is not None:
if section_name == 'topography':
topo_comp = kwargs.get('topo_comp', 5000)
decimation_aux = int(self.model._grid.topography.values.shape[0] / topo_comp)
tpp = self.model._grid.topography.values[::decimation_aux + 1, :]
cartesian_point_dist = (dd.cdist(tpp, self.model._surface_points.df[['X', 'Y', 'Z']])
< projection_distance).sum(axis=0).astype(bool)
cartesian_ori_dist = (dd.cdist(tpp, self.model._orientations.df[['X', 'Y', 'Z']])
< projection_distance).sum(axis=0).astype(bool)
x, y, Gx, Gy = 'X', 'Y', 'G_x', 'G_y'
else:
# Project points:
shift = np.asarray(self.model._grid.sections.df.loc[section_name, 'start'])
end_point = np.atleast_2d(np.asarray(self.model._grid.sections.df.loc[section_name, 'stop']) - shift)
A_rotate = np.dot(end_point.T, end_point) / self.model._grid.sections.df.loc[section_name, 'dist'] ** 2
cartesian_point_dist = np.sqrt(((np.dot(
A_rotate, (points[['X', 'Y']]).T).T - points[['X', 'Y']]) ** 2).sum(axis=1))
cartesian_ori_dist = np.sqrt(((np.dot(
A_rotate, (orientations[['X', 'Y']]).T).T - orientations[['X', 'Y']]) ** 2).sum(axis=1))
# This are the coordinates of the data projected on the section
cartesian_point = np.dot(A_rotate, (points[['X', 'Y']] - shift).T).T
cartesian_ori = np.dot(A_rotate, (orientations[['X', 'Y']] - shift).T).T
# Since we plot only the section we want the norm of those coordinates
points[['X']] = np.linalg.norm(cartesian_point, axis=1)
orientations[['X']] = np.linalg.norm(cartesian_ori, axis=1)
x, y, Gx, Gy = 'X', 'Z', 'G_x', 'G_z'
else:
if cell_number is None:
cell_number = int(self.model._grid.regular_grid.resolution[0] / 2)
elif cell_number == 'mid':
cell_number = int(self.model._grid.regular_grid.resolution[0] / 2)
if direction == 'x' or direction == 'X':
arg_ = 0
dx = self.model._grid.regular_grid.dx
dir = 'X'
elif direction == 'y' or direction == 'Y':
arg_ = 2
dx = self.model._grid.regular_grid.dy
dir = 'Y'
elif direction == 'z' or direction == 'Z':
arg_ = 4
dx = self.model._grid.regular_grid.dz
dir = 'Z'
else:
raise AttributeError('Direction must be x, y, z')
_loc = self.model._grid.regular_grid.extent[arg_] + dx * cell_number
cartesian_point_dist = points[dir] - _loc
cartesian_ori_dist = orientations[dir] - _loc
x, y, Gx, Gy = self._slice(direction)[4:]
select_projected_p = cartesian_point_dist < projection_distance
select_projected_o = cartesian_ori_dist < projection_distance
# Hack to keep the right X label:
temp_label = copy.copy(ax.xaxis.label)
points_df = points[select_projected_p]
points_df['colors'] = points_df['surface'].map(self._color_lot)
points_df.plot.scatter(x=x, y=y, ax=ax, c='colors', s=70, zorder=102,
edgecolors='white',
colorbar=False)
# points_df.plot.scatter(x=x, y=y, ax=ax, c='white', s=80, zorder=101,
# colorbar=False)
if self.fig.is_legend is False and legend is True or legend == 'force':
markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o',
linestyle='') for color in
self._color_lot.values()]
ax.legend(markers, self._color_lot.keys(), numpoints=1)
self.fig.is_legend = True
ax.xaxis.label = temp_label
sel_ori = orientations[select_projected_o]
aspect = np.subtract(*ax.get_ylim()) / np.subtract(*ax.get_xlim())
min_axis = 'width' if aspect < 1 else 'height'
# Eli options
ax.quiver(sel_ori[x], sel_ori[y], sel_ori[Gx], sel_ori[Gy],
pivot="tail", scale_units=min_axis, scale=30, color=sel_ori['surface'].map(self._color_lot),
edgecolor='k', headwidth=8, linewidths=1, zorder=102)
try:
ax.legend_.set_frame_on(True)
ax.legend_.set_zorder(10000)
except AttributeError:
pass
def calculate_p1p2(self, direction, cell_number):
if direction == 'y':
cell_number = int(self.model._grid.regular_grid.resolution[1] / 2) if cell_number == 'mid' else cell_number
y = self.model._grid.regular_grid.extent[2] + self.model._grid.regular_grid.dy * cell_number
p1 = [self.model._grid.regular_grid.extent[0], y]
p2 = [self.model._grid.regular_grid.extent[1], y]
elif direction == 'x':
cell_number = int(self.model._grid.regular_grid.resolution[0] / 2) if cell_number == 'mid' else cell_number
x = self.model._grid.regular_grid.extent[0] + self.model._grid.regular_grid.dx * cell_number
p1 = [x, self.model._grid.regular_grid.extent[2]]
p2 = [x, self.model._grid.regular_grid.extent[3]]
else:
raise NotImplementedError
return p1, p2
def _slice_topo_4_sections(self, p1, p2, resx, method='interp2d'):
"""
Slices topography along a set linear section
Args:
:param p1: starting point (x,y) of the section
:param p2: end point (x,y) of the section
:param resx: resolution of the defined section
:param method: interpolation method, 'interp2d' for cubic scipy.interpolate.interp2d
'spline' for scipy.interpolate.RectBivariateSpline
Returns:
:return: returns x,y,z values of the topography along the section
"""
xy = self.model._grid.sections.calculate_line_coordinates_2points(p1, p2, resx)
z = self.model._grid.sections.interpolate_zvals_at_xy(xy, self.model._grid.topography, method)
return xy[:, 0], xy[:, 1], z
def plot_topography(self, ax, fill_contour=False,
contour=True,
section_name=None,
cell_number=None, direction='y', block=None, **kwargs):
hillshade = kwargs.get('hillshade', True)
azdeg = kwargs.get('azdeg', 0)
altdeg = kwargs.get('altdeg', 0)
cmap = kwargs.get('cmap', 'terrain')
self.update_colot_lot()
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if section_name is not None and section_name != 'topography':
p1 = self.model._grid.sections.df.loc[section_name, 'start']
p2 = self.model._grid.sections.df.loc[section_name, 'stop']
x, y, z = self._slice_topo_4_sections(p1, p2, self.model._grid.topography.resolution[0])
pseudo_x = np.linspace(0, self.model._grid.sections.df.loc[section_name, 'dist'], z.shape[0])
a = np.vstack((pseudo_x, z)).T
xy = np.append(a,
([self.model._grid.sections.df.loc[section_name, 'dist'], a[:, 1][-1]],
[self.model._grid.sections.df.loc[section_name, 'dist'],
self.model._grid.regular_grid.extent[5]],
[0, self.model._grid.regular_grid.extent[5]],
[0, a[:, 1][0]])).reshape(-1, 2)
ax.fill(xy[:, 0], xy[:, 1], 'k', zorder=10)
elif section_name == 'topography':
import skimage
from gempy.plot.helpers import add_colorbar
topo = self.model._grid.topography
topo_super_res = skimage.transform.resize(
topo.values_2d,
(1600, 1600),
order=3,
mode='edge',
anti_aliasing=True, preserve_range=False)
values = topo_super_res[:, :, 2].T
if contour is True:
CS = ax.contour(values, extent=(topo.extent[:4]),
colors='k', linestyles='solid', origin='lower')
ax.clabel(CS, inline=1, fontsize=10, fmt='%d')
if fill_contour is True:
CS2 = ax.contourf(values, extent=(topo.extent[:4]), cmap=cmap)
add_colorbar(axes=ax, label='elevation [m]', cs=CS2)
if hillshade is True:
from matplotlib.colors import LightSource
ls = LightSource(azdeg=azdeg, altdeg=altdeg)
hillshade_topography = ls.hillshade(values)
ax.imshow(hillshade_topography, origin='lower', extent=topo.extent[:4], alpha=0.5, zorder=11,
cmap='gray')
elif cell_number is not None or block is not None:
p1, p2 = self.calculate_p1p2(direction, cell_number)
resx = self.model._grid.regular_grid.resolution[0]
resy = self.model._grid.regular_grid.resolution[1]
try:
x, y, z = self._slice_topo_4_sections(p1, p2, resx)
if direction == 'x':
a = np.vstack((y, z)).T
ext = self.model._grid.regular_grid.extent[[2, 3]]
elif direction == 'y':
a = np.vstack((x, z)).T
ext = self.model._grid.regular_grid.extent[[0, 1]]
else:
raise NotImplementedError
a = np.append(a,
([ext[1], a[:, 1][-1]],
[ext[1], self.model._grid.regular_grid.extent[5]],
[ext[0], self.model._grid.regular_grid.extent[5]],
[ext[0], a[:, 1][0]]))
line = a.reshape(-1, 2)
ax.fill(line[:, 0], line[:, 1], color='k')
except IndexError:
warnings.warn('Topography needs to be a raster to be able to plot it'
'in 2D sections')
return ax
def plot_contacts(self, ax, section_name=None, cell_number=None, direction='y', block=None,
only_faults=False, **kwargs):
self.update_colot_lot()
section_name, cell_number, direction = self._check_default_section(ax, section_name, cell_number, direction)
if only_faults:
contour_idx = list(self.model._faults.df[self.model._faults.df['isFault'] == True].index)
else:
contour_idx = list(self.model._surfaces.df.index)
extent_val = [*ax.get_xlim(), *ax.get_ylim()]
zorder = kwargs.get('zorder', 100)
if section_name is not None:
if section_name == 'topography':
shape = self.model._grid.topography.resolution
scalar_fields = self.model.solutions.geological_map[1]
c_id = 0 # color id startpoint
for e, block in enumerate(scalar_fields):
level = self.model.solutions.scalar_field_at_surface_points[e][np.where(
self.model.solutions.scalar_field_at_surface_points[e] != 0)]
c_id2 = c_id + len(level) # color id endpoint
ax.contour(block.reshape(shape), 0, levels=np.sort(level),
colors=self.cmap.colors[c_id:c_id2][::-1],
linestyles='solid', origin='lower',
extent=extent_val, zorder=zorder - (e + len(level))
)
c_id = c_id2
else:
l0, l1 = self.model._grid.sections.get_section_args(section_name)
shape = self.model._grid.sections.df.loc[section_name, 'resolution']
scalar_fields = self.model.solutions.sections[1][:, l0:l1]
c_id = 0 # color id startpoint
for e, block in enumerate(scalar_fields):
level = self.model.solutions.scalar_field_at_surface_points[e][np.where(
self.model.solutions.scalar_field_at_surface_points[e] != 0)]
# Ignore warning about some scalars not being on the plot since it is very common
                    # that an interface does not exist for a given section
c_id2 = c_id + len(level) # color id endpoint
color_list = self.model._surfaces.df.groupby('isActive').get_group(True)['color'][c_id:c_id2][::-1]
ax.contour(block.reshape(shape).T, 0, levels=np.sort(level),
# colors=self.cmap.colors[self.model.surfaces.df['isActive']][c_id:c_id2],
colors=color_list,
linestyles='solid', origin='lower',
extent=extent_val, zorder=zorder - (e + len(level))
)
c_id = c_id2
elif cell_number is not None or block is not None:
_slice = self._slice(direction, cell_number)[:3]
shape = self.model._grid.regular_grid.resolution
c_id = 0 # color id startpoint
for e, block in enumerate(self.model.solutions.scalar_field_matrix):
level = self.model.solutions.scalar_field_at_surface_points[e][np.where(
self.model.solutions.scalar_field_at_surface_points[e] != 0)]
# c_id = e
c_id2 = c_id + len(level)
# print(c_id, c_id2)
color_list = self.model._surfaces.df.groupby('isActive').get_group(True)['color'][c_id:c_id2][::-1]
# print(color_list)
ax.contour(block.reshape(shape)[_slice].T, 0, levels=np.sort(level),
colors=color_list,
linestyles='solid', origin='lower',
extent=extent_val, zorder=zorder - (e + len(level))
)
c_id = c_id2
def plot_section_traces(self, ax, section_names=None, show_data=True, **kwargs):
if section_names is None:
section_names = list(self.model._grid.sections.names)
if show_data:
self.plot_data(ax, section_name='topography', **kwargs)
for section in section_names:
j = np.where(self.model._grid.sections.names == section)[0][0]
x1, y1 = np.asarray(self.model._grid.sections.df.loc[section, 'start'])
x2, y2 = np.asarray(self.model._grid.sections.df.loc[section, 'stop'])
ax.plot([x1, x2], [y1, y2], label=section, linestyle='--')
ax.legend(frameon=True)
def plot_topo_g(self, ax, G, centroids, direction="y",
label_kwargs=None, node_kwargs=None, edge_kwargs=None):
res = self.model._grid.regular_grid.resolution
if direction == "y":
c1, c2 = (0, 2)
e1 = self.model._grid.regular_grid.extent[1] - self.model._grid.regular_grid.extent[0]
e2 = self.model._grid.regular_grid.extent[5] - self.model._grid.regular_grid.extent[4]
d1 = self.model._grid.regular_grid.extent[0]
d2 = self.model._grid.regular_grid.extent[4]
if len(list(centroids.items())[0][1]) == 2:
c1, c2 = (0, 1)
r1 = res[0]
r2 = res[2]
elif direction == "x":
c1, c2 = (1, 2)
e1 = self.model._grid.regular_grid.extent[3] - self.model._grid.regular_grid.extent[2]
e2 = self.model._grid.regular_grid.extent[5] - self.model._grid.regular_grid.extent[4]
d1 = self.model._grid.regular_grid.extent[2]
d2 = self.model._grid.regular_grid.extent[4]
if len(list(centroids.items())[0][1]) == 2:
c1, c2 = (0, 1)
r1 = res[1]
r2 = res[2]
elif direction == "z":
c1, c2 = (0, 1)
e1 = self.model._grid.regular_grid.extent[1] - self.model._grid.regular_grid.extent[0]
e2 = self.model._grid.regular_grid.extent[3] - self.model._grid.regular_grid.extent[2]
d1 = self.model._grid.regular_grid.extent[0]
d2 = self.model._grid.regular_grid.extent[2]
if len(list(centroids.items())[0][1]) == 2:
c1, c2 = (0, 1)
r1 = res[0]
r2 = res[1]
nkw = {
"marker": "o",
"color": "black",
"markersize": 20,
"alpha": 0.75
}
if node_kwargs is not None:
nkw.update(node_kwargs)
tkw = {
"color": "white",
"size": 10,
"ha": "center",
"va": "center",
"weight": "ultralight",
"family": "monospace"
}
if label_kwargs is not None:
tkw.update(label_kwargs)
lkw = {
"linewidth": 0.75,
"color": "black"
}
if edge_kwargs is not None:
lkw.update(edge_kwargs)
for edge in G.edges():
a, b = edge
# plot edges
ax.plot(np.array([centroids[a][c1], centroids[b][c1]]) * e1 / r1 + d1,
np.array([centroids[a][c2], centroids[b][c2]]) * e2 / r2 + d2, **lkw)
for node in G.nodes():
ax.plot(centroids[node][c1] * e1 / r1 + d1, centroids[node][c2] * e2 / r2 + d2,
marker="o", color="black", markersize=10, alpha=0.75)
ax.text(centroids[node][c1] * e1 / r1 + d1,
centroids[node][c2] * e2 / r2 + d2, str(node), **tkw)
def plot_gradient(self, scalar_field, gx, gy, gz, cell_number, quiver_stepsize=5,
# maybe call r sth. like "stepsize"?
direction="y", plot_scalar=True, *args, **kwargs): # include plot data?
"""
Plot the gradient of the scalar field in a given direction.
Args:
geo_data (gempy.DataManagement.InputData): Input data of the model
scalar_field(numpy.array): scalar field to plot with the gradient
gx(numpy.array): gradient in x-direction
gy(numpy.array): gradient in y-direction
gz(numpy.array): gradient in z-direction
cell_number(int): position of the array to plot
quiver_stepsize(int): step size between arrows to indicate gradient
            direction(str): xyz. Cartesian direction to be plotted
plot_scalar(bool): boolean to plot scalar field
**kwargs: plt.contour kwargs
Returns:
None
"""
raise NotImplementedError
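# Illustrative sketch (not part of gempy): a simplified version of the projection
# idea used in Plot2D.plot_data to place XY data on a vertical section. The section
# start/stop points and the data point below are hypothetical.
def _example_project_point_on_section():
    import numpy as np
    start = np.array([0.0, 0.0])             # section start (x, y)
    stop = np.array([10.0, 10.0])            # section stop (x, y)
    point = np.array([[3.0, 5.0]])           # one data point to project
    shift = start
    end_point = np.atleast_2d(stop - shift)
    dist = np.linalg.norm(stop - start)      # length of the section trace
    A_rotate = np.dot(end_point.T, end_point) / dist ** 2
    projected = np.dot(A_rotate, (point - shift).T).T
    # distance from the point to the section line (used to decide what gets plotted)
    offset = np.sqrt(((projected - (point - shift)) ** 2).sum(axis=1))
    # position along the section, which becomes the plot's X coordinate
    along = np.linalg.norm(projected, axis=1)
    return along, offset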
def _scale_fig_size(figsize, textsize, rows=1, cols=1):
"""Scale figure properties according to rows and cols.
Parameters
----------
figsize : float or None
Size of figure in inches
textsize : float or None
fontsize
rows : int
Number of rows
cols : int
Number of columns
Returns
-------
figsize : float or None
Size of figure in inches
ax_labelsize : int
fontsize for axes label
titlesize : int
fontsize for title
xt_labelsize : int
fontsize for axes ticks
linewidth : int
linewidth
markersize : int
markersize
"""
params = mpl.rcParams
rc_width, rc_height = tuple(params["figure.figsize"])
rc_ax_labelsize = params["axes.labelsize"]
rc_titlesize = params["axes.titlesize"]
rc_xt_labelsize = params["xtick.labelsize"]
rc_linewidth = params["lines.linewidth"]
rc_markersize = params["lines.markersize"]
if isinstance(rc_ax_labelsize, str):
rc_ax_labelsize = 15
if isinstance(rc_titlesize, str):
rc_titlesize = 16
if isinstance(rc_xt_labelsize, str):
rc_xt_labelsize = 14
if figsize is None:
width, height = rc_width, rc_height
sff = 1 if (rows == cols == 1) else 1.2
width = width * cols * sff
height = height * rows * sff
else:
width, height = figsize
if textsize is not None:
scale_factor = textsize / rc_xt_labelsize
elif rows == cols == 1:
scale_factor = ((width * height) / (rc_width * rc_height)) ** 0.5
else:
scale_factor = 1
ax_labelsize = rc_ax_labelsize * scale_factor
titlesize = rc_titlesize * scale_factor
xt_labelsize = rc_xt_labelsize * scale_factor
linewidth = rc_linewidth * scale_factor
markersize = rc_markersize * scale_factor
return (width, height), ax_labelsize, titlesize, xt_labelsize, linewidth, markersize
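# Illustrative usage sketch (not part of gempy): what _scale_fig_size returns for a
# hypothetical 2 x 2 grid of axes. With figsize=None the rcParams width and height
# are multiplied by cols (rows) and a 1.2 padding factor, while the font and line
# sizes keep their rcParams values because the scale factor is 1 for multi-axes grids.
def _example_scale_fig_size():
    figsize, ax_labelsize, titlesize, xt_labelsize, linewidth, markersize = \
        _scale_fig_size(figsize=None, textsize=None, rows=2, cols=2)
    return figsize, xt_labelsize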
| lgpl-3.0 |
vortex-ape/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 4 | 8739 | import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises, ignore_warnings)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_consistent_transform():
# X_fit_ needs to retain the old, unmodified copy of X
state = np.random.RandomState(0)
X = state.rand(10, 10)
kpca = KernelPCA(random_state=state).fit(X)
transformed1 = kpca.transform(X)
X_copy = X.copy()
X[:, 0] = 666
transformed2 = kpca.transform(X_copy)
assert_array_almost_equal(transformed1, transformed2)
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca),
("Perceptron", Perceptron(max_iter=5))])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca),
("Perceptron", Perceptron(max_iter=5))])
param_grid = dict(Perceptron__max_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron(max_iter=5).fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
rishikksh20/scikit-learn | sklearn/model_selection/_split.py | 7 | 68700 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'RepeatedStratifiedKFold',
'RepeatedKFold',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
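# A minimal sketch (not part of scikit-learn's API) showing how a custom
# splitter can be built on ``BaseCrossValidator`` by implementing only
# ``_iter_test_indices`` and ``get_n_splits``; the class name
# ``_EveryOtherSampleOut`` is purely illustrative.
class _EveryOtherSampleOut(BaseCrossValidator):
    """Toy splitter: even-indexed samples test in split 0, odd ones in 1."""
    def _iter_test_indices(self, X=None, y=None, groups=None):
        n_samples = _num_samples(X)
        yield np.arange(0, n_samples, 2)  # split 0 tests the even indices
        yield np.arange(1, n_samples, 2)  # split 1 tests the odd indices
    def get_n_splits(self, X=None, y=None, groups=None):
        return 2
# e.g. list(_EveryOtherSampleOut().split(np.zeros((4, 1)))) yields two
# (train, test) index pairs produced by the inherited ``split`` method.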
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes group information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
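# Worked example of the fold sizing above (illustrative only): for
# n_samples=10 and n_splits=3, n_samples // n_splits == 3 and
# n_samples % n_splits == 1, so the fold sizes are [4, 3, 3] and the test
# index blocks are [0:4], [4:7] and [7:10] when shuffle=False.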
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
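# Worked example of the greedy assignment above (illustrative only): with
# group sizes {A: 5, B: 3, C: 2, D: 2} and n_splits=2, groups are taken in
# decreasing size order and each goes to the currently lightest fold:
# A -> fold 0, B -> fold 1, C -> fold 1, D -> fold 0 (tie broken by argmin),
# giving fold sizes of 7 and 5 samples.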
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_splits)``; the last one has
    the complementary size.
See also
--------
RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
        Number of splits. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
    The training set has size ``i * (n_samples // (n_splits + 1))
    + n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
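# Worked example of the sizing above (illustrative only): with n_samples=6
# and n_splits=3, n_folds=4, test_size = 6 // 4 = 1 and
# test_starts = range(1 + 6 % 4, 6, 1) = (3, 4, 5), so the splits are
# train=[0 1 2]/test=[3], train=[0 1 2 3]/test=[4], train=[0 1 2 3 4]/test=[5].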
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
    ``p`` different values of the groups while the latter uses samples
    all assigned to the same group.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
X, y, groups = indexable(X, y, groups)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
class _RepeatedSplits(with_metaclass(ABCMeta)):
"""Repeated splits for an arbitrary randomized CV splitter.
Repeats splits for cross-validators n times with different randomization
in each repetition.
Parameters
----------
cv : callable
Cross-validator class.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
**cvargs : additional params
Constructor parameters for cv. Must not contain random_state
and shuffle.
"""
def __init__(self, cv, n_repeats=10, random_state=None, **cvargs):
if not isinstance(n_repeats, (np.integer, numbers.Integral)):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 1:
raise ValueError("Number of repetitions must be greater than 1.")
if any(key in cvargs for key in ('random_state', 'shuffle')):
raise ValueError(
"cvargs must not contain random_state or shuffle.")
self.cv = cv
self.n_repeats = n_repeats
self.random_state = random_state
self.cvargs = cvargs
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
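# Illustrative use of this wrapper (it is exactly how the concrete classes
# below are built): pass the splitter class plus its constructor arguments,
#
#     rkf = _RepeatedSplits(KFold, n_repeats=2, random_state=0, n_splits=5)
#
# and ``rkf.split(X)`` then yields 2 * 5 = 10 (train, test) pairs.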
class RepeatedKFold(_RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold n times with different randomization in each repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> for train_index, test_index in rkf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [0 1] TEST: [2 3]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
See also
--------
    RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedKFold, self).__init__(
KFold, n_repeats, random_state, n_splits=n_splits)
class RepeatedStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> for train_index, test_index in rskf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
See also
--------
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedStratifiedKFold, self).__init__(
StratifiedKFold, n_repeats, random_state, n_splits=n_splits)
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
    """Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
    Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
    not to samples as they do in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
    """
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
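# Illustrative usage (the exact indices depend on the random state, so none
# are shown; ``X`` and ``y`` are assumed to have matching length):
#
#     groups = np.array([1, 1, 2, 2, 3, 3, 4, 4])
#     gss = GroupShuffleSplit(n_splits=4, test_size=0.5, random_state=0)
#     for train_idx, test_idx in gss.split(X, y, groups=groups):
#         # each group's samples land entirely in either train or test
#         assert not set(groups[train_idx]) & set(groups[test_idx])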
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
    NOTE: This does not take into account the number of samples, which is
    known only at split time.
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check if the train/test sizes are meaningful with
    respect to the size of the data (n_samples).
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
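# Worked example (illustrative only): for n_samples=10, test_size=0.25 and
# train_size=None, n_test = ceil(0.25 * 10) = 3 and n_train = 10 - 3 = 7, so
# (7, 3) is returned. Passing test_size=3 as an int gives the same result,
# since integer sizes are interpreted as absolute sample counts.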
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
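# A minimal usage sketch for check_cv (illustrative only; assumes the public
# import path ``sklearn.model_selection.check_cv`` and a binary/multiclass
# target ``y`` as described in the docstring above):
#
#     cv = check_cv(5, y, classifier=True)   # -> a StratifiedKFold with 5 splits
#     for train_idx, test_idx in cv.split(X, y):
#         ...  # fit on X[train_idx], evaluate on X[test_idx]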
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
oneliner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a problem with nosetests
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
paladin74/neural-network-animation | matplotlib/projections/polar.py | 11 | 27444 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarTransform(Transform):
"""
The base polar transform. This handles projection *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform_non_affine(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
return xy
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def __getstate__(self):
return {}
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform_non_affine(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
theta %= 2 * np.pi
r += rmin
return np.concatenate((theta, r), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return "%0.0f\u00b0" % ((x / np.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
    depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', 1)
self._default_theta_offset = kwargs.pop('theta_offset', 0)
self._default_theta_direction = kwargs.pop('theta_direction', 1)
self._default_rlabel_position = kwargs.pop('rlabel_position', 22.5)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(self._default_theta_offset)
self.set_theta_direction(self._default_theta_direction)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
        # Calling polar_axes.xaxis.cla() or polar_axes.yaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
# This is the transform for r-axis ticks. It scales the theta
        # axis so that gridlines from 0.0 to 1.0 now go from 0.0 to
# 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
self._default_rlabel_position, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self.get_rlabel_position()
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self.get_rlabel_position()
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def get_rlabel_position(self):
"""
Returns
-------
float
The theta position of the radius labels in degrees.
"""
return self._r_label_position.to_values()[4]
def set_rlabel_position(self, value):
"""Updates the theta position of the radius labels.
Parameters
----------
value : number
The angular position of the radius labels in degrees.
"""
self._r_label_position._t = (value, 0.0)
self._r_label_position.invalidate()
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
def set_rscale(self, *args, **kwargs):
return Axes.set_yscale(self, *args, **kwargs)
def set_rticks(self, *args, **kwargs):
return Axes.set_yticks(self, *args, **kwargs)
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
place the label (1 is the edge). e.g., 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
# Make sure we take into account unitized data
angles = self.convert_yunits(angles)
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
# Make sure we take into account unitized data
radii = self.convert_xunits(radii)
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self.get_rlabel_position()
self.set_rlabel_position(angle)
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError("You can not set the xscale on a polar plot.")
def set_xlim(self, *args, **kargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return '\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
def can_pan(self) :
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self.get_rlabel_position())
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self.get_rlabel_position(),
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
                dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self.set_rlabel_position(p.r_label_angle - dt)
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# to keep things all self contained, we can put aliases to the Polar classes
# defined above. This isn't strictly necessary, but it makes some of the
# code more readable (and provides a backwards compatible Polar API)
PolarAxes.PolarTransform = PolarTransform
PolarAxes.PolarAffine = PolarAffine
PolarAxes.InvertedPolarTransform = InvertedPolarTransform
PolarAxes.ThetaFormatter = ThetaFormatter
PolarAxes.RadialLocator = RadialLocator
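# A minimal usage sketch (illustrative only): PolarAxes is normally reached
# through the pyplot API via the 'polar' projection name (the projection is
# registered elsewhere in matplotlib.projections):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     theta = np.linspace(0.0, 2 * np.pi, 100)
#     ax = plt.subplot(111, projection='polar')
#     ax.plot(theta, 1.0 + np.sin(theta))
#     ax.set_theta_zero_location('N')   # see set_theta_zero_location above
#     ax.set_theta_direction(-1)        # clockwise; see set_theta_direction
#     plt.show()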
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
| mit |
marcocaccin/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
    frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
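# Example invocations (a sketch; the flags map to the OptionParser options
# defined below):
#
#     python document_clustering.py                   # TF-IDF + minibatch k-means
#     python document_clustering.py --lsa 100         # add an LSA step (100 components)
#     python document_clustering.py --no-minibatch    # ordinary (batch) KMeans
#     python document_clustering.py --use-hashing --n-features 20000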
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
nealbob/nealbob.github.io | code/multicore_storage_sim.py | 2 | 2177 | import numpy as np
from matplotlib import pyplot as plt
import time
import errno  # needed by retry_on_eintr below
from multiprocessing import Process
from multiprocessing.queues import Queue
def retry_on_eintr(function, *args, **kw):
while True:
try:
return function(*args, **kw)
except IOError, e:
if e.errno == errno.EINTR:
continue
else:
raise
class RetryQueue(Queue):
"""Queue which will retry if interrupted with EINTR."""
def get(self, block=True, timeout=None):
return retry_on_eintr(Queue.get, self, block, timeout)
def simulate(K, mu, sig, Sbar, T, multi=False, que=0, jobno=0):
np.random.seed(jobno)
S = np.zeros(T+1)
W = np.zeros(T+1)
I = np.zeros(T+1)
S[0] = K
for t in range(T):
W[t] = min(S[t], Sbar)
I[t+1] = max(np.random.normal(mu, sig), 0)
S[t+1] = min(S[t] - W[t] + I[t+1], K)
if multi:
que.put(S)
else:
return S
def multi_sim(CORES=2, T=100):
results = []
ques = [Queue() for i in range(CORES)]
args = [(100, 70, 70, 70, int(T/CORES), True, ques[i], i) for i in range(CORES)]
jobs = [Process(target=simulate, args=(a)) for a in args]
for j in jobs: j.start()
for q in ques: results.append(q.get())
for j in jobs: j.join()
S = np.hstack(results)
return S
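# A minimal usage sketch (illustrative only; keep CORES at or below the number
# of physical cores available):
#
# if __name__ == '__main__':
#     S = multi_sim(CORES=2, T=100000)
#     plt.plot(S[0:100])
#     plt.show()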
"""
### Sample size
T = 1000000
# Single core run ==================================
tic = time.time()
S = simulate(100, 70, 70, 70, T)
toc = time.time()
print 'Single core run time: ' + str(round(toc - tic,3))
plt.plot(S[0:100])
plt.show()
# Multi core run ==================================
tic = time.time()
CORES = 2
results = []
ques = [Queue() for i in range(CORES)]
args = [(100, 70, 70, 70, int(T/CORES), True, ques[i], i) for i in range(CORES)]
jobs = [Process(target=simulate, args=(a)) for a in args]
for j in jobs: j.start()
for q in ques: results.append(q.get())
for j in jobs: j.join()
S = np.hstack(results)
toc = time.time()
print 'Multi-core run time: ' + str(toc - tic)
plt.plot(S[0:100])
plt.show()
print S.shape
plt.scatter(results[0], results[1])
plt.show()
"""
| mit |
nvoron23/statsmodels | statsmodels/tsa/statespace/tests/test_tools.py | 19 | 4268 | """
Tests for tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
from statsmodels.tsa.statespace import tools
# from .results import results_sarimax
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal, assert_raises
)
class TestCompanionMatrix(object):
cases = [
(2, np.array([[0,1],[0,0]])),
([1,-1,-2], np.array([[1,1],[2,0]])),
([1,-1,-2,-3], np.array([[1,1,0],[2,0,1],[3,0,0]]))
]
def test_cases(self):
for polynomial, result in self.cases:
assert_equal(tools.companion_matrix(polynomial), result)
class TestDiff(object):
x = np.arange(10)
cases = [
# diff = 1
([1,2,3], 1, None, 1, [1, 1]),
# diff = 2
(x, 2, None, 1, [0]*8),
# diff = 1, seasonal_diff=1, k_seasons=4
(x, 1, 1, 4, [0]*5),
(x**2, 1, 1, 4, [8]*5),
(x**3, 1, 1, 4, [60, 84, 108, 132, 156]),
# diff = 1, seasonal_diff=2, k_seasons=2
(x, 1, 2, 2, [0]*5),
(x**2, 1, 2, 2, [0]*5),
(x**3, 1, 2, 2, [24]*5),
(x**4, 1, 2, 2, [240, 336, 432, 528, 624]),
]
def test_cases(self):
# Basic cases
for series, diff, seasonal_diff, k_seasons, result in self.cases:
# Test numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Series
series = pd.Series(series)
# Rewrite to test as n-dimensional array
series = np.c_[series, series]
result = np.c_[result, result]
# Test Numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Dataframe
series = pd.DataFrame(series)
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
class TestIsInvertible(object):
cases = [
([1, -0.5], True),
([1, 1-1e-9], True),
([1, 1], False),
([1, 0.9,0.1], True),
(np.array([1,0.9,0.1]), True),
(pd.Series([1,0.9,0.1]), True)
]
def test_cases(self):
for polynomial, invertible in self.cases:
assert_equal(tools.is_invertible(polynomial), invertible)
class TestConstrainStationaryUnivariate(object):
cases = [
(np.array([2.]), -2./((1+2.**2)**0.5))
]
def test_cases(self):
for unconstrained, constrained in self.cases:
result = tools.constrain_stationary_univariate(unconstrained)
assert_equal(result, constrained)
class TestValidateMatrixShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,2), 5, 2, None),
('TEST', (5,2), 5, 2, 10),
('TEST', (5,2,10), 5, 2, 10),
]
invalid = [
('TEST', (5,), 5, None, None),
('TEST', (5,1,1,1), 5, 1, None),
('TEST', (5,2), 10, 2, None),
('TEST', (5,2), 5, 1, None),
('TEST', (5,2,10), 5, 2, None),
('TEST', (5,2,10), 5, 2, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_matrix_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_matrix_shape, *args
)
class TestValidateVectorShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,), 5, None),
('TEST', (5,), 5, 10),
('TEST', (5,10), 5, 10),
]
invalid = [
('TEST', (5,2,10), 5, 10),
('TEST', (5,), 10, None),
('TEST', (5,10), 5, None),
('TEST', (5,10), 5, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_vector_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_vector_shape, *args
)
| bsd-3-clause |
ruggiero/clustep | analysis/profiles.py | 2 | 4544 | # -*- coding: utf-8 -*-
from sys import path as syspath
from os import path
from bisect import bisect_left
from argparse import ArgumentParser as parser
import numpy as np
from numpy import pi, cos, sin, arctan
from scipy import integrate
import matplotlib
matplotlib.use('Agg') # To be able to plot under an SSH session.
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from pygadgetreader import *
import centers
# These variables must be updated manually.
a_halo = 200
M_halo = 100000
a_gas = 200
M_gas = 1e5*0.15/0.85
time = 0.0
def main():
input_ = init()
print "reading..."
npart = readheader(input_, 'npartTotal')
if(npart[0] > 0):
data_gas = process_data(input_, 'gas')
part_gas, aux_gas = log_partition(data_gas, 1.3)
density_plot(input_, data_gas, part_gas, aux_gas, 'gas')
if(npart[1] > 0):
data_dm = process_data(input_, 'dm')
part_dm, aux_dm = log_partition(data_dm, 1.3)
density_plot(input_, data_dm, part_dm, aux_dm, 'dm')
def init():
global dm_core, gas_core
flags = parser(description="Plots stuff.")
flags.add_argument('--dm-core', help='Sets the density profile for the\
dark matter to have a core.', action='store_true')
flags.add_argument('--gas-core', help='The same, but for the bulge.',
action='store_true')
flags.add_argument('i', help='The name of the input file.',
metavar="file.dat")
args = flags.parse_args()
dm_core = args.dm_core
gas_core = args.gas_core
input_ = args.i
return input_
def process_data(input_, component):
coords = readsnap(input_, 'pos', component)
data = np.array([np.linalg.norm(i) for i in coords])
data = sorted(data)
return data
def density(r, M, a, core=False):
if(core):
return (3*M*a) / (4*np.pi*(r+a)**4)
else:
if(r == 0):
return 0
else:
return (M*a) / (2*np.pi*r*(r+a)**3)
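# Note (illustrative, not part of the original script): with core=False the
# expression above matches the Hernquist profile rho(r) = M*a / (2*pi*r*(r+a)**3),
# so e.g. density(a_halo, M_halo, a_halo) evaluates the model density at r = a.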
# Given a data vector, in which each element represents a different
# particle by a list of the form [radius, radial_velocity^2], ordered
# according to the radii; and a multiplication factor, returns the right
# indexes of a log partition of the vector. Also returns an auxiliary
# vector, which will be useful in the functions that calculate the
# distribution functions.
def log_partition(data, factor):
limits = []
auxiliary = []
left_limit = 0
right_limit = 0.01
left_index = 0
while(right_limit < 200 * a_halo):
# Before right_index, everybody is smaller than right_limit.
right_index = left_index + bisect_left(data[left_index:], right_limit)
limits.append(right_index)
auxiliary.append([right_index - left_index, (right_limit + left_limit) /
2])
left_limit = right_limit
left_index = right_index
right_limit *= factor
return limits, auxiliary
# Returns a list containing elements of the form [radius, density].
def density_distribution(data, partition, aux, M):
N = len(data)
distribution = []
left = 0
cte = (10**10*3*M) / (4*np.pi*N)
for j in np.arange(len(partition)):
right = partition[j]
if(right >= len(data)):
break
count = aux[j][0]
middle_radius = aux[j][1]
if(count > 0):
density = (cte * count) / (data[right]**3 - data[left]**3)
distribution.append([middle_radius, density])
else:
distribution.append([middle_radius, 0])
left = right
return distribution
def density_plot(input_, data, part, aux, name):
if(name == 'gas'):
M = M_gas
a = a_gas
core = gas_core
else:
        M = M_halo  # the module defines M_halo/a_halo above (there is no M_dm/a_dm)
        a = a_halo
core = dm_core
dist = density_distribution(data, part, aux, M)
x_axis = np.logspace(np.log10(dist[0][0]), np.log10(dist[-1][0]), num=500)
p1, = plt.plot([i[0] for i in dist], [i[1] for i in dist], '-', color='blue')
p2, = plt.plot(x_axis, [10**10 * density(i, M, a, core) for i in x_axis], color='black')
plt.legend([p1, p2], [u"Data", "Model"], loc=1)
plt.xlabel("Radius (kpc)")
plt.ylabel("Density ( M$_{\odot}$/kpc$^3$)")
plt.xscale('log')
plt.yscale('log')
plt.xlim([1, 3e3])
plt.ylim([1, 10**9])
plt.title(name + ", t = %1.2f Gyr" % time)
plt.savefig(input_ + "-" + name + "-density.png", bbox_inches='tight')
print "Done with " + name + " density for " + input_
plt.close()
if __name__ == '__main__':
main()
| gpl-2.0 |
hoechenberger/psychopy | psychopy/demos/builder/practical IAT/scoreIAT.py | 1 | 7371 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Scoring script for MSU-PsychoPy version of IAT task.
Authors: Jeremy R. Gray & Nate Pasmanter, 2013
"""
from __future__ import division
from __future__ import print_function
from builtins import range
import pandas as pd
import glob, os, sys
def scoreIAT(csvfile, write_file=False):
"""Input = csv file; output = D score (or explanation why data are bad).
Expects column headers of the form Response_#.corr, and Response_#.rt,
where # is 1-7, for IAT block number.
Scoring is mostly per GNB 2003:
Greenwald, A. G., Nosek, B. A., & Banaji, M. R. (2003). Understanding and
using the implicit association test: I. An improved scoring algorithm.
Journal of Personality and Social Psychology, 85, 197-216.
Following Amodio, incorrect responses do not contribute to RT; there's no
RT penalty for this reason. For computing SDs, unbiased SD is used (N-1).
If write_file=True, will save score to a file 'Scored_' + csvfile.
A positive D value from this script indicates a bias in favor of
"creative bad / practical good". I.e., if RT in blocks with creative+good is
longer than RT in blocks with creative+bad, people are more conflicted or
hesitant about creative+good.
The way the task is set up, when side == 1, creative and bad are paired first.
If side == -1, the opposite is true. This pairing is handled by the scoring
script.
"""
# ------------ Thresholds for excluding trials or subjects: ------------
rt_FAST = 0.300
rt_FASTms = int(1000 * rt_FAST) # 300ms
rt_SLOW = 10.
correct = 1
incorrect = 0
# GNB 2003 thresholds for why subject should be excluded:
warn = u''
threshold = {'ac_prac_blk': 0.50,
'ac_prac_all': 0.60, 'rt_prac_all': 0.35,
'ac_task_blk': 0.60, 'rt_task_blk': 0.25,
'ac_task_all': 0.70, 'rt_task_all': 0.10 }
# ------------ Read dataframe (df) from .csv file and parse: ------------
df = pd.read_csv(csvfile)
# accuracy; mean --> proportion correct
prac_ac = [df.loc[:, 'Response_1.corr'].dropna(),
df.loc[:, 'Response_2.corr'].dropna(),
df.loc[:, 'Response_5.corr'].dropna()]
task_ac = [df.loc[:, 'Response_3.corr'].dropna(),
df.loc[:, 'Response_4.corr'].dropna(),
df.loc[:, 'Response_6.corr'].dropna(),
df.loc[:, 'Response_7.corr'].dropna()]
# response time in seconds
prac_rt = [df.loc[:, 'Response_1.rt'].dropna(),
df.loc[:, 'Response_2.rt'].dropna(),
df.loc[:, 'Response_5.rt'].dropna()]
task_rt = [df.loc[:, 'Response_3.rt'].dropna(),
df.loc[:, 'Response_4.rt'].dropna(),
df.loc[:, 'Response_6.rt'].dropna(),
df.loc[:, 'Response_7.rt'].dropna()]
assert len(task_ac[0]) == len(task_ac[2]) == len(task_rt[0]) # block 3, 6
assert len(task_ac[1]) == len(task_ac[3]) == len(task_rt[1]) # block 4, 7
assert len(task_rt[0]) == len(task_rt[2]) > 1 # require 2+ items in 3, 6
    assert len(task_rt[1]) == len(task_rt[3]) > 1 # require 2+ items in 4, 7
assert all([all(task_ac[i].isin([correct, incorrect])) for i in range(4)])
assert all([all(task_rt[i] > 0) for i in range(4)]) # require positive RTs
# counterbalanced IAT screen side: +1 or -1; used in calc of D
side = df.loc[0, 'side']
assert side in [-1, 1]
# ------------ Check participant exclusion thresholds ------------
# check proportion-too-fast in each task block:
for i, rt in enumerate(task_rt):
prop_too_fast = len(rt[(rt < rt_FAST)]) / len(rt)
if prop_too_fast > threshold['rt_task_blk']:
pct = 100 * prop_too_fast
warn += "%.0f%% trials with RT < %dms in task block #%d\n" % (
pct, rt_FASTms, (3, 4, 6, 7)[i])
# check proportion-too-fast all task trials:
rt = task_rt[0].append(task_rt[1]).append(task_rt[2]).append(task_rt[3])
prop_too_fast = len(rt[(rt < rt_FAST)]) / len(rt)
if prop_too_fast > threshold['rt_task_all']:
pct = 100 * prop_too_fast
warn += "%.0f%% trials with RT < %dms across all task blocks\n" % (
pct, rt_FASTms)
# check proportion-too-fast in each practice block:
for i, rt in enumerate(prac_rt):
prop_too_fast = len(rt[(rt < rt_FAST)]) / len(rt)
if prop_too_fast > threshold['rt_prac_all']:
pct = 100 * prop_too_fast
warn += "%.0f%% trials with RT < %dms in practice block #%d\n" % (
pct, rt_FASTms, (1, 2, 5)[i])
# check proportion-error in each practice block:
for i, prac_blk in enumerate(prac_ac):
if prac_blk.mean() < threshold['ac_prac_blk']:
pct = 100 * (1 - prac_blk.mean())
warn += "%.0f%% errors in practice block #%d\n" %(pct, (1, 2, 5)[i])
# check proportion-error in all practice trials:
ac = prac_ac[0].append(prac_ac[1]).append(prac_ac[2]).mean()
if ac < threshold['ac_prac_all']:
pct = 100 * (1 - ac.mean())
warn += "%.0f%% errors across all practice blocks\n" % pct
# check proportion-error in task blocks:
for i, ac in enumerate(task_ac):
if ac.mean() < threshold['ac_task_blk']:
pct = 100 * (1 - ac.mean())
warn += "%.0f%% errors in task block #%d\n" % (pct, (3, 4, 6, 7)[i])
# check proportion-error across all task trials:
ac = task_ac[0].append(task_ac[1]).append(task_ac[2]).append(task_ac[3])
if ac.mean() < threshold['ac_task_all']:
pct = 100 * (1 - ac.mean())
warn += "%.0f%% errors across all task trials\n" % pct
# ------------ Filter out bad trials: ------------
for i, block in enumerate(task_ac):
# retain trials with correct responses:
correct_trials = (block == correct)
task_rt[i] = task_rt[i][correct_trials]
#task_ac[i] = task_ac[i][correct_trials]
for i, block in enumerate(task_rt):
# retain trials where RT is not too fast or too slow:
rt_ok_trials = (block >= rt_FAST) & (block <= rt_SLOW)
task_rt[i] = task_rt[i][rt_ok_trials]
#task_ac[i] = task_ac[i][rt_ok_trials]
# ------------ Calculate summary stats of the filtered data: ----------
mean3, mean4, mean6, mean7 = [a.mean() for a in task_rt]
stdev36 = task_rt[0].append(task_rt[2]).std() # pooled std of blocks 3 & 6
stdev47 = task_rt[1].append(task_rt[3]).std() # pooled std of blocks 4 & 7
d36 = side * (mean6 - mean3) / stdev36 # side is +1 or -1
d47 = side * (mean7 - mean4) / stdev47
D_IAT = (d36 + d47) / 2
stats = D_IAT, side, mean3, mean4, mean6, mean7, stdev36, stdev47, warn.strip() or 'None'
labels = 'D_IAT', 'side', 'mean3', 'mean4', 'mean6', 'mean7', 'sd36', 'sd47', 'warnings'
if write_file:
df = pd.DataFrame([stats], columns=labels)
df.to_csv('Scored_' + csvfile, index=False, index_label=False, encoding='utf-8')
return warn.strip() or D_IAT
def batchScoreIAT(path='.', write_file=False):
"""Call scoreIAT() on all csv files in path
"""
files = glob.glob(os.path.join(path, '*.csv'))
for f in files:
scoreIAT(f, write_file=write_file)
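# Usage sketch (file names here are hypothetical):
#
#     scoreIAT('subject_01.csv')                  # returns D score or a warning string
#     scoreIAT('subject_01.csv', write_file=True) # also writes Scored_subject_01.csv
#     batchScoreIAT('.', write_file=True)         # scores every *.csv in the folder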
if __name__ == '__main__':
for f in sys.argv[1:]:
print((f, scoreIAT(f)))
| gpl-3.0 |
cfjhallgren/shogun | examples/undocumented/python/graphical/group_lasso.py | 11 | 7789 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import rand, randn, permutation, multivariate_normal
from shogun import BinaryLabels, RealFeatures, IndexBlock, IndexBlockGroup, FeatureBlockLogisticRegression
def generate_synthetic_logistic_data(n, p, L, blk_nnz, gcov, nstd):
# Generates synthetic data for the logistic regression, using the example
# from [Friedman10]
# n : # of observations
# p : # of predictors
# L : # of blocks
# blk_nnz : # of non-zero coefs. in each block
# gcov : correlation within groups
# nstd : standard deviation of the added noise
# size of each block (assumed to be an integer)
pl = p / L
# generating the coefficients (betas)
coefs = np.zeros((p, 1))
for (i, nnz) in enumerate(blk_nnz):
blkcoefs = np.zeros((pl, 1))
blkcoefs[0:nnz] = np.sign(rand(nnz, 1) - 0.5)
coefs[pl * i:pl * (i + 1)] = permutation(blkcoefs)
# generating the predictors
mu = np.zeros(p)
gsigma = gcov * np.ones((pl, pl))
np.fill_diagonal(gsigma, 1.0)
Sigma = np.kron(np.eye(L), gsigma)
# the predictors come from a standard Gaussian multivariate distribution
X = multivariate_normal(mu, Sigma, n)
# linear function of the explanatory variables in X, plus noise
t = np.dot(X, coefs) + randn(n, 1) * nstd
# applying the logit
Pr = 1 / (1 + np.exp(-t))
# The response variable y[i] is a Bernoulli random variable taking
# value 1 with probability Pr[i]
y = rand(n, 1) <= Pr
# we want each _column_ in X to represent a feature vector
# y and coefs should be also 1D arrays
return X.T, y.flatten(), coefs.flatten()
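# Usage sketch (same parameter values as the __main__ block further down):
#
#     X, y, true_coefs = generate_synthetic_logistic_data(
#         200, 100, 10, [10, 8, 6, 4, 2, 1], 0.2, 0.4)
#     # X is (p, n): each column is one feature vector; y is a boolean array.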
def misclassified_groups(est_coefs, true_coefs, L):
# Compute the number of groups that are misclassified, i.e. the ones with
# at least one non-zero coefficient whose estimated coefficients are all
# set to zero, or viceversa, as explained in [Friedman10]
# est_coefs : coefficients estimated by the FBLR
# true_coefs : the original coefficients of our synthetic example
# L : number of blocks
p = est_coefs.shape[0] # number of predictors
pl = p / L
est_nz = est_coefs != 0
true_nz = true_coefs != 0
est_blk_nzcount = np.array([sum(est_nz[pl * i:pl * (i + 1)]) for i in xrange(L)])
true_blk_nzcount = np.array([sum(true_nz[pl * i:pl * (i + 1)]) for i in xrange(L)])
return np.sum(np.logical_xor(est_blk_nzcount == 0, true_blk_nzcount == 0))
def misclassified_features(est_coefs, true_coefs):
# Compute the number of individual coefficients that are misclassified,
# i.e. estimated to be zero when the true coefficient is nonzero or
# vice-versa, as explained in [Friedman10]
# est_coefs : coefficients estimated by the FBLR
# true_coefs : the original coefficients of our synthetic example
return np.sum(np.logical_xor(est_coefs == 0, true_coefs == 0))
def compute_misclassifications(cls, true_coefs, L, rel_z):
# Try the given classifier with different values of relative regularization
# parameters, store the coefficients and compute the number of groups
# and features misclassified.
# INPUTS:
# - cls : the classifier to try
# - true_coefs : the original coefficients of our synthetic example
# - L : number of blocks
# - rel_z : regularization values to try, they will be in [0,1]
# OUTPUTS:
# - est_coefs : array with the estimated coefficients, each row for a
# different value of regularization
# - misc_groups, misc_feats : see above
num_z = rel_z.shape[0]
est_coefs = np.zeros((num_z, true_coefs.shape[0]))
misc_groups = np.zeros(num_z)
misc_feats = np.zeros(num_z)
for (i, z) in enumerate(rel_z):
cls.set_z(z)
cls.train()
est_coefs[i, :] = cls.get_w()
misc_groups[i] = misclassified_groups(est_coefs[i, :], true_coefs, L)
misc_feats[i] = misclassified_features(est_coefs[i, :], true_coefs)
return est_coefs, misc_groups, misc_feats
if __name__ == '__main__':
print('FeatureBlockLogisticRegression example')
np.random.seed(956) # reproducible results
# default parameters from [Friedman10]
n = 200
p = 100
L = 10
blk_nnz = [10, 8, 6, 4, 2, 1]
gcov = 0.2
nstd = 0.4
# range of (relative) regularization values to try
min_z = 0
max_z = 1
num_z = 21
# get the data
X, y, true_coefs = generate_synthetic_logistic_data(n, p, L, blk_nnz, gcov, nstd)
# here each column represents a feature vector
features = RealFeatures(X)
# we have to convert the labels to +1/-1
labels = BinaryLabels(np.sign(y.astype(int) - 0.5))
# SETTING UP THE CLASSIFIERS
# CLASSIFIER 1: group LASSO
# build the feature blocks and add them to the block group
pl = p / L
block_group = IndexBlockGroup()
for i in xrange(L):
block_group.add_block(IndexBlock(pl * i, pl * (i + 1)))
cls_gl = FeatureBlockLogisticRegression(0.0, features, labels, block_group)
# with set_regularization(1), the parameter z will indicate the fraction of
# the maximum regularization to use, and so z is in [0,1]
# (reference: SLEP manual)
cls_gl.set_regularization(1)
cls_gl.set_q(2.0) # it is the default anyway...
# CLASSIFIER 2: LASSO (illustrating group lasso with all group sizes = 1)
block_group_ones = IndexBlockGroup()
for i in xrange(p):
block_group_ones.add_block(IndexBlock(i, i + 1))
cls_l = FeatureBlockLogisticRegression(0.0, features, labels, block_group_ones)
cls_l.set_regularization(1)
cls_l.set_q(2.0)
# trying with different values of (relative) regularization parameters
rel_z = np.linspace(min_z, max_z, num_z)
coefs_gl, miscgp_gl, miscft_gl = compute_misclassifications(cls_gl, true_coefs, L, rel_z)
coefs_l, miscgp_l, miscft_l = compute_misclassifications(cls_l, true_coefs, L, rel_z)
# Find the best regularization for each classifier
# for the group lasso: the one that gives the fewest groups misclassified
best_z_gl = np.argmin(miscgp_gl)
# for the lasso: the one that gives the fewest features misclassified
best_z_l = np.argmin(miscft_l)
# plot the true coefs. and the signs of the estimated coefs.
fig = plt.figure()
for (coefs, best_z, name, pos) in zip([coefs_gl, coefs_l], [best_z_gl, best_z_l], ['Group lasso', 'Lasso'], [0, 1]):
ax = plt.subplot2grid((4, 2), (pos, 0), colspan=2)
plt.hold(True)
plt.plot(xrange(p), np.sign(coefs[best_z, :]), 'o', markeredgecolor='none', markerfacecolor='g')
plt.plot(xrange(p), true_coefs, '^', markersize=7, markeredgecolor='r', markerfacecolor='none', markeredgewidth=1)
plt.xticks(xrange(0, p + pl, pl))
plt.yticks([-1, 0, 1])
plt.xlim((-1, p + 1))
plt.ylim((-2, 2))
plt.grid(True)
# plt.legend(('estimated', 'true'), loc='best')
plt.title(name)
plt.xlabel('Predictor [triangles=true coefs], best reg. value = %.2f' % rel_z[best_z])
plt.ylabel('Coefficient')
ax = plt.subplot2grid((4, 2), (2, 0), rowspan=2)
plt.plot(rel_z, miscgp_gl, 'ro-', rel_z, miscgp_l, 'bo-')
plt.legend(('Group lasso', 'Lasso'), loc='best')
plt.title('Groups misclassified')
plt.xlabel('Relative regularization parameter')
plt.ylabel('# of groups misclassified')
ax = plt.subplot2grid((4, 2), (2, 1), rowspan=2)
plt.plot(rel_z, miscft_gl, 'ro-', rel_z, miscft_l, 'bo-')
plt.legend(('Group lasso', 'Lasso'), loc='best')
plt.title('Features misclassified')
plt.xlabel('Relative regularization parameter')
plt.ylabel('# of features misclassified')
plt.tight_layout(1.2, 0, 0)
plt.show()
| gpl-3.0 |
mirestrepo/voxels-at-lems | super3d/compute_mean_var.py | 1 | 1331 | import boxm_batch;
import sys;
import optparse;
import os;
import glob;
#import matplotlib.pyplot as plt;
boxm_batch.register_processes();
boxm_batch.register_datatypes();
class dbvalue:
def __init__(self, index, type):
self.id = index # unsigned integer
self.type = type # string
#dir = "/Users/isa/Experiments/super3d/sr2_scene_sr2_images/expectedImgs_1"
#dir = "/Users/isa/Experiments/super3d/sr2_3scene_sr2_images/expectedImgs_2"
#dir = "/Users/isa/Experiments/super3d/scene_sr2_images/expectedImgs_2"
#dir = "/Users/isa/Experiments/super3d/scene/expectedImgs_2"
#dir = "/Volumes/vision/video/isabel/super3d/scili_experiment/normal_scene/expectedImgs_0"
dir = "/Users/isa/Experiments/super3d/scili_experiments_bicubic/sr2_scene_sr2_images/expectedImgs_0"
boxm_batch.init_process("vilLoadImageViewProcess");
boxm_batch.set_input_string(0,dir + "/exepected_var.tiff");
boxm_batch.run_process();
(id,type) = boxm_batch.commit_output(0);
var_img = dbvalue(id,type);
boxm_batch.init_process("vilImageMeanProcess");
boxm_batch.set_input_from_db(0,var_img);
boxm_batch.run_process();
(id,type) = boxm_batch.commit_output(0);
mean = dbvalue(id,type);
mean_val = boxm_batch.get_output_float(mean.id);
mean_file = dir + "/mean_var.txt"
f = open(mean_file, 'w');
f.write(str(mean_val));
f.close(); | bsd-2-clause |
thorwhalen/ut | ml/diag/multiple_two_component_manifold_learning.py | 1 | 9229 | __author__ = 'thor'
"""
An illustration of various embeddings, based on Pedregosa, Grisel, Blondel, and Varoquaux's code
for the digits dataset. See http://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learn a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
def scatter_plot(X, y):
plt.scatter(X[:, 0], X[:, 1], c=y)
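# Hedged usage note (added, not part of the original module): analyze() below
# can be called with no arguments, in which case it falls back to the
# scikit-learn digits data, or with an explicit feature matrix and label
# vector, e.g. analyze(X=my_features, y=my_labels, data_name="my data"), where
# my_features and my_labels are placeholders for the caller's own arrays.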
def analyze(X=None, y=None, plot_fun=scatter_plot, data_name="data"):
if X is None:
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plot_fun(X, y)
if title is not None:
plt.title(title)
# #----------------------------------------------------------------------
# # Scale and visualize the embedding vectors
# def plot_embedding(X, title=None):
# x_min, x_max = np.min(X, 0), np.max(X, 0)
# X = (X - x_min) / (x_max - x_min)
#
# plt.figure()
# ax = plt.subplot(111)
# for i in range(X.shape[0]):
# plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
# color=plt.cm.Set1(y[i] / 10.),
# fontdict={'weight': 'bold', 'size': 9})
#
# if hasattr(offsetbox, 'AnnotationBbox'):
# # only print thumbnails with matplotlib > 1.0
# shown_images = np.array([[1., 1.]]) # just something big
# for i in range(digits.data.shape[0]):
# dist = np.sum((X[i] - shown_images) ** 2, 1)
# if np.min(dist) < 4e-3:
# # don't show points that are too close
# continue
# shown_images = np.r_[shown_images, [X[i]]]
# imagebox = offsetbox.AnnotationBbox(
# offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
# X[i])
# ax.add_artist(imagebox)
# plt.xticks([]), plt.yticks([])
# if title is not None:
# plt.title(title)
#
#
# #----------------------------------------------------------------------
# # Plot images of the digits
# n_img_per_row = 20
# img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
# for i in range(n_img_per_row):
# ix = 10 * i + 1
# for j in range(n_img_per_row):
# iy = 10 * j + 1
# img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
# plt.imshow(img, cmap=plt.cm.binary)
# plt.xticks([])
# plt.yticks([])
# plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the {}".format(data_name))
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print(("Done. Reconstruction error: %g" % clf.reconstruction_error_))
plot_embedding(X_lle,
"Locally Linear Embedding of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print(("Done. Reconstruction error: %g" % clf.reconstruction_error_))
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print(("Done. Reconstruction error: %g" % clf.reconstruction_error_))
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print(("Done. Reconstruction error: %g" % clf.reconstruction_error_))
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print(("Done. Stress: %f" % clf.stress_))
plot_embedding(X_mds,
"MDS embedding of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the {} (time {:.2f})".format(data_name, time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the {} (time {:.2f})".format(data_name, time() - t0))
plt.show() | mit |
idrigo/castawayplot | castawayplot.py | 1 | 1537 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 28 23:01:05 2017
@author: drigo
"""
from __future__ import division
import pandas as pd
import CTDplot
import config
import os
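# Hedged note (added, not in the original script): judging from the attribute
# accesses below, the config module is assumed to provide at least
# config.directory, config.out_dir, config.Txlim, config.Sxlim,
# config.major_ticsT, config.major_ticsS, config.minor_ticsT,
# config.minor_ticsS and config.io.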
cycle=0
filelist=os.listdir(config.directory)
list_len=len(filelist)
if not os.path.exists(config.out_dir):  # check that the output folder exists; create it if it does not
os.makedirs(config.out_dir)
for f in filelist:  # loop over the list of files in the directory
filename=config.directory+f
figname=f[:-4] + '.png'
df=pd.read_csv(filename,
skiprows=28,
usecols=[1,2,5,6])
df = df.rename(columns={list(df)[0]: 'Depth',
list(df)[1]: 'Temp',
list(df)[2]: 'Sal',
list(df)[3]: 'Dens'})
    # the parameters below are taken from the configuration file
    # call the plotting function from the CTDplot module and pass it the parameters
CTDplot.plot(figname,
config.Txlim,
config.Sxlim,
config.major_ticsT,
config.major_ticsS,
config.minor_ticsT,
config.minor_ticsS,
df,
config.io)
cycle+=1
pers=(cycle/list_len)*100
    print ('File number {0}. {1} % done'.format(cycle,pers)) | mit |
lail3344/sms-tools | lectures/06-Harmonic-model/plots-code/harmonicModel-analysis-synthesis.py | 24 | 1387 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import harmonicModel as HM
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/vignesh.wav')
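# Hedged annotation (added, not in the original script): the parameters below
# follow the usual sms-tools naming, i.e. w is the analysis window, N the FFT
# size, t the magnitude threshold in dB, nH the maximum number of harmonics,
# minf0/maxf0 the f0 search range in Hz, f0et the f0 error threshold, Ns the
# synthesis FFT size, H the hop size and minSineDur the minimum track duration.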
w = np.blackman(1201)
N = 2048
t = -90
nH = 100
minf0 = 130
maxf0 = 300
f0et = 7
Ns = 512
H = Ns/4
minSineDur = .1
harmDevSlope = 0.01
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
y = SM.sineModelSynth(hfreq, hmag, hphase, Ns, H, fs)
numFrames = int(hfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.figure(1, figsize=(9, 7))
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (vignesh.wav)')
plt.subplot(3,1,2)
yhfreq = hfreq
yhfreq[hfreq==0] = np.nan
plt.plot(frmTime, hfreq, lw=1.2)
plt.axis([0,y.size/float(fs),0,8000])
plt.title('f_h, harmonic frequencies')
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('yh')
plt.tight_layout()
UF.wavwrite(y, fs, 'vignesh-harmonic-synthesis.wav')
plt.savefig('harmonicModel-analysis-synthesis.png')
plt.show()
| agpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/pandas/tests/frame/test_sorting.py | 7 | 18805 | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_sort_index(self):
# GH13496
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0 : sort rows by index labels
unordered = frame.ix[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.ix[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.ix[:, ::-1]
assert_frame_equal(result, expected)
def test_sort_index_multiindex(self):
# GH13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level='A', sort_remaining=False)
expected = df.sortlevel('A', sort_remaining=False)
assert_frame_equal(result, expected)
# sort columns by specified level of multi-index
df = df.T
result = df.sort_index(level='A', axis=1, sort_remaining=False)
expected = df.sortlevel('A', axis=1, sort_remaining=False)
assert_frame_equal(result, expected)
# MI sort, but no level: sort_level has no effect
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(sort_remaining=False)
expected = df.sort_index()
assert_frame_equal(result, expected)
def test_sort(self):
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
frame.sort(columns='A')
with tm.assert_produces_warning(FutureWarning):
frame.sort()
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.ix[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
self.assertRaises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with assertRaisesRegexp(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_index_categorical_index(self):
df = (DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca'))
.astype('category', categories=list('cab'))})
.set_index('B'))
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[3, 2, 5, 1, 0, 4]]
assert_frame_equal(result, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_index_multicolumn(self):
import random
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.ix[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
self.assertNotEqual(a_id, id(df['A']))
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.ix[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.ix[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['A', 'B'], ascending=[1, 0])
result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_index_duplicates(self):
# with 9816, these are all translated to .sort_values
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with assertRaisesRegexp(ValueError, 'duplicate'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with assertRaisesRegexp(ValueError, 'duplicate'):
df.sort_values(by='a')
with assertRaisesRegexp(ValueError, 'duplicate'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
with assertRaisesRegexp(ValueError, 'duplicate'):
df.sort_values(by=['a'])
with assertRaisesRegexp(ValueError, 'duplicate'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
with assertRaisesRegexp(ValueError, 'duplicate'):
# multi-column 'by' is separate codepath
df.sort_values(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4, 2),
columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
with assertRaisesRegexp(ValueError, 'levels'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with assertRaisesRegexp(ValueError, 'levels'):
df.sort_values(by='a')
# convert tuples to a list of tuples
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=[('a', 1)])
expected = df.sort_values(by=[('a', 1)])
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=('a', 1))
result = df.sort_values(by=('a', 1))
assert_frame_equal(result, expected)
def test_sortlevel(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sortlevel('A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sortlevel(['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with assertRaisesRegexp(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
| mit |
garbersc/keras-galaxies | try_convnet_keras_try_fullFit_maxout.py | 1 | 16971 | import theano.sandbox.cuda.basic_ops as sbcuda
import numpy as np
# import pandas as pd
import keras.backend as T
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, MaxPooling1D, Dropout, Input, Convolution2D, MaxoutDense
from keras.layers.core import Lambda, Flatten, Reshape, Permute
from keras.optimizers import SGD, Adam
from keras.engine.topology import Merge
from keras.callbacks import LearningRateScheduler
from keras import initializations
import functools
from keras_extra_layers import kerasCudaConvnetPooling2DLayer, kerasCudaConvnetConv2DLayer, fPermute
from custom_for_keras import kaggle_MultiRotMergeLayer_output, OptimisedDivGalaxyOutput, kaggle_input, kaggle_sliced_accuracy, dense_weight_init_values, rmse, input_generator #FIXME not finished
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
debug = True
predict = False
continueAnalysis = True
saveAtEveryValidation = True
getWinSolWeights = False
if getWinSolWeights:
WINSOL_PATH = "analysis/final/try_convent_gpu1_win_sol_net_on_0p0775_validation.pkl"
analysis = np.load(WINSOL_PATH)
l_weights = analysis['param_values']
#w_pairs=[]
#for i in range(len(l_weights)/2):
# w_pairs.append([l_weights[2*i],l_weights[2*i+1]])
w_kSorted=[]
for i in range(len(l_weights)/2):
w_kSorted.append(l_weights[-2-2*i])
w_kSorted.append(l_weights[-1-2*i])
CATEGORISED = False
y_train = np.load("data/solutions_train.npy")
if CATEGORISED: y_train = np.load("data/solutions_train_categorised.npy")
ra.y_train=y_train
# split training data into training + a small validation set
ra.num_train = y_train.shape[0]
ra.num_valid = ra.num_train // 10 # integer division, defines the validation size
ra.num_train -= ra.num_valid
#training num check for EV usage
if ra.num_train!=55420: print "num_train = %s not %s" % (ra.num_train,55420)
ra.y_valid = ra.y_train[ra.num_train:]
ra.y_train = ra.y_train[:ra.num_train]
load_data.num_train=y_train.shape[0]
load_data.train_ids = np.load("data/train_ids.npy")
ra.load_data.num_train = load_data.num_train
ra.load_data.train_ids = load_data.train_ids
ra.valid_ids = load_data.train_ids[ra.num_train:]
ra.train_ids = load_data.train_ids[:ra.num_train]
train_ids = load_data.train_ids
test_ids = load_data.test_ids
num_train = ra.num_train
num_test = len(test_ids)
num_valid = ra.num_valid
y_valid = ra.y_valid
y_train = ra.y_train
valid_ids = ra.valid_ids
train_ids = ra.train_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
BATCH_SIZE = 512 #keep in mind
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = { #if adam is used the learning rate doesnt follow the schedule
0: 0.1,
20: 0.05,
40: 0.01,
80: 0.005
#500: 0.04,
#0: 0.01,
#1800: 0.004,
#2300: 0.0004,
# 0: 0.08,
# 50: 0.04,
# 2000: 0.008,
# 3200: 0.0008,
# 4600: 0.0004,
}
if continueAnalysis or getWinSolWeights :
LEARNING_RATE_SCHEDULE = {
0: 0.1,
20: 0.05,
40: 0.01,
80: 0.005
#0: 0.0001,
#500: 0.002,
#800: 0.0004,
#3200: 0.0002,
#4600: 0.0001,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
N_TRAIN = num_train# 2000#1008#10000 # 30000 # this should be a multiple of the batch size, ideally.
EPOCHS = 100
VALIDATE_EVERY = 10 #20 # 12 # 6 # 6 # 6 # 5 #
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
print("The training sample contains %s , the validation sample contains %s images. \n" % ( ra.num_train, ra.num_valid ))
NUM_EPOCHS_NONORM = 0.1 # train without normalisation for this many epochs, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
#FIXME does ist run for part batches one day??? yes!
'''
while (N_TRAIN) % BATCH_SIZE:
N_TRAIN-=1
if debug: print N_TRAIN
'''
USE_ADAM = False #TODO not implemented
USE_LLERROR=False #TODO not implemented
USE_WEIGHTS=False #TODO not implemented
if USE_LLERROR and USE_WEIGHTS: print 'combination of weighted classes and log loss function not implemented yet'
WEIGHTS=np.ones((37))
#WEIGHTS[2]=1 #star or artifact
WEIGHTS[3]=1.5 #edge on yes
WEIGHTS[4]=1.5 #edge on no
#WEIGHTS[5]=1 #bar feature yes
#WEIGHTS[7]=1 #spiral arms yes
#WEIGHTS[14]=1 #anything odd? no
#WEIGHTS[18]=1 #ring
#WEIGHTS[19]=1 #lence
#WEIGHTS[20]=1 #disturbed
#WEIGHTS[21]=1 #irregular
#WEIGHTS[22]=1 #other
#WEIGHTS[23]=1 #merger
#WEIGHTS[24]=1 #dust lane
WEIGHTS=WEIGHTS/WEIGHTS[WEIGHTS.argmax()]
GEN_BUFFER_SIZE = 1
TRAIN_LOSS_SF_PATH = "trainingNmbrs_keras_hist_new.txt"
#TARGET_PATH = "predictions/final/try_convnet.csv"
WEIGHTS_PATH = "analysis/final/try_convent_keras_maxout_no_finaldropout.h5"
with open(TRAIN_LOSS_SF_PATH, 'a')as f:
if continueAnalysis:
f.write('#continuing from ')
f.write(WEIGHTS_PATH)
f.write("#wRandFlip \n")
f.write("#The training is running for %s epochs, each with %s images. The validation sample contains %s images. \n" % (EPOCHS,N_TRAIN, ra.num_valid ))
f.write("#round ,time, mean_train_loss , mean_valid_loss, mean_sliced_accuracy, mean_train_loss_test, mean_accuracy \n")
input_sizes = [(69, 69), (69, 69)]
PART_SIZE=45
N_INPUT_VARIATION=2
print "Build model"
if debug : print("input size: %s x %s x %s x %s" % (input_sizes[0][0],input_sizes[0][1],NUM_INPUT_FEATURES,BATCH_SIZE))
input_tensor = Input(batch_shape=(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]) , dtype='float32', name='input_tensor')
input_tensor_45 = Input(batch_shape=(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]) , dtype='float32', name='input_tensor_45')
input_0 = Lambda(lambda x: x,output_shape=(NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]),batch_input_shape=(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]),name='lambda_input_0')
input_45 = Lambda(lambda x: x,output_shape=(NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1]),batch_input_shape=(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]),name='lambda_input_45')
model1 = Sequential()
model1.add(input_0)
model2 = Sequential()
model2.add(input_45)
if debug :print model1.output_shape
model = Sequential()
model.add(Merge([model1, model2], mode=kaggle_input , output_shape=lambda x: ((model1.output_shape[0]+model2.output_shape[0])*2*N_INPUT_VARIATION, NUM_INPUT_FEATURES, PART_SIZE, PART_SIZE) , arguments={'part_size':PART_SIZE, 'n_input_var': N_INPUT_VARIATION, 'include_flip':False, 'random_flip':True} ))
if debug : print model.output_shape
#needed for the pylearn moduls used by kerasCudaConvnetConv2DLayer and kerasCudaConvnetPooling2DLayer
model.add(fPermute((1,2,3,0)))
if debug : print model.output_shape
model.add(kerasCudaConvnetConv2DLayer(n_filters=32, filter_size=6 , untie_biases=True))
if debug : print model.output_shape
model.add(kerasCudaConvnetPooling2DLayer())
if debug : print model.output_shape
model.add(kerasCudaConvnetConv2DLayer(n_filters=64, filter_size=5 , untie_biases=True))
if debug : print model.output_shape
model.add(kerasCudaConvnetPooling2DLayer())
model.add(kerasCudaConvnetConv2DLayer(n_filters=128, filter_size=3 , untie_biases=True))
model.add(kerasCudaConvnetConv2DLayer(n_filters=128, filter_size=3, weights_std=0.1 , untie_biases=True ))
if debug : print model.output_shape
model.add(kerasCudaConvnetPooling2DLayer())
if debug : print model.output_shape
model.add(fPermute((3,0,1,2)))
if debug : print model.output_shape
model.add(Lambda(function=kaggle_MultiRotMergeLayer_output, output_shape=lambda x : ( x[0]//4//N_INPUT_VARIATION, (x[1]*x[2]*x[3]*4* N_INPUT_VARIATION) ) , arguments={'num_views':N_INPUT_VARIATION}) )
if debug : print model.output_shape
model.add(Dropout(0.5))
model.add(MaxoutDense(output_dim=2048, nb_feature=2 ,weights = dense_weight_init_values(model.output_shape[-1],2048, nb_feature=2) ))
model.add(Dropout(0.5))
model.add(MaxoutDense(output_dim=2048, nb_feature=2 ,weights = dense_weight_init_values(model.output_shape[-1],2048, nb_feature=2) ))
model.add(Dropout(0.5))
model.add(Dense(output_dim=37, weights = dense_weight_init_values(model.output_shape[-1],37 ,w_std = 0.01 , b_init_val = 0.1 ) ))
if debug : print model.output_shape
model_seq=model([input_tensor,input_tensor_45])
output_layer_norm = Lambda(function=OptimisedDivGalaxyOutput , output_shape=lambda x: x ,arguments={'normalised':True,'categorised':CATEGORISED})(model_seq)
output_layer_noNorm = Lambda(function=OptimisedDivGalaxyOutput , output_shape=lambda x: x ,arguments={'normalised':False,'categorised':CATEGORISED})(model_seq)
model_norm=Model(input=[input_tensor,input_tensor_45],output=output_layer_norm)
model_norm_metrics = Model(input=[input_tensor,input_tensor_45],output=output_layer_norm)
model_noNorm=Model(input=[input_tensor,input_tensor_45],output=output_layer_noNorm)
if debug : print model_norm.output_shape
if debug : print model_noNorm.output_shape
current_lr=LEARNING_RATE_SCHEDULE[0]
def lr_function(e):
global current_lr
if e in LEARNING_RATE_SCHEDULE:
_current_lr = LEARNING_RATE_SCHEDULE[e]
current_lr = _current_lr
else: _current_lr = current_lr
return _current_lr
lr_callback = LearningRateScheduler(lr_function)
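# Hedged worked example (added, not in the original script): lr_function only
# updates current_lr when the epoch index hits a key of LEARNING_RATE_SCHEDULE,
# so when Keras calls the callback once per epoch in order, the last scheduled
# rate is kept between keys, e.g. with the default schedule epochs 20-39 train
# at 0.05 and the rate drops to 0.01 at epoch 40.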
if getWinSolWeights:
w_load_worked = False
for l in model_norm.layers:
if debug: print '---'
if debug: print len(l.get_weights())
l_weights = l.get_weights()
if len(l_weights)==len(w_kSorted):
if debug:
for i in range(len(l_weights)):
print type(l_weights[i])
print np.shape(l_weights[i])
if not np.shape(l_weights[i]) == np.shape(w_kSorted[i]): "somethings wrong with the loaded weight shapes"
l.set_weights(w_kSorted)
w_load_worked = True
if not w_load_worked: print "no matching weight length were found"
model_norm.compile(loss='mean_squared_error', optimizer=SGD(lr=LEARNING_RATE_SCHEDULE[0], momentum=MOMENTUM, nesterov=True) )#, metrics=[rmse, 'categorical_accuracy',kaggle_sliced_accuracy])
model_noNorm.compile(loss='mean_squared_error', optimizer=SGD(lr=LEARNING_RATE_SCHEDULE[0], momentum=MOMENTUM, nesterov=True))
model_norm_metrics.compile(loss='mean_squared_error', optimizer=SGD(lr=LEARNING_RATE_SCHEDULE[0], momentum=MOMENTUM, nesterov=True), metrics=[rmse, 'categorical_accuracy',kaggle_sliced_accuracy])
#adam = Adam(lr=LEARNING_RATE_SCHEDULE[0], beta_1=0.9, beta_2=0.999, epsilon=1e-07, decay=0.0)
#model_norm.compile(loss='mean_squared_error', optimizer=adam, metrics=['categorical_accuracy',kaggle_sliced_accuracy])
#model_noNorm.compile(loss='mean_squared_error', optimizer=adam, metrics=['categorical_accuracy'])
model_norm.summary()
if continueAnalysis:
print "Load model weights"
model_norm.load_weights(WEIGHTS_PATH)
WEIGHTS_PATH = ((WEIGHTS_PATH.split('.',1)[0]+'_next.h5'))
print "Set up data loading"
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=N_TRAIN/BATCH_SIZE, chunk_size=BATCH_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
#train_gen = post_augmented_data_gen
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE) #augmentation buffering will not work with the keras .fit, works with fit_generator
input_gen = input_generator(train_gen)
'''
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=N_TRAIN, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
'''
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=N_TRAIN, target_sizes=input_sizes)
return data_gen_valid #load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
'''
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=N_TRAIN, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
'''
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
t_val=(time.time() - start_time)
print " took %.2f seconds" % (t_val)
if debug: print("Free GPU Mem before first step %s MiB " % (sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0]/1024./1024.))
print ''
print "losses without training on validation sample up front"
evalHistdic = {}
for n in model_norm_metrics.metrics_names:
evalHistdic[n]=[]
evalHist = model_norm_metrics.evaluate(x=[xs_valid[0],xs_valid[1]] , y=y_valid,batch_size=BATCH_SIZE, verbose=1)
for i in range(len(model_norm_metrics.metrics_names)):
print " %s : %.3f" %(model_norm_metrics.metrics_names[i],evalHist[i])
evalHistdic[model_norm_metrics.metrics_names[i]].append(evalHist[i])
if debug: print("Free GPU Mem after validation check %s MiB " % (sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0]/1024./1024.))
print ''
print "load and augment data, ETA %.f s" % (t_val*N_TRAIN/num_valid*2)
start_time = time.time()
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
l0_input_var = xs_chunk[0]
l0_45_input_var = xs_chunk[1]
l6_target_var = y_chunk
print " took %.2f seconds" % (time.time() - start_time)
print ''
print "Train %s epoch without norm" % NUM_EPOCHS_NONORM
no_norm_events = int(NUM_EPOCHS_NONORM*N_TRAIN)
hist = model_noNorm.fit( x=[l0_input_var[:no_norm_events],l0_45_input_var[:no_norm_events]] , y=l6_target_var[:no_norm_events],validation_data=([xs_valid[0],xs_valid[1]],y_valid),batch_size=BATCH_SIZE, nb_epoch=1, verbose=1, callbacks=[lr_callback] ) #loss is squared!!!
hists=hist.history
if debug: print("\nFree GPU Mem before train loop %s MiB " % (sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0]/1024./1024.))
epochs_run = 0
epoch_togo =EPOCHS
for i in range(EPOCHS/VALIDATE_EVERY if not EPOCHS%VALIDATE_EVERY else EPOCHS/VALIDATE_EVERY+1 ):
print ''
print "epochs run: %s - epochs to go: %s " % (epochs_run,epoch_togo)
hist = model_norm.fit( x=[l0_input_var,l0_45_input_var] , y=l6_target_var, validation_data=([xs_valid[0],xs_valid[1]],y_valid) ,batch_size=BATCH_SIZE, nb_epoch=np.min([epoch_togo,VALIDATE_EVERY])+epochs_run, initial_epoch=epochs_run, verbose=1, callbacks=[lr_callback] )
for k in hists:
hists[k]+=hist.history[k]
    epochs_this_step = np.min([epoch_togo,VALIDATE_EVERY])  # compute once so epoch_togo and epochs_run stay in sync
    epoch_togo-=epochs_this_step
    epochs_run+=epochs_this_step
print ''
print 'validate:'
evalHist = model_norm_metrics.evaluate(x=[xs_valid[0],xs_valid[1]] , y=y_valid,batch_size=BATCH_SIZE, verbose=1)
for i in range(len(model_norm_metrics.metrics_names)):
print " %s : %.3f" %(model_norm_metrics.metrics_names[i],evalHist[i])
evalHistdic[model_norm_metrics.metrics_names[i]].append(evalHist[i])
if saveAtEveryValidation:
print "saving weights"
model_norm.save_weights(WEIGHTS_PATH)
elif ((i+1)==(EPOCHS/VALIDATE_EVERY if not EPOCHS%VALIDATE_EVERY else EPOCHS/VALIDATE_EVERY+1)):
print "saving weights"
model_norm.save_weights(WEIGHTS_PATH)
if (i == 0) and debug: print("\nFree GPU Mem in train loop %s MiB " % (sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0]/1024./1024.))
import json
with open(TRAIN_LOSS_SF_PATH, 'a')as f:
f.write("#eval losses and metrics:\n")
f.write(json.dumps(evalHistdic))
f.write("\n")
f.write("#fit losses:\n")
f.write(json.dumps(hists))
f.write("\n")
print "Done!"
exit()
| bsd-3-clause |
andaag/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
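# Hedged illustration of the docstring above (added, not part of the original
# example): with n_clusters=None the fitted estimator exposes the reduced set
# of subclusters directly, e.g.
#   brc = Birch(threshold=1.7, n_clusters=None).fit(X)
#   brc.subcluster_centers_.shape    # roughly (158, 2) for the blobs built below
# The exact subcluster count depends on the data and on the threshold.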
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
adelomana/schema | conditionedFitness/figureEngineered/script.1.2.py | 2 | 2568 | import numpy,sys,scipy,pickle
import matplotlib.pyplot
sys.path.append('../lib')
import calculateStatistics
### MAIN
matplotlib.rcParams.update({'font.size':36,'font.family':'Times New Roman','xtick.labelsize':28,'ytick.labelsize':28})
thePointSize=12
jarDir='/Users/adriandelomana/scratch/'
# engineered 1.2
xSignal=numpy.array([[216,212,180,196,190],[49,50,59,60,61]])
xNoSignal=numpy.array([[195,186,164,228,159],[87,76,80,94,92]])
cf_mu_0, cf_sd_0, pvalue_0 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[152,147,102,110,127,],[104,73,92,76,89]])
xNoSignal=numpy.array([[126,155,145,126,145],[72,60,61,61,67]])
cf_mu_50, cf_sd_50, pvalue_50 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[116,124,87],[123,114,134]])
xNoSignal=numpy.array([[148,142,144],[125,94,104]])
cf_mu_190, cf_sd_190, pvalue_190 = calculateStatistics.main(xSignal, xNoSignal)
xSignal=numpy.array([[116,105,88,83,78],[96,126,135,118,118]])
xNoSignal=numpy.array([[79,81,101,92,114],[147,132,111,112,109]])
cf_mu_300, cf_sd_300, pvalue_300 = calculateStatistics.main(xSignal, xNoSignal)
x = [0, 50, 190, 300]
y = [cf_mu_0, cf_mu_50, cf_mu_190, cf_mu_300]
z = [cf_sd_0, cf_sd_50, cf_sd_190, cf_sd_300]
w = [pvalue_0, pvalue_50, pvalue_190, pvalue_300]
print(y)
matplotlib.pyplot.errorbar(x,y,yerr=z,fmt=':o',color='blue',ecolor='blue',markeredgecolor='blue',capsize=0,ms=thePointSize,mew=0)
for i in range(len(w)):
if y[i] > 0.:
sp=y[i]+z[i]+0.02
else:
sp=y[i]-z[i]-0.02
if w[i] < 0.05 and w[i] >= 0.01:
matplotlib.pyplot.scatter(x[i], sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
if w[i] < 0.01:
matplotlib.pyplot.scatter(x[i]-3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.scatter(x[i]+3, sp, s=75, c='black', marker=r"${*}$", edgecolors='none')
matplotlib.pyplot.plot([0,300],[0,0],'--',color='black')
sequencing=[0,50,300]
top=-0.4
for xpos in sequencing:
matplotlib.pyplot.scatter(xpos, top, s=400, c='black', marker=r"x", edgecolors='none')
matplotlib.pyplot.xlim([-25,325])
matplotlib.pyplot.ylim([-0.55,0.55])
matplotlib.pyplot.yticks([-0.4,-0.2,0,0.2,0.4])
matplotlib.pyplot.xlabel('Generation')
matplotlib.pyplot.ylabel('Conditioned Fitness')
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.savefig('figure.engineered.1.2.pdf')
matplotlib.pyplot.clf()
# save processed data alternative plotting
trajectory=[x,y,z]
jarFile=jarDir+'engineered.1.2.pickle'
f=open(jarFile,'wb')
pickle.dump(trajectory,f)
f.close()
| gpl-3.0 |
zaxtax/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale,
512 x 512 pixel image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
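# Hedged worked example (added, not in the original script) of the np.choose
# step above: np.choose([0, 2, 1], [10., 20., 30.]) picks, for each label, the
# value at that index, giving array([10., 30., 20.]); here every pixel is thus
# replaced by the grey level of its cluster centre.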
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can, however, be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
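# Hedged usage note (added, not part of the original module): the kernel choice
# described in the module docstring is selected through the constructor, e.g.
# LabelPropagation(kernel='knn', n_neighbors=7) builds the sparse O(k*N) graph
# instead of the dense RBF one.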
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            An [n_samples, n_samples] sized matrix will be created from this.
        y : array_like, shape = [n_samples]
            Target labels (unlabeled points are marked as -1).
            All unlabeled samples will be transductively assigned labels.
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
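# A small usage sketch (not part of the class): fit LabelPropagation on two
# well-separated synthetic clusters with one labeled point per cluster.  The
# data, gamma value and random seed below are illustrative assumptions.
def _example_label_propagation():
    rng = np.random.RandomState(42)
    X = np.vstack([0.3 * rng.randn(20, 2), 0.3 * rng.randn(20, 2) + 3])
    labels = -np.ones(40, dtype=int)    # -1 marks unlabeled samples
    labels[0], labels[20] = 0, 1        # one labeled sample per cluster
    model = LabelPropagation(kernel='rbf', gamma=1.).fit(X, labels)
    # transduction_ holds the label inferred for every training sample
    return model.transduction_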
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
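# Sketch of the matrix returned above for a dense affinity matrix W
# (illustration only; graph_laplacian also handles sparse input and edge
# cases): the propagation operator is D^{-1/2} W D^{-1/2} with self-loops
# removed, where D is the diagonal degree matrix computed without self-loops.
def _example_spreading_operator(W):
    W = np.array(W, dtype=float, copy=True)
    np.fill_diagonal(W, 0.)             # drop self-loops, as above
    degrees = W.sum(axis=1)
    degrees[degrees == 0] = 1.          # guard isolated nodes
    d_inv_sqrt = 1. / np.sqrt(degrees)
    return W * d_inv_sqrt[:, np.newaxis] * d_inv_sqrt[np.newaxis, :]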
| bsd-3-clause |
myselfHimanshu/UdacityDSWork | Intro-To-Data-Science/Lesson2/PS2_1.py | 2 | 1480 | import pandas
import pandasql
def num_rainy_days(filename):
'''
This function should run a SQL query on a dataframe of
weather data. The SQL query should return one column and
one row - a count of the number of days in the dataframe where
the rain column is equal to 1 (i.e., the number of days it
rained). The dataframe will be titled 'weather_data'. You'll
need to provide the SQL query. You might find SQL's count function
useful for this exercise. You can read more about it here:
https://dev.mysql.com/doc/refman/5.1/en/counting-rows.html
You might also find that interpreting numbers as integers or floats may not
work initially. In order to get around this issue, it may be useful to cast
these numbers as integers. This can be done by writing cast(column as integer).
So for example, if we wanted to cast the maxtempi column as an integer, we would actually
write something like where cast(maxtempi as integer) = 76, as opposed to simply
where maxtempi = 76.
You can see the weather data that we are passing in below:
https://www.dropbox.com/s/7sf0yqc9ykpq3w8/weather_underground.csv
'''
weather_data = pandas.read_csv(filename)
#print weather_data['rain'].head()
q = """
SELECT sum(rain) from weather_data
"""
#Execute your SQL command against the pandas frame
rainy_days = pandasql.sqldf(q.lower(), locals())
return rainy_days
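# Usage sketch (not part of the original exercise); the filename below is a
# placeholder for the weather_underground.csv file linked in the docstring.
if __name__ == "__main__":
    print(num_rainy_days('weather_underground.csv'))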
| gpl-2.0 |
abretaud/tools-iuc | tools/table_compute/scripts/table_compute.py | 10 | 14161 | #!/usr/bin/env python3
"""
Table Compute tool - a wrapper around pandas with parameter input validation.
"""
__version__ = "0.9.2"
import csv
import math
from sys import argv
import numpy as np
import pandas as pd
from safety import Safety
if len(argv) == 2 and argv[1] == "--version":
print(__version__)
exit(-1)
# The import below should be generated in the same directory as
# the table_compute.py script.
# It is placed here so that the --version switch does not fail
import userconfig as uc # noqa: I100,I202
class Utils:
@staticmethod
def getOneValueMathOp(op_name):
"Returns a simple one value math operator such as log, sqrt, etc"
return getattr(math, op_name)
@staticmethod
def getVectorPandaOp(op_name):
"Returns a valid DataFrame vector operator"
return getattr(pd.DataFrame, op_name)
@staticmethod
def getTwoValuePandaOp(op_name, pd_obj):
"Returns a valid two value DataFrame or Series operator"
return getattr(type(pd_obj), "__" + op_name + "__")
@staticmethod
def readcsv(filedict, narm):
data = pd.read_csv(
filedict["file"],
header=filedict["header"],
index_col=filedict["row_names"],
keep_default_na=narm,
nrows=filedict["nrows"],
skipfooter=filedict["skipfooter"],
skip_blank_lines=filedict["skip_blank_lines"],
sep='\t'
)
# Fix whitespace issues in index or column names
data.columns = [col.strip() if type(col) is str else col
for col in data.columns]
data.index = [row.strip() if type(row) is str else row
for row in data.index]
return(data)
@staticmethod
def rangemaker(tab):
# e.g. "1:3,2:-2" specifies "1,2,3,2,1,0,-1,-2" to give [0,1,2,1,0,-1,-2]
# Positive indices are decremented by 1 to reference 0-base numbering
# Negative indices are unaltered, so that -1 refers to the last column
out = []
err_mess = None
for ranges in tab.split(","):
nums = ranges.split(":")
if len(nums) == 1:
numb = int(nums[0])
# Positive numbers get decremented.
# i.e. column "3" refers to index 2
# column "-1" still refers to index -1
if numb != 0:
out.append(numb if (numb < 0) else (numb - 1))
else:
err_mess = "Please do not use 0 as an index"
elif len(nums) == 2:
left, right = map(int, nums)
if 0 in (left, right):
err_mess = "Please do not use 0 as an index"
elif left < right:
if left > 0: # and right > 0 too
# 1:3 to 0,1,2
out.extend(range(left - 1, right))
elif right < 0: # and left < 0 too
# -3:-1 to -3,-2,-1
out.extend(range(left, right + 1))
elif left < 0 and right > 0:
# -2:2 to -2,-1,0,1
out.extend(range(left, 0))
out.extend(range(0, right))
elif right < left:
if right > 0: # and left > 0
# 3:1 to 2,1,0
out.extend(range(left - 1, right - 2, -1))
elif left < 0: # and right < 0
# -1:-3 to -1,-2,-3
out.extend(range(left, right - 1, -1))
elif right < 0 and left > 0:
# 2:-2 to 1,0,-1,-2
out.extend(range(left - 1, right - 1, -1))
else:
err_mess = "%s should not be equal or contain a zero" % nums
if err_mess:
print(err_mess)
return(None)
return(out)
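# Illustration of the 1-based row/column specification accepted by
# Utils.rangemaker (a sketch for documentation; not called by the tool).
def _rangemaker_examples():
    assert Utils.rangemaker("1:3") == [0, 1, 2]
    assert Utils.rangemaker("2:-2") == [1, 0, -1, -2]
    assert Utils.rangemaker("1:3,2:-2") == [0, 1, 2, 1, 0, -1, -2]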
# Set decimal precision
pd.options.display.precision = uc.Default["precision"]
user_mode = uc.Default["user_mode"]
user_mode_single = None
out_table = None
params = uc.Data["params"]
if user_mode == "single":
# Read in TSV file
data = Utils.readcsv(uc.Data["tables"][0], uc.Default["narm"])
user_mode_single = params["user_mode_single"]
if user_mode_single == "precision":
# Useful for changing decimal precision on write out
out_table = data
elif user_mode_single == "select":
cols_specified = params["select_cols_wanted"]
rows_specified = params["select_rows_wanted"]
# Select all indexes if empty array of values
if cols_specified:
cols_specified = Utils.rangemaker(cols_specified)
else:
cols_specified = range(len(data.columns))
if rows_specified:
rows_specified = Utils.rangemaker(rows_specified)
else:
rows_specified = range(len(data))
# do not use duplicate indexes
# e.g. [2,3,2,5,5,4,2] to [2,3,5,4]
nodupes_col = not params["select_cols_unique"]
nodupes_row = not params["select_rows_unique"]
if nodupes_col:
cols_specified = [x for i, x in enumerate(cols_specified)
if x not in cols_specified[:i]]
if nodupes_row:
rows_specified = [x for i, x in enumerate(rows_specified)
if x not in rows_specified[:i]]
out_table = data.iloc[rows_specified, cols_specified]
elif user_mode_single == "filtersumval":
mode = params["filtersumval_mode"]
axis = params["filtersumval_axis"]
operation = params["filtersumval_op"]
compare_operation = params["filtersumval_compare"]
value = params["filtersumval_against"]
minmatch = params["filtersumval_minmatch"]
if mode == "operation":
# Perform axis operation
summary_op = Utils.getVectorPandaOp(operation)
axis_summary = summary_op(data, axis=axis)
# Perform vector comparison
compare_op = Utils.getTwoValuePandaOp(
compare_operation, axis_summary
)
axis_bool = compare_op(axis_summary, value)
elif mode == "element":
if operation.startswith("str_"):
data = data.astype("str")
value = str(value)
# Convert str_eq to eq
operation = operation[4:]
else:
value = float(value)
op = Utils.getTwoValuePandaOp(operation, data)
bool_mat = op(data, value)
axis_bool = np.sum(bool_mat, axis=axis) >= minmatch
out_table = data.loc[:, axis_bool] if axis == 0 else data.loc[axis_bool, :]
elif user_mode_single == "matrixapply":
# 0 - column, 1 - row
axis = params["matrixapply_dimension"]
# sd, mean, max, min, sum, median, summary
operation = params["matrixapply_op"]
if operation is None:
use_custom = params["matrixapply_custom"]
if use_custom:
custom_func = params["matrixapply_custom_func"]
def fun(vec):
"""Dummy Function"""
return vec
ss = Safety(custom_func, ['vec'], 'pd.Series')
fun_string = ss.generateFunction()
exec(fun_string) # SUPER DUPER SAFE...
out_table = data.apply(fun, axis)
else:
print("No operation given")
exit(-1)
else:
op = getattr(pd.DataFrame, operation)
out_table = op(data, axis)
elif user_mode_single == "element":
# lt, gt, ge, etc.
operation = params["element_op"]
bool_mat = None
if operation is not None:
if operation == "rowcol":
# Select all indexes if empty array of values
if "element_cols" in params:
cols_specified = Utils.rangemaker(params["element_cols"])
else:
cols_specified = range(len(data.columns))
if "element_rows" in params:
rows_specified = Utils.rangemaker(params["element_rows"])
else:
rows_specified = range(len(data))
                # Inclusive selection:
                # - True: giving a row or a column selects every element in
                #   that entire row or column
                # - False: only elements at the intersection of the given
                #   rows and columns are selected
inclusive = params["element_inclusive"]
                # Create a bool matrix (initialised to False) with selected
# rows and columns set to True
bool_mat = data.copy()
bool_mat[:] = False
if inclusive:
bool_mat.iloc[rows_specified, :] = True
bool_mat.iloc[:, cols_specified] = True
else:
bool_mat.iloc[rows_specified, cols_specified] = True
else:
op = Utils.getTwoValuePandaOp(operation, data)
value = params["element_value"]
try:
# Could be numeric
value = float(value)
except ValueError:
pass
# generate filter matrix of True/False values
bool_mat = op(data, value)
else:
# implement no filtering through a filter matrix filled with
# True values.
bool_mat = np.full(data.shape, True)
# Get the main processing mode
mode = params["element_mode"]
if mode == "replace":
replacement_val = params["element_replace"]
out_table = data.mask(
bool_mat,
data.where(bool_mat).applymap(
lambda x: replacement_val.format(elem=x)
)
)
elif mode == "modify":
mod_op = Utils.getOneValueMathOp(params["element_modify_op"])
out_table = data.mask(
bool_mat, data.where(bool_mat).applymap(mod_op)
)
elif mode == "scale":
scale_op = Utils.getTwoValuePandaOp(
params["element_scale_op"], data
)
scale_value = params["element_scale_value"]
out_table = data.mask(
bool_mat, scale_op(data.where(bool_mat), scale_value)
)
elif mode == "custom":
element_customop = params["element_customop"]
def fun(elem):
"""Dummy Function"""
return elem
ss = Safety(element_customop, ['elem'])
fun_string = ss.generateFunction()
exec(fun_string) # SUPER DUPER SAFE...
out_table = data.mask(
bool_mat, data.where(bool_mat).applymap(fun)
)
else:
print("No such element mode!", mode)
exit(-1)
elif user_mode_single == "fulltable":
general_mode = params["mode"]
if general_mode == "transpose":
out_table = data.T
elif general_mode == "melt":
melt_ids = params["MELT"]["melt_ids"]
melt_values = params["MELT"]["melt_values"]
out_table = pd.melt(data, id_vars=melt_ids, value_vars=melt_values)
elif general_mode == "pivot":
pivot_index = params["PIVOT"]["pivot_index"]
pivot_column = params["PIVOT"]["pivot_column"]
pivot_values = params["PIVOT"]["pivot_values"]
out_table = data.pivot(
index=pivot_index, columns=pivot_column, values=pivot_values
)
elif general_mode == "custom":
custom_func = params["fulltable_customop"]
def fun(tableau):
"""Dummy Function"""
return tableau
ss = Safety(custom_func, ['table'], 'pd.DataFrame')
fun_string = ss.generateFunction()
exec(fun_string) # SUPER DUPER SAFE...
out_table = fun(data)
else:
print("No such mode!", user_mode_single)
exit(-1)
elif user_mode == "multiple":
table_sections = uc.Data["tables"]
if not table_sections:
print("Multiple table sets not given!")
exit(-1)
reader_skip = uc.Default["reader_skip"]
# Data
table = []
# 1-based handlers for users "table1", "table2", etc.
table_names = []
# Actual 0-based references "table[0]", "table[1]", etc.
table_names_real = []
# Read and populate tables
for x, t_sect in enumerate(table_sections):
tmp = Utils.readcsv(t_sect, uc.Default["narm"])
table.append(tmp)
table_names.append("table" + str(x + 1))
table_names_real.append("table[" + str(x) + "]")
custom_op = params["fulltable_customop"]
ss = Safety(custom_op, table_names, 'pd.DataFrame')
fun_string = ss.generateFunction()
# Change the argument to table
fun_string = fun_string.replace("fun(table1):", "fun():")
    # table1 to table[0], table2 to table[1], etc. (1-based names to 0-based list indices)
for name, name_real in zip(table_names, table_names_real):
fun_string = fun_string.replace(name, name_real)
fun_string = fun_string.replace("fun():", "fun(table):")
exec(fun_string) # SUPER DUPER SAFE...
out_table = fun(table)
else:
print("No such mode!", user_mode)
exit(-1)
if not isinstance(out_table, (pd.DataFrame, pd.Series)):
print('The specified operation did not result in a table to return.')
raise RuntimeError(
'The operation did not generate a pd.DataFrame or pd.Series to return.'
)
out_parameters = {
"sep": "\t",
"float_format": "%%.%df" % pd.options.display.precision,
"header": uc.Default["out_headers_col"],
"index": uc.Default["out_headers_row"]
}
if user_mode_single not in ('matrixapply', None):
out_parameters["quoting"] = csv.QUOTE_NONE
out_table.to_csv(uc.Default["outtable"], **out_parameters)
| mit |
shaman-tech/Hacker-Hostel-2017 | reader.py | 1 | 1943 | import pandas as pd
import numpy as np
import sqlite3
def create_sql_command(row_entry):
index = ""
for column_value in row_entry:
if index == "":
if type(column_value) == str:
index = "'%s'"%(column_value)
else:
index = str(column_value)
else:
if type(column_value) == str:
index = index + ", " + "'%s'"%(column_value)
else:
index = index + ", " + str(column_value)
return str(index)
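# Illustration (not used by the script): create_sql_command turns a row into
# a SQL VALUES fragment, quoting strings and leaving numbers bare.
def _create_sql_command_example():
    assert create_sql_command(["ACME", 42, 3.5]) == "'ACME', 42, 3.5"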
def change_float_int(df_column):
for index,value in enumerate(df_column.values):
df_column[index] = int(value)
return df_column
def populate_product_num(df):
supplier_columns = ['PONUM','PODate','SUPPLIER','ShippingLine','ETAJamaica','ETATGG','AADTGG','DateOffloaded','Dateleft']
for index, prod_num in enumerate(df['PONUM']):
if prod_num == '?' and df.loc[index,'SUPPLIER'] == "?":
for column in supplier_columns:
df.loc[index,column] = df.loc[index-1,column]
elif df.loc[index,'SUPPLIER'] == "?" and prod_num != "?":
for column in supplier_columns[2:]:
df.loc[index,column] = df.loc[index-1,column]
elif prod_num == "?":
for column in supplier_columns:
df.loc[index,column] = df.loc[index-1,column]
return df
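# A rough pandas-idiomatic sketch of the same idea (illustration only; the
# original above treats a present PONUM with a missing SUPPLIER slightly
# differently): treat '?' as missing and forward-fill the shipment columns.
def _forward_fill_sketch(dataframe):
    cols = ['PONUM', 'PODate', 'SUPPLIER', 'ShippingLine', 'ETAJamaica',
            'ETATGG', 'AADTGG', 'DateOffloaded', 'Dateleft']
    dataframe = dataframe.copy()
    dataframe[cols] = dataframe[cols].replace('?', np.nan).ffill()
    return dataframe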
# cn = sqlite3.connect('db.sqlite')
# curs = cn.cursor()
fields = "PONUM,PODate,SUPPLIER,ProductDescription,Qty,SizeContainer,ContainerNUM,ShippingLine,ETAJamaica,ETATGG,AADTGG,DateOffloaded,Dateleft"#create_sql_command(column_names)
df = pd.read_csv('TGG_Merchandise.csv',skiprows=5, names=fields.split(","))
df = df.fillna(0)
df['PONUM']= df['PONUM'].astype(int)
column_names = list(df.columns)
column_values = df.values
df = df.replace(0, '?')
df = df.replace("","?")
df = populate_product_num(df)
df.to_csv('sample.csv')
# for row in column_values:
# create_table_command = "{} TGG_MERCHANDISE ({}) {} ({})".format('INSERT INTO',fields,'VALUES',create_sql_command(row))
# curs.execute((create_table_command))
# for name in column_names:
# print(name.split())
| mit |
voxlol/scikit-learn | sklearn/linear_model/least_angle.py | 42 | 49357 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
        regularization parameter alpha in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater
            # than the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
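# A minimal usage sketch of lars_path (illustration only, not used by the
# estimators below): trace the full lasso path on a small synthetic problem.
# The data, noise level and random seed are assumptions for the example.
def _example_lars_path():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    true_coef = np.zeros(10)
    true_coef[:3] = [2., -1., 0.5]
    y = np.dot(X, true_coef) + 0.01 * rng.randn(50)
    alphas, active, coefs = lars_path(X, y, method='lasso')
    # coefs has shape (n_features, n_alphas + 1); the informative features
    # should be the first to enter the active set.
    return alphas, active, coefs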
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
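# Illustration (not used by the estimators): turning the residues returned
# above into a per-alpha mean squared error curve, mirroring what LarsCV.fit
# does after interpolating every fold onto a common grid of alphas.
def _example_mse_from_residues(residues):
    # residues has shape (n_alphas, n_samples)
    return np.mean(residues ** 2, axis=-1)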
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
# it will call a lasso internally when self if LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
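# Illustrative sketch (not part of scikit-learn): the Notes above compare
# LassoLarsCV with LassoCV, so this helper fits both on the same synthetic
# data to let their selected ``alpha_`` values be compared side by side.
# The data and ``cv=5`` are assumptions made purely for illustration.
def _lassolarscv_vs_lassocv_sketch():
    import numpy as np
    from sklearn.linear_model import LassoLarsCV, LassoCV
    rng = np.random.RandomState(42)
    X = rng.randn(80, 20)
    y = 3 * X[:, 2] - 1.5 * X[:, 7] + 0.05 * rng.randn(80)
    lars_cv = LassoLarsCV(cv=5).fit(X, y)   # candidate alphas come from the LARS path
    lasso_cv = LassoCV(cv=5).fit(X, y)      # candidate alphas come from a grid (n_alphas, eps)
    return lars_cv.alpha_, lasso_cv.alpha_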
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
    explain the data well while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
            # get the number of degrees of freedom equal to:
            # Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T Xc) * Xc.T), i.e. the number of non-zero coefficients
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
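# Illustrative sketch (not part of scikit-learn): mirrors the criterion
# computed in ``LassoLarsIC.fit`` above,
#     criterion = n_samples * log(MSE) + K * df,  K = 2 (AIC) or log(n) (BIC),
# for a single coefficient vector so the selection rule can be checked by
# hand.  ``coef`` is assumed to be a 1-D float ndarray; data centering is
# deliberately left out of this sketch.
def _information_criterion_sketch(X, y, coef, criterion='aic'):
    import numpy as np
    n_samples = X.shape[0]
    resid = y - np.dot(X, coef)
    mse = np.mean(resid ** 2)
    # degrees of freedom: number of non-zero coefficients, as in fit() above
    df = np.sum(np.abs(coef) > np.finfo(coef.dtype).eps)
    K = 2 if criterion == 'aic' else np.log(n_samples)
    return n_samples * np.log(mse) + K * df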
| bsd-3-clause |
joernhees/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 63 | 2945 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3  # the smaller it is, the longer the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
colors = cycle(['b', 'r', 'g', 'c', 'k'])
neg_log_alphas_lasso = -np.log10(alphas_lasso)
neg_log_alphas_enet = -np.log10(alphas_enet)
for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso)
for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet)
for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors):
l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c)
l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
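# Illustrative check (not part of the original example): with ``positive=True``
# every coefficient along the path is constrained to be non-negative, which
# can be verified directly on the arrays computed above.
assert (coefs_positive_lasso >= 0).all()
assert (coefs_positive_enet >= 0).all()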
| bsd-3-clause |
saquiba2/numpy2 | numpy/core/tests/test_multiarray.py | 4 | 224802 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import ctypes
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # sizes that are not a power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
#check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
        # necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
        # using None is known to fail at this point
#assert_equal(a.copy().argsort(axis=None, c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(s)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
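    # Illustrative sketch (not part of the original test suite): the
    # partition tests below rely on the invariant that after
    # ``np.partition(d, kth)`` the element at index ``kth`` is in its sorted
    # position, everything before it is <= and everything after it is >=.
    def _partition_invariant_sketch(self):
        d = np.array([9, 1, 7, 3, 5, 2, 8])
        p = np.partition(d, 3)
        assert_equal(p[3], np.sort(d)[3])
        assert_(np.all(p[:3] <= p[3]))
        assert_(np.all(p[4:] >= p[3]))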
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
# map() is lazy on Python 3, so shuffle each row explicitly
for row in d1:
np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
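# Helper: for each k in sorted(kth), everything between the previous kth
# position and k must be strictly smaller than d[k], and everything from
# k onwards must be greater than or equal to d[k].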
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
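# Partitioning around several kth values at once: out-of-range entries
# must raise, and valid sequences of kth (including negative and repeated
# values) must leave the array partitioned around each of them.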
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
# map() is lazy on Python 3, so shuffle each row explicitly
for row in d1:
np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
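# Structured (compound dtype) arrays partitioned with the 'order'
# keyword, and plain string arrays, must agree element-wise with a full
# sort.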
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
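# A non-ASCII unicode 'kind' string is not a valid selection algorithm
# name and must raise ValueError.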
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
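# flatten() must honour 'C' (row-major) and 'F' (column-major) order for
# both 2-D and 3-D inputs; flattening in 'F' order equals flattening the
# transpose in 'C' order.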
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
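# put() uses flat indices, so the same index/value pairs must work for a
# 1-d target and for the same data reshaped to 2-d; a read-only array
# must raise ValueError.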
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
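# conj()/conjugate()/np.conjugate() on complex, real and object arrays;
# object arrays whose elements have no conjugate() method must raise
# AttributeError.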
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
# the returned original must not have been modified by an in-place add
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# scanning whether the refcount == 1 object is on the python stack (to
# check that we are called directly from python) is flawed, as the object
# may still be above the stack pointer and we have no access to the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
# the original list element must not have been modified by an in-place add
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and a
# ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufunc call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
if i < len(inputs):
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
if ('out' in kw) and (kw['out'] is not None):
kw['out'] = np.asarray(kw['out'])
r = func(*inputs, **kw)
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
# Obj3 is subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_numpy_ufunc_index(self):
# Check that index is set appropriately, also if only an output
# is passed on (the latter is another regression test for github bug 4753)
class CheckIndex(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return i
a = CheckIndex()
dummy = np.arange(2.)
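# CheckIndex.__numpy_ufunc__ returns the position index i at which the
# overriding object was passed, so each assert below checks that index.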
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), 1)
assert_equal(np.sin(dummy, out=a), 1)
assert_equal(np.sin(dummy, out=(a,)), 1)
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), 1)
assert_equal(np.modf(dummy, None, a), 2)
assert_equal(np.modf(dummy, dummy, a), 2)
assert_equal(np.modf(dummy, out=a), 1)
assert_equal(np.modf(dummy, out=(a,)), 1)
assert_equal(np.modf(dummy, out=(a, None)), 1)
assert_equal(np.modf(dummy, out=(a, dummy)), 1)
assert_equal(np.modf(dummy, out=(None, a)), 2)
assert_equal(np.modf(dummy, out=(dummy, a)), 2)
assert_equal(np.modf(a, out=(dummy, a)), 0)
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), 2)
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), 2)
assert_equal(np.add(dummy, dummy, out=(a,)), 2)
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
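# bytes, ints (including values larger than a C long), floats and str
# must all be recognised as Python scalars by the C-API helper.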
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
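# Indexing with an integer returns a numpy scalar, while indexing with
# an ellipsis returns a (zero-rank) ndarray.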
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
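# dumps()/pickle.loads() round trip for a C-ordered array, its transpose
# and a structured dtype.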
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
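# nan_arr holds (array, expected argmax index) pairs covering NaN
# propagation for floats and complex values, datetime64/timedelta64 data
# including NaT, timedelta objects and booleans.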
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
# these could possibly be relaxed (even the previous mismatches used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(TestCase):
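# nan_arr holds (array, expected argmin index) pairs mirroring the
# TestArgmax cases: NaN propagation, datetime64/timedelta64 with NaT,
# timedelta objects and booleans.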
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
# these could possibly be relaxed (even the previous mismatches used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
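# Multiplying a scalar with a column created via np.newaxis must give
# the same values as the plain 1-d product.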
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
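# Helper: every element of x must lie within [cmin, cmax].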
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
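# Helper: clip random data for every dtype in the given sctypes group,
# in native and swapped byte order and optionally in place, then check
# the result stays within the expected bounds.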
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
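# Helper: putmask must write val (cast to dtype T) at every masked
# position and must not change the array's dtype.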
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
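# Helper: taking every index along axis 0 must reproduce the array.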
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
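# lexsort sorts by the last key first: here a is the primary key and b
# breaks ties, both as a tuple of keys and as rows of a 2-d array.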
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str imbues less precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unbuffered_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise io.IOError('Can not tell or seek')
f = io.open(self.filename, 'rb', buffering=0)
f.seek = fail
f.tell = fail
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls on Windows. These would normally hang when doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
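# Round-trip tests for np.frombuffer: arrays rebuilt from raw byte buffers for
# both byte orders and several dtypes (old nose-style generator tests).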
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
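# Tests for the .flat iterator: writeability propagation from the parent array
# and the UPDATEIFCOPY behaviour of .flat.__array__() on strided views.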
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
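# Tests for ndarray.resize: reshaping in place, reference-count checks,
# zero-filling of newly allocated space and rejection of invalid arguments.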
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
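# Tests for structured (record) dtypes: field renaming, duplicate and unicode
# field names, field indexing, multi-field deprecation warnings and hashing.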
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_assign():
dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
def test_multiple_field_name_unicode(self):
def test_assign_unicode():
dt = np.dtype([("\u20B9", "f8"),
("B", "f8"),
("\u20B9", "f8")])
# Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_assign_unicode)
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
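# Thin wrappers around the ndarray statistics methods so TestStats can drive
# mean/var/std through one code path.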
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
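# Tests for np.vdot across numeric dtypes, array memory orders and
# non-contiguous inputs.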
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
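# Tests for np.dot: matrix/vector combinations checked against precomputed
# targets, the out= argument, __numpy_ufunc__ overrides and the Accelerate
# sgemv alignment workaround.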
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
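# Run the shared MatmulCommon checks against np.matmul and additionally
# exercise its out= argument.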
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so it is better just to error out
# and nudge people towards writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
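# Tests for np.inner: object scalars, the vector self-product regression
# (ticket 844) and inputs with varied contiguity.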
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself used to segfault or give a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(B, C), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
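# Tests for the summarized (ellipsis) str/repr of large 1-d and 2-d arrays.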
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
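# Tests for np.choose, including broadcasting between the index array and the
# choice arrays.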
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
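# Padding-mode codes passed to the C-level neighborhood iterator test helpers
# used below, e.g. test_neighborhood_iterator(x, bounds, fill,
# NEIGH_MODE['zero']) pads out-of-bounds positions with zeros.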
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue(all(i.dtype == dt for i in l))
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking circular on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
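# Assigning complex values into a real array must raise ComplexWarning (turned
# into an error here) and leave the target unmodified.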
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
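# Tests for np.min_scalar_type promotion of Python integers.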
class TestMinScalarType(object):
def test_unsigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_unsigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_unsigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_unsigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
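# Tests for converting PEP 3118 buffer format strings into numpy dtypes,
# covering native alignment, trailing padding and byte-order handling.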
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
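# Round-trip and export tests for the PEP 3118 buffer protocol on ndarrays,
# including structured dtypes, byte orders and relaxed strides.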
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
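# Deleting ndarray and flags attributes must raise AttributeError rather than
# crash (see ticket #2046 below).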
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
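# Tests for the __array_interface__ protocol: scalar coercion, shape handling
# and itemsize consistency between 'descr' and 'typestr'.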
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of the small memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
#All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
Carmezim/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
daynebatten/keras-wtte-rnn | wtte-rnn.py | 1 | 5661 | import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Activation
from keras.layers import Masking
from keras.optimizers import RMSprop
from keras import backend as k
from sklearn.preprocessing import normalize
"""
Discrete log-likelihood for Weibull hazard function on censored survival data
y_true is a (samples, 2) tensor containing time-to-event (y), and an event indicator (u)
ab_pred is a (samples, 2) tensor containing predicted Weibull alpha (a) and beta (b) parameters
For math, see https://ragulpr.github.io/assets/draft_master_thesis_martinsson_egil_wtte_rnn_2016.pdf (Page 35)
"""
def weibull_loglik_discrete(y_true, ab_pred, name=None):
y_ = y_true[:, 0]
u_ = y_true[:, 1]
a_ = ab_pred[:, 0]
b_ = ab_pred[:, 1]
hazard0 = k.pow((y_ + 1e-35) / a_, b_)
hazard1 = k.pow((y_ + 1) / a_, b_)
return -1 * k.mean(u_ * k.log(k.exp(hazard1 - hazard0) - 1.0) - hazard1)
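# Illustrative numpy mirror of the loss above (not part of the original script;
# the function name is an assumption). It returns the per-sample log-likelihood,
# so the Keras loss corresponds to the negative mean of this quantity. Useful
# for sanity-checking the backend version on a single (y, u, alpha, beta) tuple.
def weibull_loglik_discrete_np(y, u, a, b):
    # Cumulative hazard evaluated at t and t+1 for a discrete Weibull model
    hazard0 = ((y + 1e-35) / a) ** b
    hazard1 = ((y + 1.0) / a) ** b
    return u * np.log(np.exp(hazard1 - hazard0) - 1.0) - hazard1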
"""
Not used for this model, but included in case somebody needs it
For math, see https://ragulpr.github.io/assets/draft_master_thesis_martinsson_egil_wtte_rnn_2016.pdf (Page 35)
"""
def weibull_loglik_continuous(y_true, ab_pred, name=None):
y_ = y_true[:, 0]
u_ = y_true[:, 1]
a_ = ab_pred[:, 0]
b_ = ab_pred[:, 1]
ya = (y_ + 1e-35) / a_
return -1 * k.mean(u_ * (k.log(b_) + b_ * k.log(ya)) - k.pow(ya, b_))
"""
Custom Keras activation function, outputs alpha neuron using exponentiation and beta using softplus
"""
def activate(ab):
a = k.exp(ab[:, 0])
b = k.softplus(ab[:, 1])
a = k.reshape(a, (k.shape(a)[0], 1))
b = k.reshape(b, (k.shape(b)[0], 1))
return k.concatenate((a, b), axis=1)
"""
Load and parse engine data files into:
- an (engine/day, observed history, sensor readings) x tensor, where observed history is 100 days, zero-padded
for days that don't have a full 100 days of observed history (e.g., first observed day for an engine)
- an (engine/day, 2) tensor containing time-to-event and 1 (since all engines failed)
There are probably MUCH better ways of doing this, but I don't use Numpy that much, and the data parsing isn't the
point of this demo anyway.
"""
def load_file(name):
with open(name, 'r') as file:
return np.loadtxt(file, delimiter=',')
np.set_printoptions(suppress=True, threshold=10000)
train = load_file('train.csv')
test_x = load_file('test_x.csv')
test_y = load_file('test_y.csv')
# Combine the X values to normalize them, then split them back out
all_x = np.concatenate((train[:, 2:26], test_x[:, 2:26]))
all_x = normalize(all_x, axis=0)
train[:, 2:26] = all_x[0:train.shape[0], :]
test_x[:, 2:26] = all_x[train.shape[0]:, :]
# Make engine numbers and days zero-indexed, for everybody's sanity
train[:, 0:2] -= 1
test_x[:, 0:2] -= 1
# Configurable observation look-back period for each engine/day
max_time = 100
def build_data(engine, time, x, max_time, is_test):
# y[0] will be days remaining, y[1] will be event indicator, always 1 for this data
out_y = np.empty((0, 2), dtype=np.float32)
# A full history of sensor readings to date for each x
out_x = np.empty((0, max_time, 24), dtype=np.float32)
for i in range(100):
print("Engine: " + str(i))
# When did the engine fail? (Last day + 1 for train data, irrelevant for test.)
max_engine_time = int(np.max(time[engine == i])) + 1
if is_test:
start = max_engine_time - 1
else:
start = 0
this_x = np.empty((0, max_time, 24), dtype=np.float32)
for j in range(start, max_engine_time):
engine_x = x[engine == i]
out_y = np.append(out_y, np.array((max_engine_time - j, 1), ndmin=2), axis=0)
xtemp = np.zeros((1, max_time, 24))
xtemp[:, max_time-min(j, 99)-1:max_time, :] = engine_x[max(0, j-max_time+1):j+1, :]
this_x = np.concatenate((this_x, xtemp))
out_x = np.concatenate((out_x, this_x))
return out_x, out_y
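# Shape note (illustrative, derived from the code above): with max_time=100 and
# 24 sensor columns, build_data returns
#   out_x with shape (n_engine_days, 100, 24)   # zero-padded lookback windows
#   out_y with shape (n_engine_days, 2)         # (time-to-event, event indicator)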
train_x, train_y = build_data(train[:, 0], train[:, 1], train[:, 2:26], max_time, False)
test_x = build_data(test_x[:, 0], test_x[:, 1], test_x[:, 2:26], max_time, True)[0]
train_u = np.zeros((100, 1), dtype=np.float32)
train_u += 1
test_y = np.append(np.reshape(test_y, (100, 1)), train_u, axis=1)
"""
Here's the rest of the meat of the demo... actually fitting and training the model.
We'll also make some test predictions so we can evaluate model performance.
"""
# Start building our model
model = Sequential()
# Mask parts of the lookback period that are all zeros (i.e., unobserved) so they don't skew the model
model.add(Masking(mask_value=0., input_shape=(max_time, 24)))
# LSTM is just a common type of RNN. You could also try anything else (e.g., GRU).
model.add(LSTM(20, input_dim=24))
# We need 2 neurons to output Alpha and Beta parameters for our Weibull distribution
model.add(Dense(2))
# Apply the custom activation function mentioned above
model.add(Activation(activate))
# Use the discrete log-likelihood for Weibull survival data as our loss function
model.compile(loss=weibull_loglik_discrete, optimizer=RMSprop(lr=.001))
# Fit!
model.fit(train_x, train_y, nb_epoch=250, batch_size=2000, verbose=2, validation_data=(test_x, test_y))
# Make some predictions and put them alongside the real TTE and event indicator values
test_predict = model.predict(test_x)
test_predict = np.resize(test_predict, (100, 2))
test_result = np.concatenate((test_y, test_predict), axis=1)
# TTE, Event Indicator, Alpha, Beta
print(test_result)
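# Optional post-processing sketch (an assumption, not part of the original
# script): the mean of a Weibull(alpha, beta) distribution is
# alpha * gamma(1 + 1/beta), which turns the predicted parameters into a
# point estimate of time-to-event. Requires scipy (already a dependency of
# scikit-learn, which is imported above).
from scipy.special import gamma as gamma_fn
expected_tte = test_predict[:, 0] * gamma_fn(1.0 + 1.0 / test_predict[:, 1])
print(expected_tte)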
| mit |
IndraVikas/scikit-learn | sklearn/learning_curve.py | 110 | 13467 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
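# Minimal usage sketch (illustrative only; the estimator and dataset below are
# assumptions, not part of this module):
#
#     from sklearn.svm import SVC
#     from sklearn.datasets import load_digits
#     digits = load_digits()
#     sizes, train_scores, test_scores = learning_curve(
#         SVC(kernel='linear'), digits.data, digits.target, cv=5)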
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
        warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
                      "of ticks will be less than the size of "
                      "'train_sizes' (%d instead of %d)."
                      % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
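# Minimal usage sketch for validation_curve (illustrative only; the estimator,
# dataset and parameter grid below are assumptions):
#
#     import numpy as np
#     from sklearn.svm import SVC
#     from sklearn.datasets import load_digits
#     digits = load_digits()
#     train_scores, test_scores = validation_curve(
#         SVC(), digits.data, digits.target,
#         param_name="gamma", param_range=np.logspace(-6, -1, 5), cv=5)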
| bsd-3-clause |
cactusbin/nyt | matplotlib/lib/matplotlib/pyplot.py | 4 | 114926 | # Note: The first part of this file can be modified in place, but the latter
# part is autogenerated by the boilerplate.py script.
"""
Provides a MATLAB-like plotting framework.
:mod:`~matplotlib.pylab` combines pyplot with numpy into a single namespace.
This is convenient for interactive work, but for programming it
is recommended that the namespaces be kept separate, e.g.::
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1);
y = np.sin(x)
plt.plot(x, y)
"""
from __future__ import print_function
import sys
import warnings
import matplotlib
import matplotlib.colorbar
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib import docstring
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.figure import Figure, figaspect
from matplotlib.gridspec import GridSpec
from matplotlib.image import imread as _imread
from matplotlib.image import imsave as _imsave
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib import rc_context
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes, Subplot, _string_to_bool
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec, detrend_none, window_hanning
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap, register_cmap
import numpy as np
# We may not need the following imports here:
from matplotlib.colors import Normalize
from matplotlib.colors import normalize # for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
loop, and if not switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
'GTKCairo'):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
# import Tkinter
pass # what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
_backend_mod, new_figure_manager, draw_if_interactive, _show = pylab_setup()
@docstring.copy_dedent(Artist.findobj)
def findobj(o=None, match=None, include_self=True):
if o is None:
o = gcf()
return o.findobj(match, include_self=include_self)
def switch_backend(newbackend):
"""
Switch the default backend. This feature is **experimental**, and
is only expected to work switching to an image backend. e.g., if
you have a bunch of PostScript scripts that you want to run from
an interactive ipython session, you may want to switch to the PS
backend before running them to avoid having a bunch of GUI windows
popup. If you try to interactively switch from one GUI backend to
another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global _backend_mod, new_figure_manager, draw_if_interactive, _show
matplotlib.use(newbackend, warn=False, force=True)
from matplotlib.backends import pylab_setup
_backend_mod, new_figure_manager, draw_if_interactive, _show = pylab_setup()
def show(*args, **kw):
"""
Display a figure.
When running in ipython with its pylab mode, display all
figures and return to the ipython prompt.
In non-interactive mode, display all figures and block until
the figures have been closed; in interactive mode it has no
effect unless figures were created prior to a change from
non-interactive to interactive mode (not recommended). In
that case it displays the figures but does not block.
A single experimental keyword argument, *block*, may be
set to True or False to override the blocking behavior
described above.
"""
global _show
_show(*args, **kw)
def isinteractive():
"""
Return status of interactive mode.
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def pause(interval):
"""
Pause for *interval* seconds.
If there is an active figure it will be updated and displayed,
and the GUI event loop will run during the pause.
If there is no active figure, or if a non-interactive backend
is in use, this executes time.sleep(interval).
This can be used for crude animation. For more complex
animation, see :mod:`matplotlib.animation`.
This function is experimental; its behavior may be changed
or extended in a future release.
"""
backend = rcParams['backend']
if backend in _interactive_bk:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
canvas = figManager.canvas
canvas.draw()
show(block=False)
canvas.start_event_loop(interval)
return
# No on-screen figure is active, so sleep() is all we need.
import time
time.sleep(interval)
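# Crude-animation sketch described in the docstring above (illustrative only;
# assumes an interactive backend and that this module is imported as plt):
#
#     for i in range(10):
#         plt.plot([i], [i], 'o')
#         plt.pause(0.05)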
@docstring.copy_dedent(matplotlib.rc)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
@docstring.copy_dedent(matplotlib.rc_context)
def rc_context(rc=None, fname=None):
return matplotlib.rc_context(rc, fname)
@docstring.copy_dedent(matplotlib.rcdefaults)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
# The current "image" (ScalarMappable) is retrieved or set
# only via the pyplot interface using the following two
# functions:
def gci():
"""
Get the current colorable artist. Specifically, returns the
current :class:`~matplotlib.cm.ScalarMappable` instance (image or
patch collection), or *None* if no images or patch collections
have been defined. The commands :func:`~matplotlib.pyplot.imshow`
and :func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances. The
current image is an attribute of the current axes, or the nearest
earlier axes in the current figure that contains an image.
"""
return gcf()._gci()
def sci(im):
"""
Set the current image. This image will be the target of colormap
commands like :func:`~matplotlib.pyplot.jet`,
:func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`). The current image is an
attribute of the current axes.
"""
gca()._sci(im)
## Any Artist ##
# (getp is simply imported)
@docstring.copy(_setp)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
def xkcd(scale=1, length=100, randomness=2):
"""
Turns on `xkcd <http://xkcd.com/>`_ sketch-style drawing mode.
This will only have effect on things drawn after this function is
called.
For best results, the "Humor Sans" font should be installed: it is
not included with matplotlib.
Parameters
----------
scale: float, optional
The amplitude of the wiggle perpendicular to the source line.
length: float, optional
The length of the wiggle along the line.
randomness: float, optional
The scale factor by which the length is shrunken or expanded.
This function works by a number of rcParams, so it will probably
override others you have set before.
If you want the effects of this function to be temporary, it can
be used as a context manager, for example::
with plt.xkcd():
# This figure will be in XKCD-style
fig1 = plt.figure()
# ...
# This figure will be in regular style
fig2 = plt.figure()
"""
if rcParams['text.usetex']:
raise RuntimeError(
"xkcd mode is not compatible with text.usetex = True")
from matplotlib import patheffects
context = rc_context()
try:
rcParams['font.family'] = ['Humor Sans', 'Comic Sans MS']
rcParams['font.size'] = 14.0
rcParams['path.sketch'] = (scale, length, randomness)
rcParams['path.effects'] = [
patheffects.withStroke(linewidth=4, foreground="w")]
rcParams['axes.linewidth'] = 1.5
rcParams['lines.linewidth'] = 2.0
rcParams['figure.facecolor'] = 'white'
rcParams['grid.linewidth'] = 0.0
rcParams['axes.unicode_minus'] = False
rcParams['axes.color_cycle'] = ['b', 'r', 'c', 'm']
rcParams['xtick.major.size'] = 8
rcParams['xtick.major.width'] = 3
rcParams['ytick.major.size'] = 8
rcParams['ytick.major.width'] = 3
except:
context.__exit__(*sys.exc_info())
raise
return context
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
frameon=True,
FigureClass=Figure,
**kwargs
):
"""
Creates a new figure.
Parameters
----------
    num : integer or string, optional, default: None
        If not provided, a new figure will be created, and the figure number
        will be incremented. The figure object holds this number in a `number`
        attribute.
        If num is provided, and a figure with this id already exists, make
        it active, and return a reference to it. If this figure does not
        exist, create it and return it.
If num is a string, the window title will be set to this figure's
`num`.
figsize : tuple of integers, optional, default : None
width, height in inches. If not provided, defaults to rc
figure.figsize.
    dpi : integer, optional, default: None
        resolution of the figure. If not provided, defaults to rc figure.dpi.
    facecolor :
        the background color. If not provided, defaults to rc figure.facecolor.
    edgecolor :
        the border color. If not provided, defaults to rc figure.edgecolor.
Returns
-------
figure : Figure
The Figure instance returned will also be passed to new_figure_manager
in the backends, which allows to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed to the figure init
function.
Note
----
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
rcParams defines the default values, which can be modified in the
matplotlibrc file
"""
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
allnums = get_fignums()
next_num = max(allnums) + 1 if allnums else 1
figLabel = ''
if num is None:
num = next_num
elif is_string_like(num):
figLabel = num
allLabels = get_figlabels()
if figLabel not in allLabels:
if figLabel == 'all':
warnings.warn("close('all') closes all existing figures")
num = next_num
else:
inum = allLabels.index(figLabel)
num = allnums[inum]
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
max_open_warning = rcParams['figure.max_open_warning']
if (max_open_warning >= 1 and
len(allnums) >= max_open_warning):
            warnings.warn(
                "More than %d figures have been opened. Figures "
                "created through the pyplot interface "
                "(`matplotlib.pyplot.figure`) are retained until "
                "explicitly closed and may consume too much memory. "
                "(To control this warning, see the rcParam "
                "`figure.max_open_warning`)." %
                max_open_warning, RuntimeWarning)
if get_backend().lower() == 'ps':
dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
if figLabel:
figManager.set_window_title(figLabel)
figManager.canvas.figure.set_label(figLabel)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
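# Usage sketch (illustrative only; assumes this module is imported as plt):
#
#     fig1 = plt.figure()            # new, auto-numbered figure
#     fig2 = plt.figure('controls')  # new figure labelled 'controls'
#     plt.figure('controls')         # makes the labelled figure current again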
def gcf():
"Return a reference to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
fignum_exists = _pylab_helpers.Gcf.has_fignum
def get_fignums():
"""Return a list of existing figure numbers."""
fignums = _pylab_helpers.Gcf.figs.keys()
fignums.sort()
return fignums
def get_figlabels():
"Return a list of existing figure labels."
figManagers = _pylab_helpers.Gcf.get_all_fig_managers()
figManagers.sort(key=lambda m: m.num)
return [m.canvas.figure.get_label() for m in figManagers]
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
@docstring.copy_dedent(FigureCanvasBase.mpl_connect)
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
@docstring.copy_dedent(FigureCanvasBase.mpl_disconnect)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
def close(*args):
"""
Close a figure window.
``close()`` by itself closes the current figure
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close(num)`` closes figure number *num*
``close(name)`` where *name* is a string, closes figure with that label
``close('all')`` closes all the figure windows
"""
if len(args) == 0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
return
else:
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args) == 1:
arg = args[0]
if arg == 'all':
_pylab_helpers.Gcf.destroy_all()
elif isinstance(arg, int):
_pylab_helpers.Gcf.destroy(arg)
elif is_string_like(arg):
allLabels = get_figlabels()
if arg in allLabels:
num = get_fignums()[allLabels.index(arg)]
_pylab_helpers.Gcf.destroy(num)
elif isinstance(arg, Figure):
_pylab_helpers.Gcf.destroy_fig(arg)
else:
raise TypeError('Unrecognized argument type %s to close' % type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
def clf():
"""
Clear the current figure.
"""
gcf().clf()
draw_if_interactive()
def draw():
"""
Redraw the current figure.
This is used in interactive mode to update a figure that
has been altered using one or more plot object method calls;
it is not needed if figure modification is done entirely
with pyplot functions, if a sequence of modifications ends
with a pyplot function, or if matplotlib is in non-interactive
mode and the sequence of modifications ends with :func:`show` or
:func:`savefig`.
A more object-oriented alternative, given any
:class:`~matplotlib.figure.Figure` instance, :attr:`fig`, that
was created using a :mod:`~matplotlib.pyplot` function, is::
fig.canvas.draw()
"""
get_current_fig_manager().canvas.draw()
@docstring.copy_dedent(Figure.savefig)
def savefig(*args, **kwargs):
fig = gcf()
return fig.savefig(*args, **kwargs)
@docstring.copy_dedent(Figure.ginput)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
@docstring.copy_dedent(Figure.waitforbuttonpress)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
# Putting things in figures
@docstring.copy_dedent(Figure.text)
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
@docstring.copy_dedent(Figure.suptitle)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
@docstring.Appender("Additional kwargs: hold = [True|False] overrides default hold state", "\n")
@docstring.copy_dedent(Figure.figimage)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
#sci(ret) # JDH figimage should not set current image -- it is not mappable, etc
return ret
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
    # b=None toggles the hold state, so let's get the current hold
    # state; but should pyplot hold toggle the rc setting? - methinks
    # not
b = ax.ishold()
rc('axes', hold=b)
def ishold():
"""
Return the hold status of the current axes.
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
Call a function with hold(True).
Calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes to the figure.
The axes is added at position *rect* specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============ ================================================
kwarg Accepts Description
======= ============ ================================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute with otherax
sharey otherax current axes shares yaxis attribute with otherax
polar [True|False] use a polar axes?
======= ============ ================================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
nargs = len(args)
if len(args)==0: return subplot(111, **kwargs)
if nargs>1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
def delaxes(*args):
"""
Remove an axes from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def sca(ax):
"""
Set the current Axes instance to *ax*.
The current Figure is updated to the parent of *ax*.
"""
managers = _pylab_helpers.Gcf.get_all_fig_managers()
for m in managers:
if ax in m.canvas.figure.axes:
_pylab_helpers.Gcf.set_active(m)
m.canvas.figure.sca(ax)
return
raise ValueError("Axes instance argument was not found in a figure.")
def gca(**kwargs):
"""
Return the current axis instance. This can be used to control
axis properties either using set or the
:class:`~matplotlib.axes.Axes` methods, for example, setting the
xaxis range::
plot(t,s)
set(gca(), 'xlim', [0,10])
or::
plot(t,s)
a = gca()
a.set_xlim([0,10])
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Return a subplot axes positioned by the given grid definition.
Typical call signature::
subplot(nrows, ncols, plot_number)
Where *nrows* and *ncols* are used to notionally split the figure
into ``nrows * ncols`` sub-axes, and *plot_number* is used to identify
the particular subplot that this function is to create within the notional
grid. *plot_number* starts at 1, increments across rows first and has a
maximum of ``nrows * ncols``.
In the case when *nrows*, *ncols* and *plot_number* are all less than 10,
    a convenience exists, such that a 3-digit number can be given instead,
where the hundreds represent *nrows*, the tens represent *ncols* and the
units represent *plot_number*. For instance::
subplot(211)
produces a subaxes in a figure which represents the top plot (i.e. the
first) in a 2 row by 1 column notional grid (no grid actually exists,
but conceptually this is how the returned subplot has been positioned).
.. note::
Creating a new subplot with a position which is entirely inside a
pre-existing axes will trigger the larger axes to be deleted::
import matplotlib.pyplot as plt
# plot a line, implicitly creating a subplot(111)
plt.plot([1,2,3])
# now create a subplot which represents the top plot of a grid
# with 2 rows and 1 column. Since this subplot will overlap the
# first, the plot (and its axes) previously created, will be removed
plt.subplot(211)
plt.plot(range(12))
plt.subplot(212, axisbg='y') # creates 2nd subplot with yellow background
If you do not want this behavior, use the
:meth:`~matplotlib.figure.Figure.add_subplot` method or the
:func:`~matplotlib.pyplot.axes` function instead.
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot plot should be
a polar projection. Defaults to *False*.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :mod:`matplotlib.projections`.
.. seealso::
:func:`~matplotlib.pyplot.axes`
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pie_and_polar_charts/polar_scatter_demo.py`
For an example
**Example:**
.. plot:: mpl_examples/subplots_axes_and_figures/subplot_demo.py
"""
# if subplot called without arguments, create subplot(1,1,1)
if len(args)==0:
args=(1,1,1)
# This check was added because it is very easy to type
# subplot(1, 2, False) when subplots(1, 2, False) was intended
# (sharex=False, that is). In most cases, no error will
# ever occur, but mysterious behavior can result because what was
# intended to be the sharex argument is instead treated as a
# subplot index for subplot()
if len(args) >= 3 and isinstance(args[2], bool) :
warnings.warn("The subplot index argument to subplot() appears"
" to be a boolean. Did you intend to use subplots()?")
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, **fig_kw):
"""
Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
*nrows* : int
Number of rows of the subplot grid. Defaults to 1.
*ncols* : int
Number of columns of the subplot grid. Defaults to 1.
*sharex* : string or bool
If *True*, the X axis will be shared amongst all subplots. If
*True* and you have multiple rows, the x tick labels on all but
the last row of plots will have visible set to *False*
        If a string, it must be one of "row", "col", "all", or "none".
"all" has the same effect as *True*, "none" has the same effect
as *False*.
If "row", each subplot row will share a X axis.
If "col", each subplot column will share a X axis and the x tick
labels on all but the last row will have visible set to *False*.
*sharey* : string or bool
If *True*, the Y axis will be shared amongst all subplots. If
*True* and you have multiple columns, the y tick labels on all but
the first column of plots will have visible set to *False*
        If a string, it must be one of "row", "col", "all", or "none".
"all" has the same effect as *True*, "none" has the same effect
as *False*.
If "row", each subplot row will share a Y axis.
If "col", each subplot column will share a Y axis and the y tick
labels on all but the last row will have visible set to *False*.
*squeeze* : bool
If *True*, extra dimensions are squeezed out from the
returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the
resulting single Axis object is returned as a scalar.
        - for Nx1 or 1xN subplots, the returned object is a 1-d numpy
          object array of Axis objects.
        - for NxM subplots with N>1 and M>1, the result is a 2-d numpy
          object array of Axis objects.
If *False*, no squeezing at all is done: the returned axis
object is always a 2-d array containing Axis instances, even if it
ends up being 1x1.
*subplot_kw* : dict
Dict with keywords passed to the
:meth:`~matplotlib.figure.Figure.add_subplot` call used to
        create each subplot.
*fig_kw* : dict
Dict with keywords passed to the :func:`figure` call. Note that all
keywords not recognized above will be automatically included here.
Returns:
fig, ax : tuple
- *fig* is the :class:`matplotlib.figure.Figure` object
- *ax* can be either a single axis object or an array of axis
objects if more than one subplot was created. The dimensions
of the resulting array can be controlled with the squeeze
keyword, see above.
Examples::
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
# Share a X axis with each column of subplots
plt.subplots(2, 2, sharex='col')
# Share a Y axis with each row of subplots
plt.subplots(2, 2, sharey='row')
# Share a X and Y axis with all subplots
plt.subplots(2, 2, sharex='all', sharey='all')
# same as
plt.subplots(2, 2, sharex=True, sharey=True)
"""
# for backwards compatibility
if isinstance(sharex, bool):
if sharex:
sharex = "all"
else:
sharex = "none"
if isinstance(sharey, bool):
if sharey:
sharey = "all"
else:
sharey = "none"
share_values = ["all", "row", "col", "none"]
if sharex not in share_values:
# This check was added because it is very easy to type subplots(1, 2, 1)
# when subplot(1, 2, 1) was intended. In most cases, no error will
# ever occur, but mysterious behavior will result because what was
# intended to be the subplot index is instead treated as a bool for
# sharex.
if isinstance(sharex, int):
warnings.warn("sharex argument to subplots() was an integer."
" Did you intend to use subplot() (without 's')?")
raise ValueError("sharex [%s] must be one of %s" % \
(sharex, share_values))
if sharey not in share_values:
raise ValueError("sharey [%s] must be one of %s" % \
(sharey, share_values))
if subplot_kw is None:
subplot_kw = {}
fig = figure(**fig_kw)
# Create empty object array to hold all axes. It's easiest to make it 1-d
    # so we can just append subplots upon creation, and then reshape it
    # into the final (nrows, ncols) layout at the end.
nplots = nrows*ncols
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
#if sharex:
# subplot_kw['sharex'] = ax0
#if sharey:
# subplot_kw['sharey'] = ax0
axarr[0] = ax0
r, c = np.mgrid[:nrows, :ncols]
r = r.flatten() * ncols
c = c.flatten()
lookup = {
"none": np.arange(nplots),
"all": np.zeros(nplots, dtype=int),
"row": r,
"col": c,
}
sxs = lookup[sharex]
sys = lookup[sharey]
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
if sxs[i] == i:
subplot_kw['sharex'] = None
else:
subplot_kw['sharex'] = axarr[sxs[i]]
if sys[i] == i:
subplot_kw['sharey'] = None
else:
subplot_kw['sharey'] = axarr[sys[i]]
axarr[i] = fig.add_subplot(nrows, ncols, i + 1, **subplot_kw)
# returned axis array will be always 2-d, even if nrows=ncols=1
axarr = axarr.reshape(nrows, ncols)
# turn off redundant tick labeling
if sharex in ["col", "all"] and nrows > 1:
#if sharex and nrows>1:
# turn off all but the bottom row
for ax in axarr[:-1, :].flat:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
if sharey in ["row", "all"] and ncols > 1:
#if sharey and ncols>1:
# turn off all but the first column
for ax in axarr[:, 1:].flat:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots==1:
ret = fig, axarr[0,0]
else:
ret = fig, axarr.squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
ret = fig, axarr.reshape(nrows, ncols)
return ret
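# Illustrative usage sketch (added commentary, not part of the original
# module): the squeeze/sharex/sharey machinery above is typically exercised
# like this:
#
#     fig, axarr = subplots(2, 2, sharex='col', sharey='row')
#     for ax in axarr.flat:
#         ax.plot([0, 1], [0, 1])
#
# With squeeze=True (the default) a 2x2 grid comes back as a 2-d object array
# of axes; a single subplot would come back as a bare axes instance instead.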
def subplot2grid(shape, loc, rowspan=1, colspan=1, **kwargs):
"""
Create a subplot in a grid. The grid is specified by *shape*, at
location of *loc*, spanning *rowspan*, *colspan* cells in each
direction. The index for loc is 0-based. ::
subplot2grid(shape, loc, rowspan=1, colspan=1)
is identical to ::
        gridspec=GridSpec(shape[0], shape[1])
subplotspec=gridspec.new_subplotspec(loc, rowspan, colspan)
subplot(subplotspec)
"""
fig = gcf()
s1, s2 = shape
subplotspec = GridSpec(s1, s2).new_subplotspec(loc,
rowspan=rowspan,
colspan=colspan)
a = fig.add_subplot(subplotspec, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
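# Illustrative usage sketch (added commentary, not part of the original
# module): subplot2grid() is the convenience front-end to GridSpec used for
# spanning layouts, for example:
#
#     ax1 = subplot2grid((3, 3), (0, 0), colspan=3)   # full-width top row
#     ax2 = subplot2grid((3, 3), (1, 0), colspan=2)   # two cells of row 1
#     ax3 = subplot2grid((3, 3), (1, 2), rowspan=2)   # right column, 2 rows
#
# Note that the shape/loc indices are 0-based, unlike subplot()'s 1-based
# plot_number.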
def twinx(ax=None):
"""
Make a second axes that shares the *x*-axis. The new axes will
overlay *ax* (or the current axes if *ax* is *None*). The ticks
for *ax2* will be placed on the right, and the *ax2* instance is
returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
For an example
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
def twiny(ax=None):
"""
Make a second axes that shares the *y*-axis. The new axis will
overlay *ax* (or the current axes if *ax* is *None*). The ticks
for *ax2* will be placed on the top, and the *ax2* instance is
returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
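# Illustrative usage sketch (added commentary, not part of the original
# module): twinx() is the usual way to plot two quantities with different y
# scales against a shared x axis; t, temperature and pressure below are
# placeholder arrays used only for illustration:
#
#     ax1 = gca()
#     ax1.plot(t, temperature, 'r-')
#     ax2 = twinx()               # same x axis, independent y axis on the right
#     ax2.plot(t, pressure, 'b-')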
def subplots_adjust(*args, **kwargs):
"""
Tune the subplot layout.
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
The parameter meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for a figure.
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Automatically adjust subplot parameters to give specified padding.
Parameters:
pad : float
padding between the figure edge and the edges of subplots, as a fraction of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
fig = gcf()
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
draw_if_interactive()
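# Illustrative usage sketch (added commentary, not part of the original
# module): the two layout helpers above are alternatives -- subplots_adjust()
# sets explicit figure fractions, while tight_layout() computes them from the
# artists' sizes:
#
#     fig, axarr = subplots(2, 2)
#     subplots_adjust(hspace=0.4, wspace=0.3)   # manual spacing
#     # ... or, instead:
#     tight_layout(pad=0.5)                     # automatic spacing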
def box(on=None):
"""
Turn the axes box on or off. *on* may be a boolean or a string,
'on' or 'off'.
If *on* is *None*, toggle state.
"""
ax = gca()
on = _string_to_bool(on)
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set a title of the current axes.
Set one of the three available axes titles. The available titles are
positioned above the axes in the center, flush with the left edge,
and flush with the right edge.
Parameters
----------
label : str
Text to use for the title
fontdict : dict
A dictionary controlling the appearance of the title text,
the default `fontdict` is:
{'fontsize': rcParams['axes.titlesize'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
loc : {'center', 'left', 'right'}, str, optional
Which title to set, defaults to 'center'
Returns
-------
text : :class:`~matplotlib.text.Text`
The matplotlib text instance representing the title
Other parameters
----------------
Other keyword arguments are text properties, see
:class:`~matplotlib.text.Text` for a list of valid text
properties.
See also
--------
See :func:`~matplotlib.pyplot.text` for adding text to the current axes
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Convenience method to get or set axis properties.
Calling with no arguments::
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.::
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.::
>>> axis('off')
turns off the axis lines and labels.::
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.::
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.::
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
*ymin*). Note this is slightly different than in MATLAB.::
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.::
>>> axis('auto')
and::
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
    if ``len(v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
For setting the x- and y-limits individually.
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
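# Illustrative usage sketch (added commentary, not part of the original
# module): the string modes of axis() are the quickest way to control aspect
# and visibility:
#
#     plot([0, 1], [0, 1])
#     axis('equal')              # equal data increments in x and y
#     axis([0, 2, -1, 1])        # explicit [xmin, xmax, ymin, ymax]
#     axis('off')                # hide the axis lines and labels entirely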
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis.
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis.
    Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'center',
'horizontalalignment' : 'right',
           'rotation'            : 'vertical' }
.. seealso::
:func:`~matplotlib.pyplot.text`
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Get or set the *x* limits of the current axes.
::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, e.g.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
Setting limits turns autoscaling off for the x-axis.
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
if not args and not kwargs:
return ax.get_xlim()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Get or set the *y*-limits of the current axes.
::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, e.g.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
Setting limits turns autoscaling off for the y-axis.
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
if not args and not kwargs:
return ax.get_ylim()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
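# Illustrative usage sketch (added commentary, not part of the original
# module): xlim()/ylim() mirror each other, and passing only one bound as a
# keyword leaves the other bound untouched:
#
#     plot(range(10))
#     xlim(0, 5)          # set both x bounds
#     ylim(ymax=8)        # raise only the upper y bound
#     cur_x = xlim()      # query the current limits as a (min, max) tuple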
@docstring.dedent_interpd
def xscale(*args, **kwargs):
"""
Set the scaling of the *x*-axis.
call signature::
xscale(scale, **kwargs)
The available scales are: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ax.set_xscale(*args, **kwargs)
draw_if_interactive()
@docstring.dedent_interpd
def yscale(*args, **kwargs):
"""
Set the scaling of the *y*-axis.
call signature::
yscale(scale, **kwargs)
The available scales are: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ax.set_yscale(*args, **kwargs)
draw_if_interactive()
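# Illustrative usage sketch (added commentary, not part of the original
# module): scale changes apply to the current axes and accept scale-specific
# keywords; x and y below are placeholder arrays:
#
#     plot(x, y)
#     yscale('log')                       # plain logarithmic y axis
#     yscale('symlog', linthreshy=0.01)   # log with a linear band around zero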
def xticks(*args, **kwargs):
"""
Get or set the *x*-limits of the current tick locations and labels.
::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties. For example, to rotate long labels::
xticks( arange(12), calendar.month_name[1:13], rotation=17 )
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
"""
Get or set the *y*-limits of the current tick locations and labels.
::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties. For example, to rotate long labels::
yticks( arange(12), calendar.month_name[1:13], rotation=45 )
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
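# Illustrative usage sketch (added commentary, not part of the original
# module): both tick helpers follow the same locations-then-labels pattern,
# with text kwargs applied to the labels:
#
#     plot(range(5))
#     xticks(range(5), ['a', 'b', 'c', 'd', 'e'], rotation=30)
#     locs, labels = yticks()     # query current tick locations and labels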
def minorticks_on():
"""
Display minor ticks on the current plot.
Displaying minor ticks reduces performance; turn them off using
minorticks_off() if drawing speed is a problem.
"""
gca().minorticks_on()
draw_if_interactive()
def minorticks_off():
"""
Remove minor ticks from the current plot.
"""
gca().minorticks_off()
draw_if_interactive()
def rgrids(*args, **kwargs):
"""
Get or set the radial gridlines on a polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
    When called with no arguments, :func:`rgrids` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
*labels*, if not *None*, is a len(*radii*) list of strings of the
labels to use at each angle.
If *labels* is None, the rformatter will be used
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
        lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_gridlines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Get or set the theta locations of the gridlines in a polar plot.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of radial gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (these gridlines
are equal along the theta dimension).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). e.g., 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a list of tuples (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
        raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
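# Illustrative usage sketch (added commentary, not part of the original
# module): both grid helpers require a polar axes to be current, e.g.:
#
#     polar([0, 1.5, 3.0], [1, 2, 3])
#     rgrids((1, 2, 3))                    # radial gridlines at r = 1, 2, 3
#     thetagrids(range(0, 360, 45))        # theta gridlines every 45 degrees
#
# Calling either on a rectangular axes raises the RuntimeError seen above.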
## Plotting Info ##
def plotting():
pass
def get_plot_commands():
"""
Get a sorted list of all of the plotting commands.
"""
# This works by searching for all functions in this module and
# removing a few hard-coded exclusions, as well as all of the
# colormap-setting functions, and anything marked as private with
# a preceding underscore.
import inspect
exclude = set(['colormaps', 'colors', 'connect', 'disconnect',
'get_plot_commands', 'get_current_fig_manager',
'ginput', 'plotting', 'waitforbuttonpress'])
exclude |= set(colormaps())
this_module = inspect.getmodule(get_plot_commands)
commands = set()
for name, obj in globals().items():
if name.startswith('_') or name in exclude:
continue
if inspect.isfunction(obj) and inspect.getmodule(obj) is this_module:
commands.add(name)
commands = list(commands)
commands.sort()
return commands
def colors():
"""
This is a do-nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic built-in colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red'
color = 'burlywood'
color = 'chartreuse'
The example below creates a subplot with a dark
slate gray background::
subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
def colormaps():
"""
Matplotlib provides a number of colormaps, and others can be added using
:func:`~matplotlib.cm.register_cmap`. This function documents the built-in
colormaps, and will also return a list of all registered colormaps if called.
You can set the colormap for an image, pcolor, scatter, etc,
using a keyword argument::
imshow(X, cmap=cm.hot)
or using the :func:`set_cmap` function::
imshow(X)
pyplot.set_cmap('hot')
pyplot.set_cmap('jet')
In interactive mode, :func:`set_cmap` will update the colormap post-hoc,
allowing you to see which one works best for your data.
All built-in colormaps can be reversed by appending ``_r``: For instance,
``gray_r`` is the reverse of ``gray``.
There are several common color schemes used in visualization:
Sequential schemes
for unipolar data that progresses from low to high
Diverging schemes
for bipolar data that emphasizes positive or negative deviations from a
central value
Cyclic schemes
meant for plotting values that wrap around at the
endpoints, such as phase angle, wind direction, or time of day
Qualitative schemes
for nominal data that has no inherent ordering, where color is used
only to distinguish categories
The base colormaps are derived from those of the same name provided
with Matlab:
========= =======================================================
Colormap Description
========= =======================================================
autumn sequential linearly-increasing shades of red-orange-yellow
bone sequential increasing black-white color map with
a tinge of blue, to emulate X-ray film
cool linearly-decreasing shades of cyan-magenta
copper sequential increasing shades of black-copper
flag repetitive red-white-blue-black pattern (not cyclic at
endpoints)
gray sequential linearly-increasing black-to-white
grayscale
hot sequential black-red-yellow-white, to emulate blackbody
radiation from an object at increasing temperatures
hsv cyclic red-yellow-green-cyan-blue-magenta-red, formed
by changing the hue component in the HSV color space
jet a spectral map with dark endpoints, blue-cyan-yellow-red;
based on a fluid-jet simulation by NCSA [#]_
pink sequential increasing pastel black-pink-white, meant
for sepia tone colorization of photographs
prism repetitive red-yellow-green-blue-purple-...-green pattern
(not cyclic at endpoints)
spring linearly-increasing shades of magenta-yellow
summer sequential linearly-increasing shades of green-yellow
winter linearly-increasing shades of blue-green
========= =======================================================
For the above list only, you can also set the colormap using the
corresponding pylab shortcut interface function, similar to Matlab::
imshow(X)
hot()
jet()
The next set of palettes are from the `Yorick scientific visualisation
package <http://yorick.sourceforge.net/index.php>`_, an evolution of
the GIST package, both by David H. Munro:
============ =======================================================
Colormap Description
============ =======================================================
gist_earth mapmaker's colors from dark blue deep ocean to green
lowlands to brown highlands to white mountains
gist_heat sequential increasing black-red-orange-white, to emulate
blackbody radiation from an iron bar as it grows hotter
gist_ncar pseudo-spectral black-blue-green-yellow-red-purple-white
colormap from National Center for Atmospheric
Research [#]_
gist_rainbow runs through the colors in spectral order from red to
violet at full saturation (like *hsv* but not cyclic)
gist_stern "Stern special" color table from Interactive Data
Language software
============ =======================================================
The following colormaps are based on the `ColorBrewer
<http://colorbrewer.org>`_ color specifications and designs developed by
Cynthia Brewer:
ColorBrewer Diverging (luminance is highest at the midpoint, and
decreases towards differently-colored endpoints):
======== ===================================
Colormap Description
======== ===================================
BrBG brown, white, blue-green
PiYG pink, white, yellow-green
PRGn purple, white, green
PuOr orange, white, purple
RdBu red, white, blue
RdGy red, white, gray
RdYlBu red, yellow, blue
RdYlGn red, yellow, green
Spectral red, orange, yellow, green, blue
======== ===================================
ColorBrewer Sequential (luminance decreases monotonically):
======== ====================================
Colormap Description
======== ====================================
Blues white to dark blue
BuGn white, light blue, dark green
BuPu white, light blue, dark purple
GnBu white, light green, dark blue
Greens white to dark green
Greys white to black (not linear)
Oranges white, orange, dark brown
OrRd white, orange, dark red
PuBu white, light purple, dark blue
PuBuGn white, light purple, dark green
PuRd white, light purple, dark red
Purples white to dark purple
RdPu white, pink, dark purple
Reds white to dark red
YlGn light yellow, dark green
YlGnBu light yellow, light green, dark blue
YlOrBr light yellow, orange, dark brown
YlOrRd light yellow, orange, dark red
======== ====================================
ColorBrewer Qualitative:
(For plotting nominal data, :class:`ListedColormap` should be used,
not :class:`LinearSegmentedColormap`. Different sets of colors are
recommended for different numbers of categories. These continuous
versions of the qualitative schemes may be removed or converted in the
future.)
* Accent
* Dark2
* Paired
* Pastel1
* Pastel2
* Set1
* Set2
* Set3
Other miscellaneous schemes:
============= =======================================================
Colormap Description
============= =======================================================
afmhot sequential black-orange-yellow-white blackbody
spectrum, commonly used in atomic force microscopy
brg blue-red-green
bwr diverging blue-white-red
coolwarm diverging blue-gray-red, meant to avoid issues with 3D
shading, color blindness, and ordering of colors [#]_
CMRmap "Default colormaps on color images often reproduce to
confusing grayscale images. The proposed colormap
maintains an aesthetically pleasing color image that
automatically reproduces to a monotonic grayscale with
discrete, quantifiable saturation levels." [#]_
cubehelix Unlike most other color schemes cubehelix was designed
by D.A. Green to be monotonically increasing in terms
of perceived brightness. Also, when printed on a black
and white postscript printer, the scheme results in a
greyscale with monotonically increasing brightness.
This color scheme is named cubehelix because the r,g,b
values produced can be visualised as a squashed helix
around the diagonal in the r,g,b color cube.
gnuplot gnuplot's traditional pm3d scheme
(black-blue-red-yellow)
gnuplot2 sequential color printable as gray
(black-blue-violet-yellow-white)
ocean green-blue-white
rainbow spectral purple-blue-green-yellow-orange-red colormap
with diverging luminance
seismic diverging blue-white-red
nipy_spectral black-purple-blue-green-yellow-red-white spectrum,
originally from the Neuroimaging in Python project
terrain mapmaker's colors, blue-green-yellow-brown-white,
originally from IGOR Pro
============= =======================================================
The following colormaps are redundant and may be removed in future
versions. It's recommended to use the names in the descriptions
instead, which produce identical output:
========= =======================================================
Colormap Description
========= =======================================================
gist_gray identical to *gray*
gist_yarg identical to *gray_r*
binary identical to *gray_r*
spectral identical to *nipy_spectral* [#]_
========= =======================================================
.. rubric:: Footnotes
.. [#] Rainbow colormaps, ``jet`` in particular, are considered a poor
choice for scientific visualization by many researchers: `Rainbow Color
Map (Still) Considered Harmful
<http://www.jwave.vt.edu/%7Erkriz/Projects/create_color_table/color_07.pdf>`_
.. [#] Resembles "BkBlAqGrYeOrReViWh200" from NCAR Command
Language. See `Color Table Gallery
<http://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml>`_
.. [#] See `Diverging Color Maps for Scientific Visualization
<http://www.cs.unm.edu/~kmorel/documents/ColorMaps/>`_ by Kenneth
Moreland.
.. [#] See `A Color Map for Effective Black-and-White Rendering of
Color-Scale Images
<http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m>`_
by Carey Rappaport
.. [#] Changed to distinguish from ColorBrewer's *Spectral* map.
:func:`spectral` still works, but
``set_cmap('nipy_spectral')`` is recommended for clarity.
"""
return sorted(cm.cmap_d.keys())
def _setup_pyplot_info_docstrings():
"""
    Generates the plotting docstring.
These must be done after the entire module is imported, so it is
called from the end of this module, which is generated by
boilerplate.py.
"""
# Generate the plotting docstring
import re
def pad(s, l):
"""Pad string *s* to length *l*."""
if l < len(s):
return s[:l]
return s + ' ' * (l - len(s))
commands = get_plot_commands()
    first_sentence = re.compile(r"(?:\s*).+?\.(?:\s+|$)", flags=re.DOTALL)
# Collect the first sentence of the docstring for all of the
# plotting commands.
rows = []
max_name = 0
max_summary = 0
for name in commands:
doc = globals()[name].__doc__
summary = ''
if doc is not None:
match = first_sentence.match(doc)
if match is not None:
summary = match.group(0).strip().replace('\n', ' ')
name = '`%s`' % name
rows.append([name, summary])
max_name = max(max_name, len(name))
max_summary = max(max_summary, len(summary))
lines = []
sep = '=' * max_name + ' ' + '=' * max_summary
lines.append(sep)
lines.append(' '.join([pad("Function", max_name),
pad("Description", max_summary)]))
lines.append(sep)
for name, summary in rows:
lines.append(' '.join([pad(name, max_name),
pad(summary, max_summary)]))
lines.append(sep)
plotting.__doc__ = '\n'.join(lines)
## Plotting part 1: manually generated functions and wrappers ##
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if mappable is None:
raise RuntimeError('No mappable was found to use for colorbar '
'creation. First define a mappable such as '
'an image (with imshow) or a contour set ('
'with contourf).')
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = matplotlib.colorbar.colorbar_doc
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image.
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, eg with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
def set_cmap(cmap):
"""
Set the default colormap. Applies to the current image if any.
See help(colormaps) for more information.
*cmap* must be a :class:`~matplotlib.colors.Colormap` instance, or
the name of a registered colormap.
See :func:`matplotlib.cm.register_cmap` and
:func:`matplotlib.cm.get_cmap`.
"""
cmap = cm.get_cmap(cmap)
rc('image', cmap=cmap.name)
im = gci()
if im is not None:
im.set_cmap(cmap)
draw_if_interactive()
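# Illustrative usage sketch (added commentary, not part of the original
# module): set_cmap() changes both the rc default and the current image, so it
# can be used to compare colormaps interactively:
#
#     data = np.random.rand(10, 10)
#     imshow(data)
#     set_cmap('hot')       # recolor the image just drawn
#     set_cmap('gray_r')    # any name from colormaps() works, including _r forms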
@docstring.copy_dedent(_imread)
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
@docstring.copy_dedent(_imsave)
def imsave(*args, **kwargs):
return _imsave(*args, **kwargs)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of *fignum*, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`. You may set the *origin*
kwarg to "lower" if you want the first row in the array to be
at the bottom instead of the top.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because of how
:func:`matshow` tries to set the figure aspect ratio to be the
one of the array, if you provide the number of an already
existing figure, strange things may happen.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
"""
A = np.asanyarray(A)
    if fignum is False or fignum == 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
sci(im)
draw_if_interactive()
return im
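# Illustrative usage sketch (added commentary, not part of the original
# module): matshow() sizes a new figure to the array's aspect ratio before
# displaying it:
#
#     M = np.random.rand(8, 12)
#     im = matshow(M)          # new, appropriately shaped figure
#     colorbar(im)
#     matshow(M, fignum=0)     # reuse the current axes instead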
def polar(*args, **kwargs):
"""
Make a polar plot.
call signature::
polar(theta, r, **kwargs)
Multiple *theta*, *r* arguments are supported, with format
strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',', names=None,
subplots=True, newfig=True, **kwargs):
"""
    Plot the data in a file.
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots if *subplots* is *True*
(the default), or for lines in a single subplot if *subplots*
is *False*.
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, e.g., integer
column numbers in both or column names in both. If *subplots*
is *False*, then including any function such as 'semilogy'
that changes the axis scaling will set the scaling for all
columns.
*comments*, *skiprows*, *checkrows*, *delimiter*, and *names*
are all passed on to :func:`matplotlib.pylab.csv2rec` to
load the data into a record array.
If *newfig* is *True*, the plot always will be made in a new figure;
if *False*, it will be made in the current figure if one exists,
else in a new figure.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'),
plotfuncs={'volume': 'semilogy'})
Note: plotfile is intended as a convenience for quickly plotting
data from flat files; it is not intended as an alternative
interface to general plotting with pyplot or matplotlib.
"""
if newfig:
fig = figure()
else:
fig = gcf()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments, skiprows=skiprows,
checkrows=checkrows, delimiter=delimiter, names=names)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
ynamelist = []
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_ylabel(xname)
else:
N = len(cols)
for i in range(1,N):
if subplots:
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
elif i==1:
ax = fig.add_subplot(1,1,1)
yname, y = getname_val(cols[i])
ynamelist.append(yname)
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
if subplots:
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if not subplots:
ax.legend(ynamelist, loc='best')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
def _autogen_docstring(base):
"""Autogenerated wrappers will get their docstring from a base function
with an addendum."""
msg = "\n\nAdditional kwargs: hold = [True|False] overrides default hold state"
addendum = docstring.Appender(msg, '\n\n')
return lambda func: addendum(docstring.copy_dedent(base)(func))
# This function cannot be generated by boilerplate.py because it may
# return an image or a line.
@_autogen_docstring(Axes.spy)
def spy(Z, precision=0, marker=None, markersize=None, aspect='equal', hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.spy(Z, precision, marker, markersize, aspect, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if isinstance(ret, cm.ScalarMappable):
sci(ret)
return ret
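# Illustrative usage sketch (added commentary, not part of the original
# module): spy() visualizes the sparsity pattern of a matrix; with a marker it
# draws a Line2D, otherwise an image:
#
#     A = np.eye(10)
#     A[2, 7] = A[7, 2] = 1
#     spy(A)                               # image of the nonzero structure
#     spy(A, marker='.', markersize=8)     # same pattern as discrete markers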
################# REMAINING CONTENT GENERATED BY boilerplate.py ##############
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.acorr)
def acorr(x, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.acorr(x, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.arrow)
def arrow(x, y, dx, dy, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.arrow(x, y, dx, dy, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axhline)
def axhline(y=0, xmin=0, xmax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axhline(y=y, xmin=xmin, xmax=xmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axhspan)
def axhspan(ymin, ymax, xmin=0, xmax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axhspan(ymin, ymax, xmin=xmin, xmax=xmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axvline)
def axvline(x=0, ymin=0, ymax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axvline(x=x, ymin=ymin, ymax=ymax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axvspan)
def axvspan(xmin, xmax, ymin=0, ymax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axvspan(xmin, xmax, ymin=ymin, ymax=ymax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.bar)
def bar(left, height, width=0.8, bottom=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.bar(left, height, width=width, bottom=bottom, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.barh)
def barh(bottom, width, height=0.8, left=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.barh(bottom, width, height=height, left=left, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.broken_barh)
def broken_barh(xranges, yrange, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.broken_barh(xranges, yrange, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.boxplot)
def boxplot(x, notch=False, sym='b+', vert=True, whis=1.5, positions=None,
widths=None, patch_artist=False, bootstrap=None, usermedians=None,
conf_intervals=None, hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.boxplot(x, notch=notch, sym=sym, vert=vert, whis=whis,
positions=positions, widths=widths,
patch_artist=patch_artist, bootstrap=bootstrap,
usermedians=usermedians, conf_intervals=conf_intervals)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.cohere)
def cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.cohere(x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.clabel)
def clabel(CS, *args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.clabel(CS, *args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.contour)
def contour(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.contour(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.contourf)
def contourf(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.contourf(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.csd)
def csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.csd(x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.errorbar)
def errorbar(x, y, yerr=None, xerr=None, fmt='-', ecolor=None, elinewidth=None,
capsize=3, barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1, capthick=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt=fmt, ecolor=ecolor,
elinewidth=elinewidth, capsize=capsize,
barsabove=barsabove, lolims=lolims, uplims=uplims,
xlolims=xlolims, xuplims=xuplims,
errorevery=errorevery, capthick=capthick, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.eventplot)
def eventplot(positions, orientation='horizontal', lineoffsets=1, linelengths=1,
linewidths=None, colors=None, linestyles='solid', hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.eventplot(positions, orientation=orientation,
lineoffsets=lineoffsets, linelengths=linelengths,
linewidths=linewidths, colors=colors,
linestyles=linestyles, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.fill)
def fill(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.fill(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.fill_between)
def fill_between(x, y1, y2=0, where=None, interpolate=False, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.fill_between(x, y1, y2=y2, where=where,
interpolate=interpolate, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.fill_betweenx)
def fill_betweenx(y, x1, x2=0, where=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.fill_betweenx(y, x1, x2=x2, where=where, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hexbin)
def hexbin(x, y, C=None, gridsize=100, bins=None, xscale='linear',
yscale='linear', extent=None, cmap=None, norm=None, vmin=None,
vmax=None, alpha=None, linewidths=None, edgecolors='none',
reduce_C_function=np.mean, mincnt=None, marginals=False, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hexbin(x, y, C=C, gridsize=gridsize, bins=bins, xscale=xscale,
yscale=yscale, extent=extent, cmap=cmap, norm=norm,
vmin=vmin, vmax=vmax, alpha=alpha,
linewidths=linewidths, edgecolors=edgecolors,
reduce_C_function=reduce_C_function, mincnt=mincnt,
marginals=marginals, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hist)
def hist(x, bins=10, range=None, normed=False, weights=None, cumulative=False,
bottom=None, histtype='bar', align='mid', orientation='vertical',
rwidth=None, log=False, color=None, label=None, stacked=False,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hist(x, bins=bins, range=range, normed=normed,
weights=weights, cumulative=cumulative, bottom=bottom,
histtype=histtype, align=align, orientation=orientation,
rwidth=rwidth, log=log, color=color, label=label,
stacked=stacked, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hist2d)
def hist2d(x, y, bins=10, range=None, normed=False, weights=None, cmin=None,
cmax=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hist2d(x, y, bins=bins, range=range, normed=normed,
weights=weights, cmin=cmin, cmax=cmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret[-1])
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hlines)
def hlines(y, xmin, xmax, colors='k', linestyles='solid', label='', hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hlines(y, xmin, xmax, colors=colors, linestyles=linestyles,
label=label, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.imshow)
def imshow(X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=None,
vmin=None, vmax=None, origin=None, extent=None, shape=None,
filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.imshow(X, cmap=cmap, norm=norm, aspect=aspect,
interpolation=interpolation, alpha=alpha, vmin=vmin,
vmax=vmax, origin=origin, extent=extent, shape=shape,
filternorm=filternorm, filterrad=filterrad,
imlim=imlim, resample=resample, url=url, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.loglog)
def loglog(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.loglog(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.pcolor)
def pcolor(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.pcolor(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.pcolormesh)
def pcolormesh(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.pcolormesh(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.pie)
def pie(x, explode=None, labels=None, colors=None, autopct=None,
pctdistance=0.6, shadow=False, labeldistance=1.1, startangle=None,
radius=None, hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.pie(x, explode=explode, labels=labels, colors=colors,
autopct=autopct, pctdistance=pctdistance, shadow=shadow,
labeldistance=labeldistance, startangle=startangle,
radius=radius)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.plot)
def plot(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.plot_date)
def plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.plot_date(x, y, fmt=fmt, tz=tz, xdate=xdate, ydate=ydate,
**kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.psd)
def psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.psd(x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.quiver)
def quiver(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.quiver(*args, **kw)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.quiverkey)
def quiverkey(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.quiverkey(*args, **kw)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.scatter)
def scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None,
vmax=None, alpha=None, linewidths=None, verts=None, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.scatter(x, y, s=s, c=c, marker=marker, cmap=cmap, norm=norm,
vmin=vmin, vmax=vmax, alpha=alpha,
linewidths=linewidths, verts=verts, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.semilogx)
def semilogx(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.semilogx(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.semilogy)
def semilogy(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.semilogy(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.specgram)
def specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128, cmap=None, xextent=None,
pad_to=None, sides='default', scale_by_freq=None, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.specgram(x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, cmap=cmap,
xextent=xextent, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret[-1])
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.stackplot)
def stackplot(x, *args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.stackplot(x, *args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.stem)
def stem(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.stem(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.step)
def step(x, y, *args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.step(x, y, *args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.streamplot)
def streamplot(x, y, u, v, density=1, linewidth=None, color=None, cmap=None,
norm=None, arrowsize=1, arrowstyle='-|>', minlength=0.1,
transform=None, hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.streamplot(x, y, u, v, density=density, linewidth=linewidth,
color=color, cmap=cmap, norm=norm,
arrowsize=arrowsize, arrowstyle=arrowstyle,
minlength=minlength, transform=transform)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret.lines)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.tricontour)
def tricontour(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.tricontour(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.tricontourf)
def tricontourf(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.tricontourf(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.tripcolor)
def tripcolor(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.tripcolor(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.triplot)
def triplot(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.triplot(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.vlines)
def vlines(x, ymin, ymax, colors='k', linestyles='solid', label='', hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.vlines(x, ymin, ymax, colors=colors, linestyles=linestyles,
label=label, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.xcorr)
def xcorr(x, y, normed=True, detrend=mlab.detrend_none, usevlines=True,
maxlags=10, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.xcorr(x, y, normed=normed, detrend=detrend,
usevlines=usevlines, maxlags=maxlags, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.barbs)
def barbs(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.barbs(*args, **kw)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.cla)
def cla():
ret = gca().cla()
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.grid)
def grid(b=None, which='major', axis='both', **kwargs):
ret = gca().grid(b=b, which=which, axis=axis, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.legend)
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.table)
def table(**kwargs):
ret = gca().table(**kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.text)
def text(x, y, s, fontdict=None, withdash=False, **kwargs):
ret = gca().text(x, y, s, fontdict=fontdict, withdash=withdash, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.annotate)
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.ticklabel_format)
def ticklabel_format(**kwargs):
ret = gca().ticklabel_format(**kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.locator_params)
def locator_params(axis='both', tight=None, **kwargs):
ret = gca().locator_params(axis=axis, tight=tight, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.tick_params)
def tick_params(axis='both', **kwargs):
ret = gca().tick_params(axis=axis, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.margins)
def margins(*args, **kw):
ret = gca().margins(*args, **kw)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.autoscale)
def autoscale(enable=True, axis='both', tight=None):
ret = gca().autoscale(enable=enable, axis=axis, tight=tight)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
_setup_pyplot_info_docstrings()
| unlicense |
ebolyen/qiime2 | qiime2/metadata.py | 1 | 11345 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import itertools
import os.path
import sqlite3
import uuid
import pandas as pd
class Metadata:
def __init__(self, dataframe):
# Not using DataFrame.empty because empty columns are allowed.
if dataframe.index.empty:
raise ValueError("Metadata is empty, there must be at least one "
"ID associated with it.")
# `/` and `\0` aren't permitted because they are invalid filename
# characters on *nix filesystems. The remaining values aren't permitted
# because they *could* be misinterpreted by a shell (e.g. `*`, `|`).
illegal_chars = ['/', '\0', '\\', '*', '<', '>', '?', '|', '$']
chars_for_msg = ", ".join("%r" % i for i in illegal_chars)
illegal_chars = set(illegal_chars)
for (axis, label) in [(dataframe.columns, 'category label'),
(dataframe.index, 'index')]:
# First check the axis dtype
if axis.dtype_str not in ['object', 'str']:
msg = "Non-string Metadata %s values detected" % label
raise ValueError(invalid_metadata_template % msg)
# Then check for invalid characters along axis
for value in axis:
if illegal_chars & set(value):
msg = "Invalid characters (e.g. %s) detected in " \
"metadata %s: %r" % (chars_for_msg, label, value)
raise ValueError(invalid_metadata_template % msg)
# Finally, ensure unique values along axis
if len(axis) != len(set(axis)):
msg = "Duplicate Metadata %s values detected" % label
raise ValueError(invalid_metadata_template % msg)
self._dataframe = dataframe
self._artifacts = []
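# Minimal construction sketch (toy ID and category, not from the QIIME 2 docs;
# pandas is already imported above as pd):
#   md = Metadata(pd.DataFrame({'BodySite': ['gut']}, index=['sample-1']))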
def __repr__(self):
return repr(self._dataframe)
def _repr_html_(self):
return self._dataframe._repr_html_()
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self._artifacts == other._artifacts and
self._dataframe.equals(other._dataframe)
)
def __ne__(self, other):
return not (self == other)
@property
def artifacts(self):
return self._artifacts
@classmethod
def from_artifact(cls, artifact):
"""
Parameters
----------
artifact: qiime2.Artifact
Loaded artifact object.
Returns
-------
qiime2.Metadata
"""
if not artifact.has_metadata():
raise ValueError('Artifact has no metadata.')
md = artifact.view(cls)
md._artifacts.append(artifact)
return md
@classmethod
def load(cls, path):
if not os.path.exists(path):
raise OSError(
"Metadata file %s doesn't exist or isn't accessible (e.g., "
"due to incompatible file permissions)." % path)
read_csv_kwargs = {}
with open(path, 'r') as fh:
peek = fh.readline().rstrip('\n')
if peek.startswith('#SampleID'):
header = peek.split('\t')
read_csv_kwargs = {'header': None, 'names': header}
try:
df = pd.read_csv(path, sep='\t', dtype=object, comment='#',
skip_blank_lines=True, **read_csv_kwargs)
df.set_index(df.columns[0], drop=True, append=False, inplace=True)
except (pd.io.common.CParserError, KeyError):
msg = 'Metadata file format is invalid for file %s' % path
raise ValueError(invalid_metadata_template % msg)
return cls(df)
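# Usage sketch (file name hypothetical): load a QIIME 1 style mapping file and
# get a copy of the underlying DataFrame:
#   md = Metadata.load('sample-metadata.tsv')
#   df = md.to_dataframe()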
def merge(self, *others):
"""Merge this ``Metadata`` object with other ``Metadata`` objects.
Returns a new ``Metadata`` object containing the merged contents of
this ``Metadata`` object and `others`. The merge is not in-place and
will always return a **new** merged ``Metadata`` object.
The merge will include only those IDs that are shared across **all**
``Metadata`` objects being merged (i.e. the merge is an *inner join*).
Each metadata category (i.e. column) being merged must be unique;
merging metadata with overlapping categories will result in an error.
Parameters
----------
others : tuple
Zero or more ``Metadata`` objects to merge with this ``Metadata``
object.
Returns
-------
Metadata
New object containing merged metadata. The merged IDs will be in
the same relative order as the IDs in this ``Metadata`` object
after performing the inner join. The merged category order
(i.e. column order) will match the category order of ``Metadata``
objects being merged from left to right.
Notes
-----
The merged metadata object tracks all source artifacts that it was
built from to preserve provenance (i.e. the ``.artifacts`` property
on all ``Metadata`` objects is merged).
"""
dfs = []
columns = []
artifacts = []
for md in itertools.chain([self], others):
df = md._dataframe
dfs.append(df)
columns.extend(df.columns.tolist())
artifacts.extend(md.artifacts)
columns = pd.Index(columns)
if columns.has_duplicates:
raise ValueError(
"Cannot merge metadata with overlapping categories "
"(i.e. overlapping columns). The following categories "
"overlap: %s" %
', '.join([repr(e) for e in columns.get_duplicates()]))
merged_df = dfs[0].join(dfs[1:], how='inner')
# Not using DataFrame.empty because empty columns are allowed.
if merged_df.index.empty:
raise ValueError(
"Cannot merge because there are no IDs shared across metadata "
"objects.")
merged_md = self.__class__(merged_df)
merged_md._artifacts = artifacts
return merged_md
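# Merge sketch (toy frames; both share the ID 'sample-1' and have disjoint
# categories, so the inner join keeps one row with two columns):
#   a = Metadata(pd.DataFrame({'Subject': ['subject-1']}, index=['sample-1']))
#   b = Metadata(pd.DataFrame({'BodySite': ['gut']}, index=['sample-1']))
#   merged = a.merge(b)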
def get_category(self, *names):
if len(names) != 1:
# TODO: Make this work with multiple columns as a single series
raise NotImplementedError("Extracting multiple columns is not yet"
" supported.")
try:
result = MetadataCategory(self._dataframe[names[0]])
except KeyError:
raise KeyError(
'%s is not a category in metadata file. Available '
'categories are %s.' %
(names[0], ', '.join(self._dataframe.columns)))
else:
result._artifacts.extend(self.artifacts)
return result
def to_dataframe(self):
return self._dataframe.copy()
def ids(self, where=None):
"""Retrieve IDs matching search criteria.
Parameters
----------
where : str, optional
SQLite WHERE clause specifying criteria IDs must meet to be
included in the results. All IDs are included by default.
Returns
-------
set
IDs matching search criteria specified in `where`.
"""
if where is None:
return set(self._dataframe.index)
conn = sqlite3.connect(':memory:')
conn.row_factory = lambda cursor, row: row[0]
# If the index isn't named, generate a unique random column name to
# store it under in the SQL table. If we don't supply a column name for
# the unnamed index, pandas will choose the name 'index', and if that
# name conflicts with existing columns, the name will be 'level_0',
# 'level_1', etc. Instead of trying to guess what pandas named the
# index column (since this isn't documented behavior), explicitly
# generate an index column name.
index_column = self._dataframe.index.name
if index_column is None:
index_column = self._generate_column_name()
self._dataframe.to_sql('metadata', conn, index=True,
index_label=index_column)
c = conn.cursor()
# In general we wouldn't want to format our query in this way because
# it leaves us open to sql injection, but it seems acceptable here for
# a few reasons:
# 1) This is a throw-away database which we're just creating to have
# access to the query language, so any malicious behavior wouldn't
# impact any data that isn't temporary
# 2) The substitution syntax recommended in the docs doesn't allow
# us to specify complex `where` statements, which is what we need to
# do here. For example, we need to specify things like:
# WHERE Subject='subject-1' AND SampleType='gut'
# but their qmark/named-style syntaxes only support substitution of
# variables, such as:
# WHERE Subject=?
# 3) sqlite3.Cursor.execute will only execute a single statement so
# inserting multiple statements
# (e.g., "Subject='subject-1'; DROP...") will result in an
# OperationalError being raised.
query = ('SELECT "{0}" FROM metadata WHERE {1} GROUP BY "{0}" '
'ORDER BY "{0}";'.format(index_column, where))
try:
c.execute(query)
except sqlite3.OperationalError:
conn.close()
raise ValueError("Selection of IDs failed with query:\n %s"
% query)
ids = set(c.fetchall())
conn.close()
return ids
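# WHERE-clause sketch (the object name and category values are hypothetical,
# but the clause mirrors the example given in the comments above):
#   md.ids(where="Subject='subject-1' AND SampleType='gut'")
#   md.ids()   # no clause -> every ID in the metadata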
def _generate_column_name(self):
"""Generate column name that doesn't clash with current columns."""
while True:
name = str(uuid.uuid4())
if name not in self._dataframe.columns:
return name
class MetadataCategory:
def __init__(self, series):
self._series = series
self._artifacts = []
def __repr__(self):
return repr(self._series)
@classmethod
def load(cls, path, category):
return Metadata.load(path).get_category(category)
def to_series(self):
return self._series.copy()
@property
def artifacts(self):
return self._artifacts
@classmethod
def from_artifact(cls, artifact, category):
"""
Parameters
----------
artifact: qiime2.Artifact
Loaded artifact object.
Returns
-------
qiime2.MetadataCategory
"""
return Metadata.from_artifact(artifact).get_category(category)
invalid_metadata_template = "%s. There may be more errors present in this " \
"metadata. Currently only QIIME 1 sample/feature metadata mapping files " \
"are officially supported. Sample metadata files can be validated using " \
"Keemei: http://keemei.qiime.org."
| bsd-3-clause |
SalemAmeen/bayespy | bayespy/demos/stochastic_inference.py | 5 | 5013 | ################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Stochastic variational inference on mixture of Gaussians
Stochastic variational inference is a scalable variational Bayesian
learning method which utilizes stochastic gradient. For details, see
:cite:`Hoffman:2013`.
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import bayespy.plot as myplt
from bayespy.utils import misc
from bayespy.utils import random
from bayespy.nodes import Gaussian, Categorical, Mixture, Dirichlet
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
import bayespy.plot as bpplt
from bayespy.demos import pca
def run(N=100000, N_batch=50, seed=42, maxiter=100, plot=True):
"""
Run stochastic variational inference demo for a Gaussian mixture model.
"""
if seed is not None:
np.random.seed(seed)
# Number of clusters in the model
K = 20
# Dimensionality of the data
D = 5
# Generate data
K_true = 10
spread = 5
means = spread * np.random.randn(K_true, D)
z = random.categorical(np.ones(K_true), size=N)
data = np.empty((N,D))
for n in range(N):
data[n] = means[z[n]] + np.random.randn(D)
#
# Standard VB-EM algorithm
#
# Full model
mu = Gaussian(np.zeros(D), np.identity(D),
plates=(K,),
name='means')
alpha = Dirichlet(np.ones(K),
name='class probabilities')
Z = Categorical(alpha,
plates=(N,),
name='classes')
Y = Mixture(Z, Gaussian, mu, np.identity(D),
name='observations')
# Break symmetry with random initialization of the means
mu.initialize_from_random()
# Put the data in
Y.observe(data)
# Run inference
Q = VB(Y, Z, mu, alpha)
Q.save(mu)
Q.update(repeat=maxiter)
if plot:
bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'k-')
max_cputime = np.sum(Q.cputime[~np.isnan(Q.cputime)])
#
# Stochastic variational inference
#
# Construct smaller model (size of the mini-batch)
mu = Gaussian(np.zeros(D), np.identity(D),
plates=(K,),
name='means')
alpha = Dirichlet(np.ones(K),
name='class probabilities')
Z = Categorical(alpha,
plates=(N_batch,),
plates_multiplier=(N/N_batch,),
name='classes')
Y = Mixture(Z, Gaussian, mu, np.identity(D),
name='observations')
# Break symmetry with random initialization of the means
mu.initialize_from_random()
# Inference engine
Q = VB(Y, Z, mu, alpha, autosave_filename=Q.autosave_filename)
Q.load(mu)
# Because we are using mini-batches, messages need to be multiplied appropriately
print("Stochastic variational inference...")
Q.ignore_bound_checks = True
maxiter *= int(N/N_batch)
delay = 1
forgetting_rate = 0.7
for n in range(maxiter):
# Observe a mini-batch
subset = np.random.choice(N, N_batch)
Y.observe(data[subset,:])
# Learn intermediate variables
Q.update(Z)
# Set step length
step = (n + delay) ** (-forgetting_rate)
# Stochastic gradient for the global variables
Q.gradient_step(mu, alpha, scale=step)
if np.sum(Q.cputime[:n]) > max_cputime:
break
if plot:
bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'r:')
bpplt.pyplot.xlabel('CPU time (in seconds)')
bpplt.pyplot.ylabel('VB lower bound')
bpplt.pyplot.legend(['VB-EM', 'Stochastic inference'], loc='lower right')
bpplt.pyplot.title('VB for Gaussian mixture model')
return
if __name__ == '__main__':
import sys, getopt, os
try:
opts, args = getopt.getopt(sys.argv[1:],
"",
["n=",
"batch=",
"seed=",
"maxiter="])
except getopt.GetoptError:
print('python stochastic_inference.py <options>')
print('--n=<INT> Number of data points')
print('--batch=<INT> Mini-batch size')
print('--maxiter=<INT> Maximum number of VB iterations')
print('--seed=<INT> Seed (integer) for the random number generator')
sys.exit(2)
kwargs = {}
for opt, arg in opts:
if opt == "--maxiter":
kwargs["maxiter"] = int(arg)
elif opt == "--seed":
kwargs["seed"] = int(arg)
elif opt in ("--n",):
kwargs["N"] = int(arg)
elif opt in ("--batch",):
kwargs["N_batch"] = int(arg)
run(**kwargs)
plt.show()
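# Example invocation from the command line (argument values illustrative only;
# the flags match the getopt options parsed above):
#   python stochastic_inference.py --n=10000 --batch=100 --maxiter=50 --seed=1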
| mit |
EtienneCmb/tensorpac | examples/pac/plot_compare_surrogates.py | 1 | 3213 | """
====================================================
Compare methods to correct PAC for spurious coupling
====================================================
This example illustrates the different methods implemented to generate
the distribution of surrogates and then to correct the PAC for spurious
couplings. This includes :
* Swapping phase / amplitude trials (Tort et al. 2010,
:cite:`tort2010measuring`)
* Swapping amplitudes time blocks [RECOMMENDED] (Bahramisharif et al. 2013,
:cite:`bahramisharif2013propagating`, Aru et al. 2015,
:cite:`aru2015untangling`)
* Introducing a time lag on phase series (Canolty et al. 2006,
:cite:`canolty2006high`)
"""
import matplotlib.pyplot as plt
from tensorpac import Pac
from tensorpac.signals import pac_signals_wavelet
###############################################################################
# Simulate artificial coupling
###############################################################################
# first, we generate several trials that contain a coupling between a 6hz phase
# and a 70hz amplitude. By default, the returned dataset is organized as
# (n_epochs, n_times) where n_times is the number of time points and n_epochs
# is the number of trials
f_pha = 6 # frequency phase for the coupling
f_amp = 70 # frequency amplitude for the coupling
n_epochs = 20 # number of trials
n_times = 4000 # number of time points
sf = 512. # sampling frequency
data, time = pac_signals_wavelet(sf=sf, f_pha=f_pha, f_amp=f_amp, noise=3.,
n_epochs=n_epochs, n_times=n_times)
###############################################################################
# Extract phases and amplitudes
###############################################################################
# now, we are going to extract all the phases and amplitudes. This is useful
# because it avoids having to re-filter the data each time we compute the
# PAC.
# define a :class:`tensorpac.Pac` object and use the MVL as the main method
# for measuring PAC
p = Pac(idpac=(1, 0, 0), f_pha=(3, 10, 1, .2), f_amp=(50, 90, 5, 1),
dcomplex='wavelet', width=12)
# Now, extract all of the phases and amplitudes
phases = p.filter(sf, data, ftype='phase')
amplitudes = p.filter(sf, data, ftype='amplitude')
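# Quick sanity-check sketch (output shapes assumed from tensorpac's filtering
# convention of one array per frequency band, epoch and time point):
#   print(phases.shape, amplitudes.shape)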
###############################################################################
# Compute PAC and surrogates
###############################################################################
# now that the phases and amplitudes are extracted, we can compute the true PAC
# as well as the surrogates. The true PAC value is then normalized with a
# z-score using the distribution of surrogates
plt.figure(figsize=(16, 12))
for i, k in enumerate(range(4)):
# change the pac method
p.idpac = (5, k, 1)
# compute only the pac without filtering
xpac = p.fit(phases, amplitudes, n_perm=20)
# plot
title = p.str_surro.replace(' (', '\n(')
plt.subplot(2, 2, k + 1)
p.comodulogram(xpac.mean(-1), title=title, cmap='Reds', vmin=0,
fz_labels=18, fz_title=20, fz_cblabel=18)
plt.tight_layout()
plt.show()
| bsd-3-clause |
jordancheah/zipline | zipline/assets/futures.py | 15 | 5311 | from pandas import Timestamp, Timedelta
from pandas.tseries.tools import normalize_date
class FutureChain(object):
""" Allows users to look up future contracts.
Parameters
----------
asset_finder : AssetFinder
An AssetFinder for future contract lookups, in particular the
AssetFinder of the TradingAlgorithm instance.
get_datetime : function
A function that returns the simulation datetime, in particular
the get_datetime method of the TradingAlgorithm instance.
root_symbol : str
The root symbol of a future chain.
as_of_date : pandas.Timestamp, optional
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this date is
the primary contract, etc. If not provided, the current
simulation date is used as the as_of_date.
Attributes
----------
root_symbol : str
The root symbol of the future chain.
as_of_date
The current as-of date of this future chain.
Methods
-------
as_of(dt)
offset(time_delta)
Raises
------
RootSymbolNotFound
Raised when the FutureChain is initialized with a root symbol for which
a future chain could not be found.
"""
def __init__(self, asset_finder, get_datetime, root_symbol,
as_of_date=None):
self.root_symbol = root_symbol
# Reference to the algo's AssetFinder for contract lookups
self._asset_finder = asset_finder
# Reference to the algo's get_datetime to know the current dt
self._algorithm_get_datetime = get_datetime
# If an as_of_date is provided, self._as_of_date uses that
# value, otherwise None. This attribute backs the as_of_date property.
if as_of_date:
self._as_of_date = normalize_date(Timestamp(as_of_date, tz='UTC'))
else:
self._as_of_date = None
# Attribute to cache the most up-to-date chain, and the dt when it was
# last updated.
self._current_chain = []
self._last_updated = None
# Get the initial chain, since self._last_updated is None.
self._maybe_update_current_chain()
def __repr__(self):
# NOTE: The string returned cannot be used to instantiate this
# exact FutureChain, since we don't want to display the asset
# finder and get_datetime function to the user.
if self._as_of_date:
return "FutureChain(root_symbol='%s', as_of_date='%s')" % (
self.root_symbol, self.as_of_date)
else:
return "FutureChain(root_symbol='%s')" % self.root_symbol
def _get_datetime(self):
"""
Returns the normalized simulation datetime.
Returns
-------
pandas.Timestamp
The normalized datetime of FutureChain's TradingAlgorithm.
"""
return normalize_date(
Timestamp(self._algorithm_get_datetime(), tz='UTC')
)
@property
def as_of_date(self):
"""
The current as-of date of this future chain.
Returns
-------
pandas.Timestamp
The user-provided as_of_date if given, otherwise the
current datetime of the simulation.
"""
if self._as_of_date is not None:
return self._as_of_date
else:
return self._get_datetime()
def _maybe_update_current_chain(self):
""" Updates the current chain if it's out of date, then returns
it.
Returns
-------
list
The up-to-date current chain, a list of Future objects.
"""
dt = self._get_datetime()
if (self._last_updated is None) or (self._last_updated != dt):
self._current_chain = self._asset_finder.lookup_future_chain(
self.root_symbol,
self.as_of_date,
dt
)
self._last_updated = dt
return self._current_chain
def __getitem__(self, key):
return self._maybe_update_current_chain()[key]
def __len__(self):
return len(self._maybe_update_current_chain())
def __iter__(self):
return iter(self._maybe_update_current_chain())
def as_of(self, dt):
""" Get the future chain for this root symbol as of a specific date.
Parameters
----------
dt : datetime.datetime or pandas.Timestamp or str, optional
The as_of_date for the new chain.
Returns
-------
FutureChain
"""
return FutureChain(
asset_finder=self._asset_finder,
get_datetime=self._algorithm_get_datetime,
root_symbol=self.root_symbol,
as_of_date=dt
)
def offset(self, time_delta):
""" Get the future chain for this root symbol with a given
offset from the current as_of_date.
Parameters
----------
time_delta : datetime.timedelta or pandas.Timedelta or str
The offset from the current as_of_date for the new chain.
Returns
-------
FutureChain
"""
return self.as_of(self.as_of_date + Timedelta(time_delta))
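# Usage sketch (root symbol, offset and surrounding objects are hypothetical;
# inside a TradingAlgorithm the finder and clock come from the algorithm itself):
#   chain = FutureChain(asset_finder, get_datetime, 'CL')
#   front_contract = chain[0]            # primary contract as of today
#   next_week = chain.offset('7 days')   # chain rooted one week ahead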
| apache-2.0 |
SpatialMetabolomics/ims-simulator | ims_simulator/collectStats.py | 1 | 2783 | #!/usr/bin/env python
from pyimzml.ImzMLParser import ImzMLParser
import numpy as np
def statistics(imzml):
"""
Returns a dictionary:
1) 'sparsity':
Histogram of m/z differences between neighboring centroids in each spectrum.
2) 'intensity':
Histogram of centroid intensities
3) 'min_intensity':
Array of minimum centroid intensities for each spectrum
"""
sparsity_hist_bins = np.linspace(-4, 1, 250)
sparsity_hist = np.zeros(sparsity_hist_bins.shape[0] - 1, dtype=int)
intensity_hist_bins = np.linspace(0, 10, 250)
intensity_hist = np.zeros(intensity_hist_bins.shape[0] - 1, dtype=int)
min_intensities = []
for i, coords in enumerate(imzml.coordinates):
mzs, intensities = imzml.getspectrum(i)
sparsity_hist += np.histogram(np.log10(np.diff(mzs)),
sparsity_hist_bins)[0]
intensity_hist += np.histogram(np.log10(intensities),
intensity_hist_bins)[0]
min_intensities.append(min(intensities))
return {
'sparsity': [sparsity_hist, sparsity_hist_bins],
'intensity': [intensity_hist, intensity_hist_bins],
'min_intensity': min_intensities
}
def plotHistograms(stats_real, stats_sim, key, colors=['b', 'g']):
"""
Plot statistic distribution for real and simulated data on the same plot.
Key must be one of 'sparsityHist', 'intensityHist'.
First two arguments are the script-produced stats loaded via np.load
"""
xlabels = {
'sparsityHist': "log10(m/z difference between neighboring peaks)",
'intensityHist': 'log10(centroid intensity)'
}
import matplotlib.pyplot as plt
h0 = stats_real[key]
h1 = stats_sim[key]
plt.figure(figsize=(12, 6))
mzs = h0[1][:-1]
plt.fill_between(mzs, h0[0], color=colors[0], alpha=0.5, label='Real')
plt.fill_between(mzs, h1[0], color=colors[1], alpha=0.5, label='Simulated')
plt.xlabel(xlabels[key])
plt.ylabel("Peak counts")
plt.legend()
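# Usage sketch (file names hypothetical; the .npz files are the ones written by
# the __main__ block below):
#   stats_real = np.load('real_stats.npz')
#   stats_sim = np.load('simulated_stats.npz')
#   plotHistograms(stats_real, stats_sim, 'intensityHist')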
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="collect statistics about IMS data")
parser.add_argument('input', type=str, help="input file in .imzML format")
parser.add_argument('output', type=str, help="output file (numpy-readable)")
args = parser.parse_args()
imzml = ImzMLParser(args.input)
stats = statistics(imzml)
sparsityHist = stats['sparsity']
intensityHist = stats['intensity']
with open(args.output, "w+") as f:
np.savez_compressed(f,
sparsityHist=sparsityHist,
intensityHist=intensityHist,
minIntensities=stats['min_intensity'])
| apache-2.0 |
gfyoung/pandas | pandas/tests/indexes/base_class/test_constructors.py | 2 | 1495 | import numpy as np
import pytest
from pandas import Index, MultiIndex
import pandas._testing as tm
class TestIndexConstructor:
# Tests for the Index constructor, specifically for cases that do
# not return a subclass
@pytest.mark.parametrize("value", [1, np.int64(1)])
def test_constructor_corner(self, value):
# corner case
msg = (
r"Index\(\.\.\.\) must be called with a collection of some "
f"kind, {value} was passed"
)
with pytest.raises(TypeError, match=msg):
Index(value)
@pytest.mark.parametrize("index_vals", [[("A", 1), "B"], ["B", ("A", 1)]])
def test_construction_list_mixed_tuples(self, index_vals):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
index = Index(index_vals)
assert isinstance(index, Index)
assert not isinstance(index, MultiIndex)
def test_constructor_wrong_kwargs(self):
# GH #19348
with pytest.raises(TypeError, match="Unexpected keyword arguments {'foo'}"):
with tm.assert_produces_warning(FutureWarning):
Index([], foo="bar")
@pytest.mark.xfail(reason="see GH#21311: Index doesn't enforce dtype argument")
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Index(["a", "b", "c"], dtype=float)
| bsd-3-clause |
jobovy/tact | aa/genfunc/binney_tremaine.py | 1 | 4925 | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from matplotlib.ticker import MaxNLocator
import matplotlib.cm as cm
from genfunc_3d import find_actions, check_angle_solution
import test_potentials as pot
class logarithmic(object):
def __init__(self):
self.qz2 = 0.7**2
self.qy2 = 0.9**2
def pot(self,x):
return 0.5*np.log(x[0]**2+x[1]**2/self.qy2+x[2]**2/self.qz2+0.1)
def H(self,x):
return 0.5*(x[3]**2+x[4]**2+x[5]**2)+self.pot(x[:3])
def tot_force(self,x,y,z):
p = (x**2+y**2/self.qy2+z**2/self.qz2+0.1)
return -np.array([x,y/self.qy2,z/self.qz2])/p
def find_x(self,y,z):
return np.sqrt(np.exp(1.)-(y**2/self.qy2+z**2/self.qz2+0.1))
from numpy.fft import fft
def second_diff(x):
sd = np.ones(len(x)-1,dtype=complex)
sd[1:] = x[2:]+x[:-2]-2.*x[1:-1]
sd[0] = 2.*(x[1]-x[0])
return sd
def find_freqs(timeseries,results):
freq = np.ones(3)
n = len(timeseries)
frq = np.arange(n)/timeseries[-1]
frq = frq[range(n/2)]
for i in range(3):
X = fft(results.T[i])[range(n/2)]
X[0] = np.real(X[0])
X[-1] = np.real(X[-1])
sd = second_diff(X)
peak = np.argmax(np.abs(X))
while(peak==0):
print "H"
X[0]+=sd[0]*np.pi/n
sd = second_diff(X)
peak = np.argmax(np.abs(X))
sm,s,sp = sd[peak-1:peak+2]
xm = -np.real((s+2.*sm)/(s-sm))
xp = np.real((s+2.*sp)/(s-sp))
x = .5*(xp+xm)
freq[i]=(peak-x)*2.*np.pi/timeseries[-1]
return freq
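# Note: the second-difference interpolation above shifts each FFT peak off the
# discrete grid, so the returned frequencies are not limited to multiples of
# 2*pi/T.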
def plot_spectrum(timeseries,x,axis=None,xlabel="",ylabel=""):
n = len(timeseries) # length of the signal
k = np.arange(n)
T = timeseries[-1]
frq = k/T # two sides frequency range
frq = frq[range(n/2)] # one side frequency range
X = fft(x)/n # fft computing and normalization
X = X[range(n/2)]
print(np.fft.fftfreq(timeseries.shape[-1]))
print(frq[1])
if(axis==None):
plt.plot(frq,np.abs(X))
plt.show()
else:
axis.plot(frq,np.abs(X),'k')
axis.set_xlabel(xlabel)
axis.set_ylabel(ylabel)
def plots(timeseries,results):
fig,axi = plt.subplots(2,3,figsize=[6.64,6.64/1.6])
plt.subplots_adjust(wspace=0.4,hspace=0.25)
axi[0][0].plot(results.T[0],results.T[1],'k')
axi[0][0].set_xlabel(r'$x$')
axi[0][0].set_ylabel(r'$y$')
axi[0][1].plot(results.T[0],results.T[2],'k')
axi[0][1].set_xlabel(r'$x$')
axi[0][1].set_ylabel(r'$z$')
axi[0][2].plot(results.T[1],results.T[2],'k')
axi[0][2].set_xlabel(r'$y$')
axi[0][2].set_ylabel(r'$z$')
plot_spectrum(timeseries,results.T[0],axi[1][0],xlabel=r'Freq/$2\pi$',ylabel=r'$|$FT($x$)$|$')
for i in range(3):
axi[1][i].set_xlim(0.,0.4)
axi[1][i].xaxis.set_major_locator(MaxNLocator(5))
axi[0][i].xaxis.set_major_locator(MaxNLocator(5))
plot_spectrum(timeseries,results.T[1],axi[1][1],xlabel=r'Freq/$2\pi$',ylabel=r'$|$FT($y$)$|$')
plot_spectrum(timeseries,results.T[2],axi[1][2],xlabel=r'Freq/$2\pi$',ylabel=r'$|$FT($z$)$|$')
plt.savefig("../../Documents/thesis/genfunc_aa/nonchaotic_spectrum.pdf",bbox_inches='tight')
plt.clf()
from genfunc_3d import eval_mean_error_functions, check_angle_solution as ctas
def print_freqs():
log = logarithmic()
timeseries = np.linspace(0.,500.,2048)
ntheta,nphi = 30,30
for i in np.arange(4.,ntheta+1)/(ntheta+1):
for j in .5*np.pi*np.arange(15.,nphi+1)/(nphi+1):
# if(i>0.9 and j>np.pi*0.45):
# if(j>np.pi/4.):
# if(i>0.59 and j>np.pi/2.*0.9):
print np.arccos(i),j
st,ct = np.sin(np.arccos(i)),i
sp,cp = np.sin(j),np.cos(j)
combo = cp*cp*st*st+sp*sp*st*st/log.qy2+ct*ct/log.qz2
r = np.sqrt((np.exp(1.)-0.1)/combo)
initial = np.array([r*cp*st,r*sp*st,r*ct,0.0001,0.0001,0.0001])
# initial = np.array([0.111937987197,0.0104758765442,1.12993449025,0.0001,0.0001,0.0001])
results = odeint(pot.orbit_derivs2,initial,timeseries,args=(log,),rtol=1e-5,atol=1e-5)
# print(log.H(initial),log.H(results[-1]))
plots(timeseries,results)
# freq = find_freqs(timeseries,results)
L = find_actions(results, timeseries, N_matrix = 4, ifloop=True,ifprint = False)
# if(L==None):
# break
(act,ang,n_vec,toy_aa,para),loop = L
E = eval_mean_error_functions(act,ang,n_vec,toy_aa,timeseries,withplot=False)/np.std(timeseries)
# ctas(ang,n_vec,toy_aa,timeseries)
# print freq[0],freq[1],freq[2]
print ang[3],ang[4],ang[5],initial[0],initial[1],initial[2],act[0],act[1],act[2] #,E[0],E[1],E[2],E[3],E[4],E[5] #,freq[0],freq[1],freq[2]
exit()
def plot_freqs():
plt.figure(figsize=[3.32,3.6])
g = np.genfromtxt("with_fft")
omega21 = map(lambda i,j: np.abs(j)/i*2. if i>1.2 else j/i,g.T[0],g.T[1])
omega31 = map(lambda i,j: j/i*2. if
i>1.2 else j/i,g.T[0],g.T[2])
# G = np.sqrt(g.T[12]**2) #+g.T[13]**2+g.T[14]**2)
# C = G/np.max(G)
plt.scatter(omega21,omega31,c='k',marker ='.',s=1.,edgecolors="none")
plt.xlabel(r'$\Omega_2/\Omega_1$')
plt.ylabel(r'$\Omega_3/\Omega_1$')
plt.ylim(1.1,2.1)
plt.savefig('binney_tremaine_fig345.pdf')
print_freqs()
# plot_freqs()
| gpl-3.0 |
olologin/scikit-learn | sklearn/neighbors/graph.py | 14 | 6609 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self):
"""Return the query based on include_self param"""
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default=False.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
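# Sketch of the 'distance' mode described above (same toy X as in the docstring;
# entries hold Euclidean distances instead of 0/1 connectivity):
#   A = kneighbors_graph([[0], [3], [1]], 2, mode='distance', include_self=False)
#   A.toarray()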
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
HarllanAndrye/nilmtk | nilmtk/results.py | 6 | 7403 | import abc
import pandas as pd
import copy
from .timeframe import TimeFrame
from nilmtk.utils import get_tz, tz_localize_naive
class Results(object):
"""Stats results from each node need to be assigned to a specific
class so we know how to combine results from multiple chunks. For
example, Energy can simply be summed, while dropout rate should be
averaged, and gaps need to be merged across chunk boundaries. Results
objects contain a DataFrame, the index of which is the start timestamp for
which the results are valid; the first column ('end') is the end
timestamp for which the results are valid. Other columns are accumulators
for the results.
Attributes
----------
_data : DataFrame
Index is period start.
Columns are: `end` and any columns for internal storage of stats.
Static Attributes
-----------------
name : str
The string used to cache this results object.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
self._data = pd.DataFrame(columns=['end'])
def combined(self):
"""Return all results from each chunk combined. Either return single
float for all periods or a dict where necessary, e.g. if
calculating Energy for a meter which records both apparent
power and active power then get active power with
energyresults.combined['active']
"""
return self._data[self._columns_with_end_removed()].sum()
def per_period(self):
"""return a DataFrame. Index is period start.
Columns are: end and <stat name>
"""
return copy.deepcopy(self._data)
def simple(self):
"""Returns the simplest representation of the results."""
return self.combined()
def append(self, timeframe, new_results):
"""Append a single result.
Parameters
----------
timeframe : nilmtk.TimeFrame
new_results : dict
"""
if not isinstance(timeframe, TimeFrame):
raise TypeError("`timeframe` must be of type 'nilmtk.TimeFrame',"
" not '{}' type.".format(type(timeframe)))
if not isinstance(new_results, dict):
raise TypeError("`new_results` must of a dict, not '{}' type."
.format(type(new_results)))
# check that there is no overlap
for index, series in self._data.iterrows():
tf = TimeFrame(index, series['end'])
tf.check_for_overlap(timeframe)
row = pd.DataFrame(index=[timeframe.start],
columns=['end'] + new_results.keys())
row['end'] = timeframe.end
for key, val in new_results.iteritems():
row[key] = val
self._data = self._data.append(row, verify_integrity=True)
self._data.sort_index(inplace=True)
def check_for_overlap(self):
# TODO this could be made much faster
n = len(self._data)
index = self._data.index
for i in range(n):
row1 = self._data.iloc[i]
tf1 = TimeFrame(index[i], row1['end'])
for j in range(i+1, n):
row2 = self._data.iloc[j]
tf2 = TimeFrame(index[j], row2['end'])
tf1.check_for_overlap(tf2)
def update(self, new_result):
"""Add results from a new chunk.
Parameters
----------
new_result : Results subclass (same
class as self) from new chunk of data.
"""
if not isinstance(new_result, self.__class__):
raise TypeError("new_results must be of type '{}'"
.format(self.__class__))
if new_result._data.empty:
return
self._data = self._data.append(new_result._data)
self._data.sort_index(inplace=True)
self.check_for_overlap()
def unify(self, other):
"""Take results from another table of data (another physical meter)
and merge those results into self. For example, if we have a dual-split
mains supply then we want to merge the results from each physical meter.
The two sets of results must be for exactly the same timeframes.
Parameters
----------
other : Results subclass (same class as self).
Results calculated from another table of data.
"""
assert isinstance(other, self.__class__)
for i, row in self._data.iterrows():
if (other._data['end'].loc[i] != row['end'] or
i not in other._data.index):
raise RuntimeError("The sections we are trying to merge"
" do not have the same end times so we"
" cannot merge them.")
def import_from_cache(self, cached_stat, sections):
"""
Parameters
----------
cached_stat : DataFrame of cached data
sections : list of nilmtk.TimeFrame objects
describing the sections we want to load stats for.
"""
if cached_stat.empty:
return
tz = get_tz(cached_stat)
usable_sections_from_cache = []
def append_row(row, section):
row = row.astype(object)
# We stripped off the timezone when exporting to cache
# so now we must put the timezone back.
row['end'] = tz_localize_naive(row['end'], tz)
if row['end'] == section.end:
usable_sections_from_cache.append(row)
for section in sections:
if not section:
continue
try:
rows_matching_start = cached_stat.loc[section.start]
except KeyError:
pass
else:
if isinstance(rows_matching_start, pd.Series):
append_row(rows_matching_start, section)
else:
for row_i in range(rows_matching_start.shape[0]):
row = rows_matching_start.iloc[row_i]
append_row(row, section)
self._data = pd.DataFrame(usable_sections_from_cache)
self._data.sort_index(inplace=True)
def export_to_cache(self):
"""
Returns
-------
pd.DataFrame
Notes
-----
Objects are converted using `DataFrame.convert_objects()`.
The reason for doing this is to strip out the timezone
information from data columns. We have to do this otherwise
Pandas complains if we try to put a column with multiple
timezones (e.g. Europe/London across a daylight saving
boundary).
"""
return self._data.convert_objects()
def timeframes(self):
"""Returns a list of timeframes covered by this Result."""
# For some reason, using `iterrows()` messes with the
# timezone of the index, hence we need to 'manually' iterate
# over the rows.
return [TimeFrame(self._data.index[i], self._data.iloc[i]['end'])
for i in range(len(self._data))]
def _columns_with_end_removed(self):
cols = set(self._data.columns)
if len(cols) > 0:
cols.remove('end')
cols = list(cols)
return cols
def __repr__(self):
return str(self._data)
| apache-2.0 |
alexeyum/scikit-learn | sklearn/datasets/tests/test_lfw.py | 55 | 7877 | """These tests for LFW require medium-size data downloading and processing.
If the data has not already been downloaded by running the examples,
the tests won't run (they are skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
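    # For reference, the two line formats written above mirror the real LFW
    # pairs files: same-person lines are "<name>\t<img_i>\t<img_j>" and
    # different-person lines are "<name1>\t<img_i>\t<name2>\t<img_j>".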
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion, and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
ltiao/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 6 | 9933 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
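# Illustrative sketch (not used by the tests above): the classic, unmodified
# Weiszfeld iteration for the spatial median / Fermat-Weber point. sklearn's
# `_modified_weiszfeld_step` adds safeguards for iterates that coincide with a
# data point; this plain version simply assumes that never happens.
def _plain_weiszfeld(X, n_iter=100):
    y = X.mean(axis=0)
    for _ in range(n_iter):
        dist = np.sqrt(((X - y) ** 2).sum(axis=1))
        weights = 1.0 / dist
        y = (X * weights[:, np.newaxis]).sum(axis=0) / weights.sum()
    return y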
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
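# Background sketch (an assumption about the asymptotics, consistent with the
# check above but not asserted by sklearn itself): for very large n_samples
# the Theil-Sen breakdown point approaches 1 - 0.5 ** (1 / n_subsamples),
# which is 1 - 1/sqrt(2) ~= 0.293 when n_subsamples == 2 as in the check above.
def _asymptotic_breakdown_point(n_subsamples):
    return 1 - 0.5 ** (1.0 / n_subsamples)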
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/io/json/test_ujson.py | 2 | 38511 | # -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import calendar
import datetime
import decimal
from functools import partial
import locale
import math
import re
import time
import dateutil
import numpy as np
import pytest
import pytz
import pandas._libs.json as ujson
from pandas._libs.tslib import Timestamp
import pandas.compat as compat
from pandas.compat import StringIO, range, u
from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
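def _clean_dict_example():
    # Hypothetical illustration (not exercised by the tests): non-string keys
    # are converted so the result can be splatted into a pandas constructor
    # with ``**``, which requires string keyword names.
    assert _clean_dict({0: "a", 1: "b"}) == {"0": "a", "1": "b"}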
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
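# Quick reference (informal summary of the ``orient`` values parametrized
# above, for a DataFrame):
#   None / "columns": {column -> {index -> value}}
#   "split":          {"index": [...], "columns": [...], "data": [[...], ...]}
#   "records":        [{column -> value}, ...]
#   "values":         [[...], ...]  (data only)
#   "index":          {index -> {column -> value}}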
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ujson.encode(val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("too_big_char", [
"[",
"{",
])
def test_decode_depth_too_big(self, too_big_char):
with pytest.raises(ValueError):
ujson.decode(too_big_char * (1024 * 1024))
@pytest.mark.parametrize("bad_string", [
"\"TESTING", # Unterminated.
"\"TESTING\\\"", # Unterminated escape.
"tru", # Broken True.
"fa", # Broken False.
"n", # Broken None.
])
def test_decode_bad_string(self, bad_string):
with pytest.raises(ValueError):
ujson.decode(bad_string)
@pytest.mark.parametrize("broken_json", [
'{{1337:""}}',
'{{"key":"}',
'[[[true',
])
def test_decode_broken_json_leak(self, broken_json):
for _ in range(1000):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("invalid_dict", [
"{{{{31337}}}}", # No key.
"{{{{\"key\":}}}}", # No value.
"{{{{\"key\"}}}}", # No colon or value.
])
def test_decode_invalid_dict(self, invalid_dict):
with pytest.raises(ValueError):
ujson.decode(invalid_dict)
@pytest.mark.parametrize("numeric_int_as_str", [
"31337", "-31337" # Should work with negatives.
])
def test_decode_numeric_int(self, numeric_int_as_str):
assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encode_unicode_4bytes_utf8_fail(self):
with pytest.raises(OverflowError):
ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf")
def test_encode_null_character(self):
wrapped_input = "31337 \x00 1337"
output = ujson.encode(wrapped_input)
assert wrapped_input == json.loads(output)
assert output == json.dumps(wrapped_input)
assert wrapped_input == ujson.decode(output)
alone_input = "\x00"
output = ujson.encode(alone_input)
assert alone_input == json.loads(output)
assert output == json.dumps(alone_input)
assert alone_input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decode_null_character(self):
wrapped_input = "\"31337 \\u0000 31337\""
assert ujson.decode(wrapped_input) == json.loads(wrapped_input)
def test_encode_list_long_conversion(self):
long_input = [9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807]
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert long_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(long_input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encode_long_conversion(self):
long_input = 9223372036854775807
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert output == json.dumps(long_input)
assert long_input == ujson.decode(output)
@pytest.mark.parametrize("int_exp", [
"1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"
])
def test_decode_numeric_int_exp(self, int_exp):
assert ujson.decode(int_exp) == json.loads(int_exp)
def test_dump_to_file(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, data_bytes):
self.bytes += data_bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dump_file_args_error(self):
with pytest.raises(TypeError):
ujson.dump([], "")
def test_load_file(self):
data = "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = StringIO(data)
assert exp_data == ujson.load(f)
f = StringIO(data)
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_like(self):
class FileLike(object):
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = FileLike()
assert exp_data == ujson.load(f)
f = FileLike()
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_args_error(self):
with pytest.raises(TypeError):
ujson.load("[]")
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encode_numeric_overflow(self):
with pytest.raises(OverflowError):
ujson.encode(12839128391289382193812939)
def test_encode_numeric_overflow_nested(self):
class Nested(object):
x = 12839128391289382193812939
for _ in range(0, 100):
with pytest.raises(OverflowError):
ujson.encode(Nested())
@pytest.mark.parametrize("val", [
3590016419, 2**31, 2**32, (2**32) - 1
])
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
doc = '{{"id": {val}}}'.format(val=val)
assert ujson.decode(doc)["id"] == val
def test_encode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
escape_input = base * 1024 * 1024 * 2
ujson.encode(escape_input)
def test_decode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
quote = compat.str_to_bytes("\"")
escape_input = quote + (base * 1024 * 1024 * 2) + quote
ujson.decode(escape_input)
def test_to_dict(self):
d = {u("key"): 31337}
class DictTest(object):
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
def test_default_handler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
msg = "Maximum recursion level reached"
with pytest.raises(OverflowError, match=msg):
ujson.encode(_TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)
def my_handler(_):
return "foobar"
assert '"foobar"' == ujson.encode(_TestObject("foo"),
default_handler=my_handler)
def my_handler_raises(_):
raise TypeError("I raise for anything")
with pytest.raises(TypeError, match="I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(_):
return 42
assert ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)) == 42
def my_obj_handler(_):
return datetime.datetime(2013, 2, 3)
assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
obj_list = [_TestObject("foo"), _TestObject("bar")]
assert (json.loads(json.dumps(obj_list, default=str)) ==
ujson.decode(ujson.encode(obj_list, default_handler=str)))
class TestNumpyJSONTests(object):
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
b = np.bool(bool_input)
assert ujson.decode(ujson.encode(b)) == b
def test_bool_array(self):
bool_array = np.array([
True, False, True, True,
False, True, False, False], dtype=np.bool)
output = np.array(ujson.decode(
ujson.encode(bool_array)), dtype=np.bool)
tm.assert_numpy_array_equal(bool_array, output)
def test_int(self, any_int_dtype):
klass = np.dtype(any_int_dtype).type
num = klass(1)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_int_array(self, any_int_dtype):
arr = np.arange(100, dtype=np.int)
arr_input = arr.astype(any_int_dtype)
arr_output = np.array(ujson.decode(ujson.encode(arr_input)),
dtype=any_int_dtype)
tm.assert_numpy_array_equal(arr_input, arr_output)
def test_int_max(self, any_int_dtype):
if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit():
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
klass = np.dtype(any_int_dtype).type
# uint64 max will always overflow,
# as it's encoded to signed.
if any_int_dtype == "uint64":
num = np.iinfo("int64").max
else:
num = np.iinfo(any_int_dtype).max
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(256.2013)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float_array(self, float_dtype):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
float_input = arr.astype(float_dtype)
float_output = np.array(ujson.decode(
ujson.encode(float_input, double_precision=15)),
dtype=float_dtype)
tm.assert_almost_equal(float_input, float_output)
def test_float_max(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(np.finfo(float_dtype).max / 10)
tm.assert_almost_equal(klass(ujson.decode(
ujson.encode(num, double_precision=15))), num)
def test_array_basic(self):
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
@pytest.mark.parametrize("shape", [
(10, 10),
(5, 5, 4),
(100, 1),
])
def test_array_reshaped(self, shape):
arr = np.arange(100)
arr = arr.reshape(shape)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
def test_array_list(self):
arr_list = ["a", list(), dict(), dict(), list(),
42, 97.8, ["a", "b"], {"key": "val"}]
arr = np.array(arr_list)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
def test_array_float(self):
dtype = np.float32
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
arr = arr.reshape((5, 5, 4))
arr_out = np.array(ujson.decode(ujson.encode(arr)), dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
arr_out = ujson.decode(ujson.encode(arr), numpy=True, dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
with pytest.raises(TypeError):
ujson.encode(np.array(1))
@pytest.mark.parametrize("bad_input,exc_type,kwargs", [
([{}, []], ValueError, {}),
([42, None], TypeError, {}),
([["a"], 42], ValueError, {}),
([42, {}, "a"], TypeError, {}),
([42, ["a"], 42], ValueError, {}),
(["a", "b", [], "c"], ValueError, {}),
([{"a": "b"}], ValueError, dict(labelled=True)),
({"a": {"b": {"c": 42}}}, ValueError, dict(labelled=True)),
([{"a": 42, "b": 23}, {"c": 17}], ValueError, dict(labelled=True))
])
def test_array_numpy_except(self, bad_input, exc_type, kwargs):
with pytest.raises(exc_type):
ujson.decode(ujson.dumps(bad_input), numpy=True, **kwargs)
def test_array_numpy_labelled(self):
labelled_input = {"a": []}
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(["a"]) == output[1]).all()
assert output[2] is None
labelled_input = [{"a": 42}]
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.array([u("a")]) == output[2]).all()
assert (np.array([42]) == output[0]).all()
assert output[1] is None
# see gh-10837: write out the dump explicitly
# so there is no dependency on iteration order
input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
'{"a": 2.4, "b": 78}]')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert output[1] is None
assert (np.array([u("a"), "b"]) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert (np.array(["1", "2", "3"]) == output[1]).all()
assert (np.array(["a", "b"]) == output[2]).all()
class TestPandasJSONTests(object):
def test_dataframe(self, orient, numpy):
if orient == "records" and numpy:
pytest.skip("Not idiomatic pandas")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(df, **encode_kwargs),
**decode_kwargs)
# Ensure proper DataFrame initialization.
if orient == "split":
dec = _clean_dict(output)
output = DataFrame(**dec)
else:
output = DataFrame(output)
# Corrections to enable DataFrame comparison.
if orient == "values":
df.columns = [0, 1, 2]
df.index = [0, 1]
elif orient == "records":
df.index = [0, 1]
elif orient == "index":
df = df.transpose()
tm.assert_frame_equal(output, df, check_dtype=False)
def test_dataframe_nested(self, orient):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
nested = {"df1": df, "df2": df.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"df1": ujson.decode(ujson.encode(df, **kwargs)),
"df2": ujson.decode(ujson.encode(df, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_dataframe_numpy_labelled(self, orient):
if orient in ("split", "values"):
pytest.skip("Incompatible with labelled=True")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"], dtype=np.int)
kwargs = {} if orient is None else dict(orient=orient)
output = DataFrame(*ujson.decode(ujson.encode(df, **kwargs),
numpy=True, labelled=True))
if orient is None:
df = df.T
elif orient == "records":
df.index = [0, 1]
tm.assert_frame_equal(output, df)
def test_series(self, orient, numpy):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(s, **encode_kwargs),
**decode_kwargs)
if orient == "split":
dec = _clean_dict(output)
output = Series(**dec)
else:
output = Series(output)
if orient in (None, "index"):
s.name = None
output = output.sort_values()
s.index = ["6", "7", "8", "9", "10", "15"]
elif orient in ("records", "values"):
s.name = None
s.index = [0, 1, 2, 3, 4, 5]
tm.assert_series_equal(output, s, check_dtype=False)
def test_series_nested(self, orient):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
nested = {"s1": s, "s2": s.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"s1": ujson.decode(ujson.encode(s, **kwargs)),
"s2": ujson.decode(ujson.encode(s, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_index(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# Column indexed.
output = Index(ujson.decode(ujson.encode(i)), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i), numpy=True), name="index")
tm.assert_index_equal(i, output)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
output = Index(ujson.decode(ujson.encode(i, orient="values")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="values"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="records")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="records"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="index")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="index"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
def test_datetime_index(self):
date_unit = "ns"
rng = date_range("1/1/2000", periods=20)
encoded = ujson.encode(rng, date_unit=date_unit)
decoded = DatetimeIndex(np.array(ujson.decode(encoded)))
tm.assert_index_equal(rng, decoded)
ts = Series(np.random.randn(len(rng)), index=rng)
decoded = Series(ujson.decode(ujson.encode(ts, date_unit=date_unit)))
idx_values = decoded.index.values.astype(np.int64)
decoded.index = DatetimeIndex(idx_values)
tm.assert_series_equal(ts, decoded)
@pytest.mark.parametrize("invalid_arr", [
"[31337,]", # Trailing comma.
"[,31337]", # Leading comma.
"[]]", # Unmatched bracket.
"[,]", # Only comma.
])
def test_decode_invalid_array(self, invalid_arr):
with pytest.raises(ValueError):
ujson.decode(invalid_arr)
@pytest.mark.parametrize("arr", [
[], [31337]
])
def test_decode_array(self, arr):
assert arr == ujson.decode(str(arr))
@pytest.mark.parametrize("extreme_num", [
9223372036854775807, -9223372036854775808
])
def test_decode_extreme_numbers(self, extreme_num):
assert extreme_num == ujson.decode(str(extreme_num))
@pytest.mark.parametrize("too_extreme_num", [
"9223372036854775808", "-90223372036854775809"
])
def test_decode_too_extreme_numbers(self, too_extreme_num):
with pytest.raises(ValueError):
ujson.decode(too_extreme_num)
def test_decode_with_trailing_whitespaces(self):
assert {} == ujson.decode("{}\n\t ")
def test_decode_with_trailing_non_whitespaces(self):
with pytest.raises(ValueError):
ujson.decode("{}\n\t a")
def test_decode_array_with_big_int(self):
with pytest.raises(ValueError):
ujson.loads("[18446098363113800555]")
@pytest.mark.parametrize("float_number", [
1.1234567893, 1.234567893, 1.34567893,
1.4567893, 1.567893, 1.67893,
1.7893, 1.893, 1.3,
])
@pytest.mark.parametrize("sign", [-1, 1])
def test_decode_floating_point(self, sign, float_number):
float_number *= sign
tm.assert_almost_equal(float_number,
ujson.loads(str(float_number)),
check_less_precise=15)
def test_encode_big_set(self):
s = set()
for x in range(0, 100000):
s.add(x)
# Make sure no Exception is raised.
ujson.encode(s)
def test_encode_empty_set(self):
assert "[]" == ujson.encode(set())
def test_encode_set(self):
s = {1, 2, 3, 4, 5, 6, 7, 8, 9}
enc = ujson.encode(s)
dec = ujson.decode(enc)
for v in dec:
assert v in s
| bsd-3-clause |
pratapvardhan/scikit-learn | sklearn/utils/tests/test_extmath.py | 12 | 23419 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_almost_equal(s[:rank], sa[:rank])
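# Illustrative sketch (an assumption, not sklearn's actual implementation):
# the core of randomized_svd is a randomized range finder followed by an
# exact SVD of the small projected matrix (Halko, Martinsson & Tropp, 2011).
def _naive_randomized_svd(M, n_components, n_oversamples=10, random_state=0):
    rng = np.random.RandomState(random_state)
    # Project onto a random subspace and orthonormalize to approximate range(M).
    Q, _ = linalg.qr(M.dot(rng.normal(size=(M.shape[1],
                                            n_components + n_oversamples))),
                     mode='economic')
    # Exact SVD of the much smaller matrix Q.T @ M, then map back.
    Uhat, s, V = linalg.svd(Q.T.dot(M), full_matrices=False)
    return Q.dot(Uhat)[:, :n_components], s[:n_components], V[:n_components]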
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structured approximate rank `rank` and an
    # important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # let us try again without a 'low_rank' component: just regularly but
    # slowly decreasing singular values; the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
    # randomized_svd with power_iteration_normalizer='none' diverges for a
    # large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
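# Illustrative sketch (an assumption about the sign convention, not the
# sklearn code itself): with u_based_decision=True, svd_flip resolves the SVD
# sign ambiguity by making, for each component, the U entry of largest
# absolute value positive (and flipping the matching row of V to compensate).
def _naive_svd_flip_u(u, v):
    max_rows = np.argmax(np.abs(u), axis=0)
    signs = np.sign(u[max_rows, np.arange(u.shape[1])])
    return u * signs, v * signs[:, np.newaxis]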
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert_true(u_based)
assert_false(v_based)
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert_true(u_based)
assert_false(v_based)
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(1 / (1 + np.exp(-x)))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
    # In some old versions, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
kambysese/mne-python | mne/viz/tests/test_circle.py | 14 | 5024 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
#
# License: Simplified BSD
import numpy as np
import pytest
import matplotlib.pyplot as plt
from mne.viz import plot_connectivity_circle, circular_layout
def test_plot_connectivity_circle():
"""Test plotting connectivity circle."""
node_order = ['frontalpole-lh', 'parsorbitalis-lh',
'lateralorbitofrontal-lh', 'rostralmiddlefrontal-lh',
'medialorbitofrontal-lh', 'parstriangularis-lh',
'rostralanteriorcingulate-lh', 'temporalpole-lh',
'parsopercularis-lh', 'caudalanteriorcingulate-lh',
'entorhinal-lh', 'superiorfrontal-lh', 'insula-lh',
'caudalmiddlefrontal-lh', 'superiortemporal-lh',
'parahippocampal-lh', 'middletemporal-lh',
'inferiortemporal-lh', 'precentral-lh',
'transversetemporal-lh', 'posteriorcingulate-lh',
'fusiform-lh', 'postcentral-lh', 'bankssts-lh',
'supramarginal-lh', 'isthmuscingulate-lh', 'paracentral-lh',
'lingual-lh', 'precuneus-lh', 'inferiorparietal-lh',
'superiorparietal-lh', 'pericalcarine-lh',
'lateraloccipital-lh', 'cuneus-lh', 'cuneus-rh',
'lateraloccipital-rh', 'pericalcarine-rh',
'superiorparietal-rh', 'inferiorparietal-rh', 'precuneus-rh',
'lingual-rh', 'paracentral-rh', 'isthmuscingulate-rh',
'supramarginal-rh', 'bankssts-rh', 'postcentral-rh',
'fusiform-rh', 'posteriorcingulate-rh',
'transversetemporal-rh', 'precentral-rh',
'inferiortemporal-rh', 'middletemporal-rh',
'parahippocampal-rh', 'superiortemporal-rh',
'caudalmiddlefrontal-rh', 'insula-rh', 'superiorfrontal-rh',
'entorhinal-rh', 'caudalanteriorcingulate-rh',
'parsopercularis-rh', 'temporalpole-rh',
'rostralanteriorcingulate-rh', 'parstriangularis-rh',
'medialorbitofrontal-rh', 'rostralmiddlefrontal-rh',
'lateralorbitofrontal-rh', 'parsorbitalis-rh',
'frontalpole-rh']
label_names = ['bankssts-lh', 'bankssts-rh', 'caudalanteriorcingulate-lh',
'caudalanteriorcingulate-rh', 'caudalmiddlefrontal-lh',
'caudalmiddlefrontal-rh', 'cuneus-lh', 'cuneus-rh',
'entorhinal-lh', 'entorhinal-rh', 'frontalpole-lh',
'frontalpole-rh', 'fusiform-lh', 'fusiform-rh',
'inferiorparietal-lh', 'inferiorparietal-rh',
'inferiortemporal-lh', 'inferiortemporal-rh', 'insula-lh',
'insula-rh', 'isthmuscingulate-lh', 'isthmuscingulate-rh',
'lateraloccipital-lh', 'lateraloccipital-rh',
'lateralorbitofrontal-lh', 'lateralorbitofrontal-rh',
'lingual-lh', 'lingual-rh', 'medialorbitofrontal-lh',
'medialorbitofrontal-rh', 'middletemporal-lh',
'middletemporal-rh', 'paracentral-lh', 'paracentral-rh',
'parahippocampal-lh', 'parahippocampal-rh',
'parsopercularis-lh', 'parsopercularis-rh',
'parsorbitalis-lh', 'parsorbitalis-rh',
'parstriangularis-lh', 'parstriangularis-rh',
'pericalcarine-lh', 'pericalcarine-rh', 'postcentral-lh',
'postcentral-rh', 'posteriorcingulate-lh',
'posteriorcingulate-rh', 'precentral-lh', 'precentral-rh',
'precuneus-lh', 'precuneus-rh',
'rostralanteriorcingulate-lh',
'rostralanteriorcingulate-rh', 'rostralmiddlefrontal-lh',
'rostralmiddlefrontal-rh', 'superiorfrontal-lh',
'superiorfrontal-rh', 'superiorparietal-lh',
'superiorparietal-rh', 'superiortemporal-lh',
'superiortemporal-rh', 'supramarginal-lh',
'supramarginal-rh', 'temporalpole-lh', 'temporalpole-rh',
'transversetemporal-lh', 'transversetemporal-rh']
group_boundaries = [0, len(label_names) / 2]
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=group_boundaries)
con = np.random.RandomState(0).randn(68, 68)
plot_connectivity_circle(con, label_names, n_lines=300,
node_angles=node_angles, title='test',
)
pytest.raises(ValueError, circular_layout, label_names, node_order,
group_boundaries=[-1])
pytest.raises(ValueError, circular_layout, label_names, node_order,
group_boundaries=[20, 0])
plt.close('all')
| bsd-3-clause |
xunyou/vincent | examples/stacked_area_examples.py | 11 | 2281 | # -*- coding: utf-8 -*-
"""
Vincent Stacked Area Examples
"""
#Build a Stacked Area Chart from scratch
from vincent import *
import pandas as pd
import pandas.io.data as web
all_data = {}
for ticker in ['AAPL', 'GOOG', 'IBM', 'YHOO', 'MSFT']:
all_data[ticker] = web.get_data_yahoo(ticker, '1/1/2010', '1/1/2013')
price = pd.DataFrame({tic: data['Adj Close']
for tic, data in all_data.items()})
vis = Visualization(width=500, height=300)
vis.padding = {'top': 10, 'left': 50, 'bottom': 50, 'right': 100}
data = Data.from_pandas(price)
vis.data['table'] = data
facets = Transform(type='facet', keys=['data.idx'])
stats = Transform(type='stats', value='data.val')
stat_dat = Data(name='stats', source='table', transform=[facets, stats])
vis.data['stats'] = stat_dat
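# The facet/stats transforms above compute per-index totals over all series;
# the y scale below reads the resulting "sum" field so the axis spans the full
# stacked height of the chart.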
vis.scales['x'] = Scale(name='x', type='time', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', type='linear', nice=True,
domain=DataRef(data='stats', field="sum"))
vis.scales['color'] = Scale(name='color', type='ordinal',
domain=DataRef(data='table', field='data.col'),
range='category20')
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
facet = Transform(type='facet', keys=['data.col'])
stack = Transform(type='stack', point='data.idx', height='data.val')
transform = MarkRef(data='table',transform=[facet, stack])
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="y"),
interpolate=ValueRef(value='monotone'),
y2=ValueRef(field='y2', scale='y'),
fill=ValueRef(scale='color', field='data.col'))
mark = Mark(type='group', from_=transform,
marks=[Mark(type='area',
properties=MarkProperties(enter=enter_props))])
vis.marks.append(mark)
vis.axis_titles(x='Date', y='Price')
vis.legend(title='Tech Stocks')
vis.to_json('vega.json')
#Convenience method
vis = StackedArea(price)
vis.axis_titles(x='Date', y='Price')
vis.legend(title='Tech Stocks')
vis.colors(brew='Paired')
vis.to_json('vega.json')
| mit |
shahankhatch/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
kevin-intel/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 5 | 4483 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=outer_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {:6f} with std. dev. of {:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
| bsd-3-clause |
peterdougstuart/PCWG | pcwg/core/status.py | 2 | 1838 | import pandas as pd
class Status:
Instance = None
@classmethod
def set_verbosity(cls, verbosity):
cls.get().verbosity = verbosity
@classmethod
def add(cls, message, red = False, orange=False, verbosity = 1):
cls.get().add_message(message, red, orange, verbosity)
@classmethod
def set_portfolio_status(cls, completed, total, finished):
cls.get().set_portfolio_status_method(completed, total, finished)
@classmethod
def initialize_status(cls, status_method, set_portfolio_status_method=None, verbosity = 1):
        # Note: verbosity must be passed in (and not read directly from preferences)
        # to avoid a circular reference
status = cls.get()
status.status_method = status_method
status.verbosity = verbosity
if set_portfolio_status_method is not None:
status.set_portfolio_status_method = set_portfolio_status_method
@classmethod
def get(cls):
        if cls.Instance is None:
cls.Instance = Status()
return cls.Instance
def __init__(self):
self.verbosity = 1
def add_message(self, message, red, orange, verbosity):
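        # Messages are only emitted when their verbosity is at or below the
        # configured level; DataFrames are summarised via head() so large tables
        # do not flood the status output, and multi-line text is sent line by line.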
if verbosity <= self.verbosity:
if isinstance(message, pd.DataFrame) or isinstance(message, pd.core.frame.DataFrame):
text = str(message.head())
else:
text = str(message)
lines = text.split("\n")
for line in lines:
self.status_method(line, red, orange, self.verbosity)
def status_method(self, message, red, orange, verbosity):
print message
def set_portfolio_status_method(self, completed, total, finished):
print "{0}/{1} Complete".format(completed, total) | mit |
jmmease/pandas | pandas/tests/io/sas/test_xport.py | 16 | 4831 | import pandas as pd
import pandas.util.testing as tm
from pandas.io.sas.sasreader import read_sas
import numpy as np
import os
# CSV versions of test xpt files were obtained using the R foreign library
# Numbers in a SAS xport file are always float64, so need to convert
# before making comparisons.
def numeric_as_float(data):
for v in data.columns:
if data[v].dtype is np.dtype('int64'):
data[v] = data[v].astype(np.float64)
class TestXport(object):
def setup_method(self, method):
self.dirpath = tm.get_data_path()
self.file01 = os.path.join(self.dirpath, "DEMO_G.xpt")
self.file02 = os.path.join(self.dirpath, "SSHSV1_A.xpt")
self.file03 = os.path.join(self.dirpath, "DRXFCD_G.xpt")
self.file04 = os.path.join(self.dirpath, "paxraw_d_short.xpt")
def test1_basic(self):
# Tests with DEMO_G.xpt (all numeric file)
# Compare to this
data_csv = pd.read_csv(self.file01.replace(".xpt", ".csv"))
numeric_as_float(data_csv)
# Read full file
data = read_sas(self.file01, format="xport")
tm.assert_frame_equal(data, data_csv)
num_rows = data.shape[0]
# Test reading beyond end of file
reader = read_sas(self.file01, format="xport", iterator=True)
data = reader.read(num_rows + 100)
assert data.shape[0] == num_rows
reader.close()
# Test incremental read with `read` method.
reader = read_sas(self.file01, format="xport", iterator=True)
data = reader.read(10)
reader.close()
tm.assert_frame_equal(data, data_csv.iloc[0:10, :])
# Test incremental read with `get_chunk` method.
reader = read_sas(self.file01, format="xport", chunksize=10)
data = reader.get_chunk()
reader.close()
tm.assert_frame_equal(data, data_csv.iloc[0:10, :])
# Test read in loop
m = 0
reader = read_sas(self.file01, format="xport", chunksize=100)
for x in reader:
m += x.shape[0]
reader.close()
assert m == num_rows
# Read full file with `read_sas` method
data = read_sas(self.file01)
tm.assert_frame_equal(data, data_csv)
def test1_index(self):
# Tests with DEMO_G.xpt using index (all numeric file)
# Compare to this
data_csv = pd.read_csv(self.file01.replace(".xpt", ".csv"))
data_csv = data_csv.set_index("SEQN")
numeric_as_float(data_csv)
# Read full file
data = read_sas(self.file01, index="SEQN", format="xport")
tm.assert_frame_equal(data, data_csv, check_index_type=False)
# Test incremental read with `read` method.
reader = read_sas(self.file01, index="SEQN", format="xport",
iterator=True)
data = reader.read(10)
reader.close()
tm.assert_frame_equal(data, data_csv.iloc[0:10, :],
check_index_type=False)
# Test incremental read with `get_chunk` method.
reader = read_sas(self.file01, index="SEQN", format="xport",
chunksize=10)
data = reader.get_chunk()
reader.close()
tm.assert_frame_equal(data, data_csv.iloc[0:10, :],
check_index_type=False)
def test1_incremental(self):
# Test with DEMO_G.xpt, reading full file incrementally
data_csv = pd.read_csv(self.file01.replace(".xpt", ".csv"))
data_csv = data_csv.set_index("SEQN")
numeric_as_float(data_csv)
reader = read_sas(self.file01, index="SEQN", chunksize=1000)
all_data = [x for x in reader]
data = pd.concat(all_data, axis=0)
tm.assert_frame_equal(data, data_csv, check_index_type=False)
def test2(self):
# Test with SSHSV1_A.xpt
# Compare to this
data_csv = pd.read_csv(self.file02.replace(".xpt", ".csv"))
numeric_as_float(data_csv)
data = read_sas(self.file02)
tm.assert_frame_equal(data, data_csv)
def test_multiple_types(self):
# Test with DRXFCD_G.xpt (contains text and numeric variables)
# Compare to this
data_csv = pd.read_csv(self.file03.replace(".xpt", ".csv"))
data = read_sas(self.file03, encoding="utf-8")
tm.assert_frame_equal(data, data_csv)
def test_truncated_float_support(self):
# Test with paxraw_d_short.xpt, a shortened version of:
# http://wwwn.cdc.gov/Nchs/Nhanes/2005-2006/PAXRAW_D.ZIP
# This file has truncated floats (5 bytes in this case).
# GH 11713
data_csv = pd.read_csv(self.file04.replace(".xpt", ".csv"))
data = read_sas(self.file04, format="xport")
tm.assert_frame_equal(data.astype('int64'), data_csv)
| bsd-3-clause |
tuxite/pharmaship | pharmaship/gui/plots/dispatch.py | 1 | 3707 | # -*- coding: utf-8 -*-
from django.utils.translation import gettext as _
import numpy as np
from pharmaship.core.utils import log
from pharmaship.gui.plots import utils
try:
from matplotlib.backends.backend_gtk3agg import (
FigureCanvasGTK3Agg as FigureCanvas)
except Exception:
# To avoid errors when testing in Docker container
from matplotlib.backend_bases import FigureCanvasBase as FigureCanvas
from matplotlib.figure import Figure
KEYS = [
"missing",
"perished",
"warning",
"nc"
]
def get_values(key, data):
"""Return a Dict of values from `data[key]`."""
if key not in data:
return {
"values": [0, 0, 0, 0, 0],
"total": 0
}
result = {
"values": [],
"total": data[key]["total"]
}
for item in KEYS:
result["values"].append(len(data[key][item]))
result["values"].append(data[key]["in_range"])
return result
def figure(data, params):
"""Create a graph showing situation of elements per category."""
category_names = [
{
"name": _('Missing'),
"color": "#2e3436",
},
{
"name": _('Perished'),
"color": "#a40000",
},
{
"name": _('Near expiry'),
"color": "#f57900",
},
{
"name": _('Non-conform'),
"color": "#3465a4",
},
{
"name": _('In range'),
"color": "#069a17",
},
]
results = {
"molecules": {
"label": _("Medicines"),
},
"equipment": {
"label": _("Equipment"),
},
"rescue_bag": {
"label": _("Rescue bags"),
},
"first_aid_kit": {
"label": _("First Aid Kits"),
}
}
if params.has_telemedical:
results["telemedical"] = {
"label": _("Telemedical"),
}
if params.has_laboratory:
results["laboratory"] = {
"label": _("Laboratory"),
}
for item in results:
results[item].update(get_values(item, data))
fig = Figure(figsize=(5, 2), dpi=100, facecolor="#00000000")
ax = fig.add_subplot()
labels = []
values = []
totals = []
for item in results:
labels.append(results[item]["label"])
values.append(results[item]["values"])
totals.append(results[item]["total"])
totals = np.array(totals)
data = np.array(values)
data_cum = data.cumsum(axis=1)
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, 1)
for i in range(len(category_names)):
_values = data[:, i]
_values_cum = data_cum[:, i]
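        # Convert counts to fractions of each category total; np.divide with
        # where=totals!=0 leaves a zero width when a category has no items,
        # avoiding division-by-zero warnings.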
widths = np.divide(_values, totals, out=np.zeros(_values.shape, dtype=float), where=totals!=0)
starts = np.divide(_values_cum, totals, out=np.zeros(_values_cum.shape, dtype=float), where=totals!=0) - widths
ax.barh(
labels,
widths,
left=starts,
height=0.5,
label=category_names[i]["name"],
color=category_names[i]["color"]
)
xcenters = starts + widths / 2
text_color = utils.fg_color(category_names[i]["color"])
for y, (x, c) in enumerate(zip(xcenters, _values)):
if c == 0:
continue
ax.text(x, y, str(int(c)), ha='center', va='center',
color=text_color)
ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
loc='lower left', fontsize='small')
canvas = FigureCanvas(fig) # a Gtk.DrawingArea
canvas.set_size_request(800, 300)
return canvas
| agpl-3.0 |
elkeschaper/hts | hts/data_tasks/qc_matplotlib.py | 1 | 7838 | # (C) 2015, 2016 Elke Schaper @ Vital-IT.ch
"""
:synopsis: ``qc_matplotlib`` implements common plots to visualise HTS data available on the fly.
.. moduleauthor:: Elke Schaper <[email protected]>
"""
import itertools
import logging
import math
import matplotlib
#matplotlib.use('pdf')
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
import numpy as np
import GPy
LOG = logging.getLogger(__name__)
def create_report(*args, **kwargs):
"""
This methods is expected by run.Run .
Todo: Needs implementation if Matplotlib reports are required.
"""
return None
################ Readout wise methods ####################
def heat_map_single(run, data_tag, plate_tag, **kwargs):
""" Create a heat_map for a single plate
Create a heat_map for a single plate
"""
plate = run.plates[plate_tag]
data = plate.filter(value_data_type="readout", value_data_tag=data_tag, return_list=False, **kwargs)
# Invert all columns (to comply with naming standards in HTS).
# Unfortunately, simply inverting the y-axis did not seem to work.
#data = data[::-1]
if not type(data) == np.ndarray:
data = np.array(data)
data = np.ma.array(data, mask=np.isnan(data))
fig = plt.figure()
plt.pcolor(data)
plt.colorbar()
ax = plt.gca()
ax.set_ylim(ax.get_ylim()[::-1])
ax.set_aspect('equal')
#fig.set_size_inches(5, 4, forward=True)
plt.show()
def heat_map_single_gaussian_process_model(run, data_tag_readout, sample_tag, plate_tag, magnification=5, *args, **kwargs):
""" Create a heat_map for multiple readouts
Create a heat_map for multiple readouts
model_as_gaussian_process(self, data_tag_readout, sample_key,
kernel_type='m32',
n_max_iterations=1000,
plot_kwargs=False,
"""
wells_high_resolution = list(itertools.product(np.arange(0,run.width,1/magnification), np.arange(0,run.height,1/magnification)))
x_wells = np.array(wells_high_resolution)
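    # Prediction grid with `magnification` points per well in each direction, so
    # the Gaussian-process surface is rendered at sub-well resolution.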
plate = run.plates[plate_tag]
m, y_mean, y_std = plate.model_as_gaussian_process(data_tag_readout=data_tag_readout, sample_tag=sample_tag, **kwargs)
y_predicted_mean, y_predicted_var = m.predict(x_wells)
y_predicted_as_matrix = y_predicted_mean.reshape(magnification*run.width,magnification*run.height).transpose()
fig = plt.figure()
# im is of type matplotlib.image.AxesImage
plt.imshow(y_predicted_as_matrix)
plt.colorbar()
plt.gca().invert_yaxis()
fig.set_size_inches(5, 4, forward=True)
plt.show()
def slice_single_gaussian_process_model(run, data_tag_readout, sample_tag, plate_tag, slice=5, **kwargs):
""" Create a heat_map for multiple readouts
Create a heat_map for multiple readouts
# Currently: using Plotly.
# Perhaps, matplotlib.axes._subplots.AxesSubplot can be integrated with Matplotlib's AxesGrid as above.
"""
plate = run.plates[plate_tag]
m, y_mean, y_std = plate.model_as_gaussian_process(data_tag_readout=data_tag_readout, sample_tag=sample_tag, **kwargs)
m.plot(fixed_inputs=[(1,slice)], plot_data=False)
################ Batch wise methods ####################
def heat_map_multiple(run, data_tag, result_file=None, n_plates_max=10, *args, **kwargs):
""" Create a heat_map for multiple readouts
Create a heat_map for multiple readouts
"""
data = run.filter(value_data_type="readout", value_data_tag=data_tag, return_list=False, **kwargs)
# Invert all columns (to comply with naming standards in HTS).
# Unfortunately, simply inverting the y-axis did not seem to work.
data = [plate[::-1] for plate in data]
data = np.array(data)
plate_tags = run.plates.keys()
if len(plate_tags) > n_plates_max:
plate_tags = plate_tags[::round(len(plate_tags)/n_plates_max)]
a = math.ceil(len(plate_tags)/2)
b = 2
fig = plt.figure()
grid = AxesGrid(fig, 111,
nrows_ncols=(b, a),
axes_pad=0.05,
share_all=True,
label_mode="L",
cbar_location="right",
cbar_mode="single",
)
for val, ax in zip(data,grid):
im = ax.imshow(val)
grid.cbar_axes[0].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(False)
if result_file:
pp = PdfPages(result_file)
pp.savefig(fig, dpi=20, bbox_inches='tight')
pp.close()
fig.set_size_inches(30, 14, forward=True)
plt.show()
# Do we need this line?
#fig.clear()
def heat_map_multiple_gaussian_process_model(run, kernel_tag, result_file=None, magnification=5, n_plates_max=10, *args, **kwargs):
""" Create a heat_map for multiple readouts
Create a heat_map for multiple readouts
"""
wells_high_resolution = list(itertools.product(np.arange(0,run.width,1/magnification), np.arange(0,run.height,1/magnification)))
x_wells = np.array(wells_high_resolution)
plate_tags = run.plates.keys()
if len(plate_tags) > n_plates_max:
plate_tags = plate_tags[::round(len(plate_tags)/n_plates_max)]
a = math.ceil(len(plate_tags)/2)
b = 2
data = []
for plate_tag in plate_tags:
gp = run.gp_models
gp_model = list(gp.filter(plate_tag=plate_tag, kernel_tag=kernel_tag))[0]
y_predicted_mean_normalized, y_predicted_var_normalized, y_predicted_mean, y_predicted_sd = gp_model.predict(x_wells)
y_predicted_as_matrix = y_predicted_mean.reshape(magnification*run.width,magnification*run.height).transpose()
data.append(y_predicted_as_matrix)
fig = plt.figure()
grid = AxesGrid(fig, 111,
nrows_ncols=(b, a),
axes_pad=0.05,
share_all=True,
label_mode="L",
cbar_location="right",
cbar_mode="single",
)
for val, ax in zip(data,grid):
im = ax.imshow(val)
# im is of type matplotlib.image.AxesImage
grid.cbar_axes[0].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(False)
if result_file:
pp = PdfPages(result_file)
pp.savefig(fig, dpi=20, bbox_inches='tight')
pp.close()
fig.set_size_inches(30, 14, forward=True)
plt.show()
def slice_multiple_gaussian_process_model(run, data_tag_readout, sample_tag, result_file=None, slice=5, n_plates_max=10, *args, **kwargs):
""" Create a heat_map for multiple readouts
Create a heat_map for multiple readouts
# Currently: using Plotly.
# Perhaps, matplotlib.axes._subplots.AxesSubplot can be integrated with Matplotlib's AxesGrid as above.
"""
plate_tags = run.plates.keys()
if len(plate_tags) > n_plates_max:
plate_tags = plate_tags[::round(len(plate_tags)/n_plates_max)]
a = math.ceil(len(plate_tags)/2)
b = 2
models = []
for plate_tag in plate_tags:
plate = run.plates[plate_tag]
m, y_mean, y_std = plate.model_as_gaussian_process(data_tag_readout=data_tag_readout, sample_tag=sample_tag, **kwargs)
models.append(m)
GPy.plotting.change_plotting_library('plotly')
figure = GPy.plotting.plotting_library().figure(len(models), 1, shared_xaxes=True,)
for i,m in enumerate(models):
canvas = m.plot(figure=figure, fixed_inputs=[(1,slice)], row=(i+1), plot_data=False)
# canvas is of type matplotlib.axes._subplots.AxesSubplot
#grid.cbar_axes[0].colorbar(im)
GPy.plotting.show(canvas, filename='basic_gp_regression_notebook_slicing')
GPy.plotting.change_plotting_library('matplotlib')
| gpl-2.0 |
tbrockman/artificial-synethesia-network | predict.py | 1 | 5377 | from keras.models import load_model
from osc_handler import OSCHandler
import numpy as np
import sys, cv2, scipy.misc, time, os, PIL
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../WarpPerspective")
import warpperspective
tempo = 120
default_threshold = 0.1
last_prediction = np.array([])
saved_predictions = []
notes = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
def load_model_and_osc(model_path, osc_params):
model = load_model(model_path)
osc = OSCHandler(osc_params[0], osc_params[1])
return model, osc
def convert_midi_values_to_notes(midi_array):
converted = []
for midi in midi_array:
converted.append(convert_midi_to_note(midi))
return converted
def convert_midi_to_note(midi):
index = midi % 12
return notes[index]
def predict_image_and_send_osc(model, image, osc, threshold=0.035):
highest_midi, predictions = predict_image(model, image)
close_midi = threshold_predictions(predictions, highest_midi, threshold)
#saved_predictions.append(convert_midi_values_to_notes(close_midi))
#saved_predictions.append(close_midi)
if (osc):
send_midi_on_osc(osc, close_midi)
return close_midi.tolist()
def threshold_predictions(predictions, highest, threshold):
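    # Keep every MIDI class whose predicted score is within `threshold` of the
    # best prediction; the caller sends all of the surviving classes over OSC.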
value = predictions[0][highest]
indices = (-predictions[0]).argsort()[:5]
# print predictions[0][indices]
close_midi = np.where(predictions > (value - threshold))
return close_midi[1]
def send_midi_on_osc(osc, midi, performance=True):
#global last_prediction
#if not np.array_equal(last_prediction, midi):
# last_prediction = midi
if performance:
send_off = [1] * 8
for track in midi:
# TODO: send osc on for present tracks
# send osc off for not present tracks
osc.sendMessage('/noteon', track)
send_off[track] = 0
for i in range(len(send_off)):
if send_off[i]:
osc.sendMessage('/noteoff', i)
else:
osc.sendMessage('/cnn_midi', midi.tolist())
def predict_image(model, img):
predictions = model.predict(img, batch_size=1)
highest_midi = np.argmax(predictions[0])
return highest_midi, predictions
def preprocess_frame_for_prediction(frame):
img = scipy.misc.imresize(frame, [299, 299])
img = img[np.newaxis, ...] # turn into batch of size 1
return img
def capture_and_preprocess_webcam_image(warper=None):
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if (warper):
frame = warper.warp(frame)
img = preprocess_frame_for_prediction(frame)
return img, frame
def show_image_with_overlayed_midi(img, midi):
notes = convert_midi_values_to_notes(midi)
cv2.putText(img,str(notes),(10,50), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2)
cv2.imshow('frame',img)
if cv2.waitKey(100) & 0xFF == ord('q'):
return 1
else:
return 0
def start_video_capture(model, osc, video_path):
vidcap = cv2.VideoCapture(video_path)
count = 0
while(vidcap.isOpened()):
success, frame = vidcap.read()
count += 1
if success and count % 10 == 0:
img = preprocess_frame_for_prediction(frame)
midi = predict_image_and_send_osc(model, img, osc)
exit = show_image_with_overlayed_midi(frame, midi)
if (exit):
break
vidcap.release()
cv2.destroyAllWindows()
def batch_predict_folder(model, osc=None, folder_path="labeled", save=False, overlay=False):
filenames = os.listdir(folder_path)
if (save):
saved_predictions = []
for i in range(len(filenames)):
if filenames[i].endswith(".jpg"):
image_path = os.path.join(folder_path, filenames[i])
raw = np.asarray(PIL.Image.open(image_path))
img = preprocess_frame_for_prediction(raw)
midi = predict_image_and_send_osc(model, img, osc)
if (save):
saved_predictions.append(midi)
if overlay:
exit = show_image_with_overlayed_midi(raw, midi)
if (exit):
cv2.destroyAllWindows()
break
return saved_predictions
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Need model path for predictions.'
sys.exit(0)
model_path = sys.argv[1]
model, osc = load_model_and_osc(model_path, ('127.0.0.1', 57120))
print osc
if len(sys.argv) > 2:
path = sys.argv[2]
if (os.path.isfile(path)):
start_video_capture(model, osc, path)
elif (os.path.isdir(path)):
            batch_predict_folder(model, osc, path, False, True)
else:
print 'Second argument must be a path to a file or a folder.'
sys.exit(0)
else:
img, frame = capture_and_preprocess_webcam_image()
warped = None
wc = warpperspective.WarpCalibrator()
warper = wc.calibrate(frame)
while(1):
img, frame = capture_and_preprocess_webcam_image(warper)
midi = predict_image_and_send_osc(model, img, osc, default_threshold)
exit = show_image_with_overlayed_midi(frame, midi)
if (exit):
cv2.destroyAllWindows()
break
| apache-2.0 |
lituan/tools | ancestor_sequence/mega_ancestor_msa_parallel.py | 1 | 4329 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pipeline:
construct a phylogenetic tree and estimate ancestral sequences using an MSA
"""
import os
import sys
import subprocess
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Pool
from collections import OrderedDict
from scipy.stats import ttest_ind
def read_mega(mega_f):
with open(mega_f) as o_f:
lines = o_f.readlines()
lines = [line.rstrip('\r\n') for line in lines]
pro_line_num = [i for i, line in enumerate(
lines) if '>' in line] + [len(lines)]
seqs = [lines[n:pro_line_num[i + 1]]
for i, n in enumerate(pro_line_num[:-1])]
seqs = [(seq[0][1:], ''.join(seq[1:])) for seq in seqs]
seq_num = len(seqs)
sequences = []
i = 0
for name,seq in seqs:
if i < (seq_num + 1)/2:
sequences.append(('current',name,seq))
else:
sequences.append(('ancestor',name,seq))
i += 1
return sequences
def get_similarity(seqs):
# seqs format, [['ancestor',pro,'ATR...']...]
def get_sim(r1,r2):
return len([1 for i,r1i in enumerate(r1) if r1i == r2[i]])*1.0/len(r1)
similarity = []
for state,pro,seq in seqs:
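        # Split the concatenated sequence into 18-residue repeats and record the
        # pairwise identity for all 28 pairs (i < j) among the 8 repeats.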
repeats = [seq[i:i+18] for i in range(0,len(seq),18)]
sims = []
for i in range(8):
for j in range(8):
if j > i:
sims.append(get_sim(repeats[i],repeats[j]))
similarity.append((state,pro,sims))
return similarity
def strip_plot(similarity,fname):
# similarity format [['ancestor',pro,[0.3,04...]]...]
p_label = []
for i in range(28):
anc = [si[i] for s,name,si in similarity if s == 'ancestor']
now = [si[i] for s,name,si in similarity if s == 'current']
pvalue = ttest_ind(anc,now,equal_var=False)
if pvalue[-1] < 0.0001:
p_label.append('****')
elif pvalue[-1] < 0.001:
p_label.append('***')
elif pvalue[-1] < 0.01:
p_label.append('**')
elif pvalue[-1] < 0.05:
p_label.append('*')
else:
p_label.append('')
state = [ ]
repeat_pair = []
repeat_pair_similarity = []
keys = [str(i)+'_'+str(j) for i in range(8) for j in range(8) if j > i]
for s,name,sims in similarity:
state += [s] * 28
repeat_pair += keys
repeat_pair_similarity += sims
df = pd.DataFrame({'state':state,'repeat_pair':repeat_pair,'repeat_pair_similarity':repeat_pair_similarity})
sns.set(style='whitegrid', color_codes=True)
f,ax = plt.subplots(figsize=(12,8))
# sns.stripplot(x='repeat_pair',y='repeat_pair_similarity',hue='state',palette='Set2',data=df,jitter=True)
# sns.violinplot(x='repeat_pair',y='repeat_pair_similarity',hue='state',palette='Set2',data=df,split=True)
sns.violinplot(x='repeat_pair',y='repeat_pair_similarity',hue='state',palette='muted',data=df,split=True)
# add pvalue label
anno_y = [max([si[i] for _,_,si in similarity])+0.10 for i in range(28) ]
for i in range(28):
ax.annotate(p_label[i],(i,anno_y[i]))
# remove duplicated legends
handles,labels = ax.get_legend_handles_labels()
plt.legend(handles[:2],labels[:2])
plt.savefig(fname+'_repeats_similarity.png',dpi=300)
return p_label.count('*') + p_label.count('**') + p_label.count('***') + p_label.count('****')
def single_fun(mega_f):
fpath,fname = os.path.split(mega_f)
fname = os.path.splitext(fname)[0]
seqs = read_mega(mega_f)
similarity = get_similarity(seqs)
f_name = os.path.join(fpath,fname)
significant = strip_plot(similarity,f_name)
return fname,significant
def main():
parameters = []
for root,dirs,files in os.walk(sys.argv[-1]):
for f in files:
if f[-17:] == 'most_probable.fas':
f = os.path.join(root,f)
parameters.append(f)
# single_fun(parameters[0])
p = Pool(6)
result = p.map(single_fun,parameters)
p.close()
with open('significant.txt','w') as w_f:
for r in result:
print >> w_f,r[0],'\t',r[1]
print [r for r in result if r[1] > 14]
if __name__ == "__main__":
main()
| cc0-1.0 |
COMBINE-lab/QuantAnalysis | analysis_utils/AnalysisUtils.py | 1 | 2151 |
def filterValues(colname, DF, val):
DF.loc[DF[colname] < val, colname] = 0.0
def relError(c1, c2, DF, cutoff=0.00999999, verbose=False):
import pandas as pd
import numpy as np
nz = DF[DF[c1] > cutoff]
re = (nz[c1] - nz[c2]) / nz[c1]
return re
def proportionalityCorrelation(c1, c2, DF, offset=0.01):
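    # Descriptive note: this returns 2*cov(log(x+offset), log(y+offset)) divided
    # by (var(log(x+offset)) + var(log(y+offset))), a concordance-style measure
    # of proportionality between the two log-transformed columns.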
import numpy as np
return (2.0 * np.log(DF[c1] + offset).cov(np.log(DF[c2] + offset))) / (np.log(DF[c1] + offset).var() + np.log(DF[c2] + offset).var())
def relDiffTP(c1, c2, DF, cutoff=0.1):
import pandas as pd
import numpy as np
tpindex = DF[DF[c1] > cutoff]
rd = (tpindex[c2] - tpindex[c1]) / tpindex[c1]
return rd
def getMedian(df): return df.median()
def getMean(df): return df.mean()
def relDiff(c1, c2, DF, cutoff=0.01, verbose=False):
import pandas as pd
"""
Computes the relative difference between the values
in columns c1 and c2 of DF.
c1 and c2 are column names and DF is a Pandas data frame.
Values less than cutoff will be treated as 0.
The relative difference is defined as
d(x_i, y_i) =
0.0 if x_i and y_i are 0
        (x_i - y_i) / (0.5 * |x_i + y_i|) otherwise
This function returns two values.
rd is a DataFrame where the "relDiff" column contains all
of the computed relative differences.
nz is a set of indexes into rd for data points where
x_i and y_i were not *both* zero.
"""
import numpy as np
rd = pd.DataFrame(data = {"Name" : DF.index, "relDiff" : np.zeros(len(DF.index))*np.nan})
rd.set_index("Name", inplace=True)
bothZero = DF.loc[(DF[c1] < cutoff) & (DF[c2] < cutoff)].index
nonZero = DF.index.difference(bothZero)
if (verbose):
print("Zero occurs in both columns {} times".format(len(rd.loc[bothZero])))
print("Nonzero occurs in at least 1 column {} times".format(len(rd.loc[nonZero])))
allRD = 2.0 * ((DF[c1] - DF[c2]) / (DF[c1] + DF[c2]).abs())
assert(len(rd.loc[nonZero]["relDiff"]) == len(allRD[nonZero]))
rd["relDiff"][nonZero] = allRD[nonZero]
if len(bothZero) > 0:
rd["relDiff"][bothZero] = 0.0
return rd, nonZero
| mit |
annayqho/TheCannon | code/lamost/mass_age/no_reddening/run.py | 1 | 3440 | """
Run the test step on all the LAMOST DR2 objects.
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '/home/annaho')
#from lamost import load_spectra
#import dataset
#import model
from TheCannon import dataset
from TheCannon import model
#from astropy.table import Table
from matplotlib.colors import LogNorm
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
import os
def test_step_iteration(ds, m, starting_guess):
errs, chisq = m.infer_labels(ds, starting_guess)
return ds.test_label_vals, chisq, errs
def test_step(date):
direc = "../xcalib_4labels"
wl = np.load("%s/wl.npz" %direc)['arr_0']
test_ID = np.load("%s/output/%s_ids.npz" %(direc, date))['arr_0']
print(str(len(test_ID)) + " objects")
test_flux = np.load("%s/output/%s_norm.npz" %(direc,date))['arr_0']
test_ivar = np.load("%s/output/%s_norm.npz" %(direc,date))['arr_1']
lamost_label = np.load("%s/output/%s_tr_label.npz" %(direc,date))['arr_0']
apogee_label = np.load("./tr_label.npz")['arr_0']
ds = dataset.Dataset(wl, test_ID, test_flux[0:2,:], test_ivar[0:2,:],
lamost_label, test_ID, test_flux, test_ivar)
ds.set_label_names(
['T_{eff}', '\log g', '[Fe/H]', '[\\alpha/Fe]', 'logM', 'A_k'])
m = model.CannonModel(2)
m.coeffs = np.load("./coeffs.npz")['arr_0']
m.scatters = np.load("./scatters.npz")['arr_0']
m.chisqs = np.load("./chisqs.npz")['arr_0']
m.pivots = np.load("./pivots.npz")['arr_0']
nlabels = len(m.pivots)
nobj = len(test_ID)
nguesses = 7
choose = np.random.randint(0,nobj,size=nguesses)
starting_guesses = apogee_label[choose]-m.pivots
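    # Run the label inference from several randomly drawn APOGEE-label starting
    # guesses; the lowest chi-squared solution per object is selected further below.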
labels = np.zeros((nguesses, nobj, nlabels))
chisq = np.zeros((nguesses, nobj))
errs = np.zeros(labels.shape)
for ii,guess in enumerate(starting_guesses):
a,b,c = test_step_iteration(ds,m,starting_guesses[ii])
labels[ii,:] = a
chisq[ii,:] = b
errs[ii,:] = c
np.savez("output/%s_cannon_label_guesses.npz" %date, labels)
np.savez("output/%s_cannon_chisq_guesses.npz" %date, labels)
choose = np.argmin(chisq, axis=0)
best_chisq = np.min(chisq, axis=0)
best_labels = np.zeros((nobj, nlabels))
best_errs = np.zeros(best_labels.shape)
for jj,val in enumerate(choose):
best_labels[jj,:] = labels[:,jj,:][val]
best_errs[jj,:] = errs[:,jj,:][val]
np.savez("output/%s_all_cannon_labels.npz" %date, best_labels)
np.savez("output/%s_cannon_label_chisq.npz" %date, best_chisq)
np.savez("output/%s_cannon_label_errs.npz" %date, best_errs)
ds.test_label_vals = best_labels
#ds.diagnostics_survey_labels(figname="%s_survey_labels_triangle.png" %date)
ds.test_label_vals = best_labels[:,0:3]
ds.set_label_names(['T_{eff}', '\log g', '[M/H]'])
ds.diagnostics_1to1(figname = "%s_1to1_test_label" %date)
if __name__=="__main__":
dates = os.listdir("/home/share/LAMOST/DR2/DR2_release")
dates = np.array(dates)
dates = np.delete(dates, np.where(dates=='.directory')[0][0])
dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0])
dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0])
for date in dates:
print("running %s" %date)
if glob.glob("output/%s_all_cannon_labels.npz" %date): print("already done")
else:
test_step(date)
| mit |
ilo10/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
kcompher/pygraphistry | graphistry/plotter.py | 1 | 18526 | from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
import random
import string
import copy
import types
import pandas
from . import pygraphistry
from . import util
class Plotter(object):
"""Graph plotting class.
Created using ``Graphistry.bind()``.
Chained calls successively add data and visual encodings, and end with a plot call.
To streamline reuse and replayable notebooks, Plotter manipulations are immutable. Each chained call returns a new instance that derives from the previous one. The old plotter or the new one can then be used to create different graphs.
The class supports convenience methods for mixing calls across Pandas, NetworkX, and IGraph.
"""
_defaultNodeId = '__nodeid__'
def __init__(self):
# Bindings
self._edges = None
self._nodes = None
self._source = None
self._destination = None
self._node = None
self._edge_title = None
self._edge_label = None
self._edge_color = None
self._edge_weight = None
self._point_title = None
self._point_label = None
self._point_color = None
self._point_size = None
# Settings
self._height = 500
self._url_params = {'info': 'true'}
def __repr__(self):
bnds = ['edges', 'nodes', 'source', 'destination', 'node', 'edge_title',
'edge_label', 'edge_color', 'edge_weight', 'point_title',
'point_label', 'point_color', 'point_size']
stgs = ['height', 'url_params']
rep = {'bindings': dict([(f, getattr(self, '_' + f)) for f in bnds]),
'settings': dict([(f, getattr(self, '_' + f)) for f in stgs])}
if util.in_ipython():
from IPython.lib.pretty import pretty
return pretty(rep)
else:
return str(rep)
def bind(self, source=None, destination=None, node=None,
edge_title=None, edge_label=None, edge_color=None, edge_weight=None,
point_title=None, point_label=None, point_color=None, point_size=None):
"""Relate data attributes to graph structure and visual representation.
        To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not affect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs.
:param source: Attribute containing an edge's source ID
:type source: String.
:param destination: Attribute containing an edge's destination ID
:type destination: String.
:param node: Attribute containing a node's ID
:type node: String.
        :param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination are used.
:type edge_title: HtmlString.
:param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings.
:type edge_label: HtmlString.
:param edge_color: Attribute overriding edge's color. `See palette definitions <https://github.com/graphistry/pygraphistry/blob/master/graphistry.com/palette.html>`_ for values. Based on Color Brewer.
:type edge_color: String.
:param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value.
:type edge_weight: String.
:param point_title: Attribute overriding node's minimized label text. By default, the node ID is used.
:type point_title: HtmlString.
:param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings.
:type point_label: HtmlString.
:param point_color: Attribute overriding node's color. `See palette definitions <https://github.com/graphistry/pygraphistry/blob/master/graphistry.com/palette.html>`_ for values. Based on Color Brewer.
:type point_color: Integer.
:param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom.
:type point_size: HtmlString.
:returns: Plotter.
:rtype: Plotter.
**Example: Minimal**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst')
**Example: Node colors**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst',
node='id', point_color='color')
**Example: Chaining**
::
import graphistry
g = graphistry.bind(source='src', destination='dst', node='id')
g1 = g.bind(point_color='color1', point_size='size1')
g.bind(point_color='color1b')
g2a = g1.bind(point_color='color2a')
g2b = g1.bind(point_color='color2b', point_size='size2b')
g3a = g2a.bind(point_size='size3a')
g3b = g2b.bind(point_size='size3b')
In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to:
::
g: default/default
g1: color1/size1
g2a: color2a/size1
g2b: color2b/size2b
g3a: color2a/size3a
g3b: color2b/size3b
"""
res = copy.copy(self)
res._source = source or self._source
res._destination = destination or self._destination
res._node = node or self._node
res._edge_title = edge_title or self._edge_title
res._edge_label = edge_label or self._edge_label
res._edge_color = edge_color or self._edge_color
res._edge_weight = edge_weight or self._edge_weight
res._point_title = point_title or self._point_title
res._point_label = point_label or self._point_label
res._point_color = point_color or self._point_color
res._point_size = point_size or self._point_size
return res
def nodes(self, nodes):
"""Specify the set of nodes and associated data.
Must include any nodes referenced in the edge list.
:param nodes: Nodes and their attributes.
        :type nodes: Pandas dataframe
:returns: Plotter.
:rtype: Plotter.
**Example**
::
import graphistry
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = graphistry
.bind(source='src', destination='dst')
.edges(es)
vs = pandas.DataFrame({'v': [0,1,2], 'lbl': ['a', 'b', 'c']})
g = g.bind(node='v').nodes(vs)
g.plot()
"""
res = copy.copy(self)
res._nodes = nodes
return res
def edges(self, edges):
"""Specify edge list data and associated edge attribute values.
:param edges: Edges and their attributes.
        :type edges: Pandas dataframe, NetworkX graph, or IGraph graph.
:returns: Plotter.
:rtype: Plotter.
**Example**
::
import graphistry
df = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
graphistry
.bind(source='src', destination='dst')
.edges(df)
.plot()
"""
res = copy.copy(self)
res._edges = edges
return res
def graph(self, ig):
"""Specify the node and edge data.
:param ig: Graph with node and edge attributes.
:type ig: NetworkX graph or an IGraph graph.
:returns: Plotter.
:rtype: Plotter.
"""
res = copy.copy(self)
res._edges = ig
res._nodes = None
return res
def settings(self, height=None, url_params={}):
"""Specify iframe height and add URL parameter dictionary.
The library takes care of URI component encoding for the dictionary.
:param height: Height in pixels.
:type height: Integer.
:param url_params: Dictionary of querystring parameters to append to the URL.
:type url_params: Dictionary
"""
res = copy.copy(self)
res._height = height or self._height
res._url_params = dict(self._url_params, **url_params)
return res
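    def _settings_usage_sketch(self):
        # Hedged usage sketch: like bind(), settings() is chainable and returns
        # a new Plotter, so `self` keeps its original height and URL parameters.
        # The 'info' key reuses the default defined in __init__; the values are
        # purely illustrative.
        return self.settings(height=800, url_params={'info': 'false'})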
def plot(self, graph=None, nodes=None):
"""Upload data to the Graphistry server and show as an iframe of it.
Uses the currently bound schema structure and visual encodings. Optional parameters override the current bindings.
When used in a notebook environment, will also show an iframe of the visualization.
:param graph: Edge table or graph.
:type graph: Pandas dataframe, NetworkX graph, or IGraph graph.
:param nodes: Nodes table.
:type nodes: Pandas dataframe.
**Example: Simple**
::
import graphistry
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
graphistry
.bind(source='src', destination='dst')
.edges(es)
.plot()
**Example: Shorthand**
::
import graphistry
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
graphistry
.bind(source='src', destination='dst')
.plot(es)
"""
if graph is None:
if self._edges is None:
util.error('Graph/edges must be specified.')
g = self._edges
else:
g = graph
n = self._nodes if nodes is None else nodes
self._check_mandatory_bindings(not isinstance(n, type(None)))
dataset = self._plot_dispatch(g, n)
if dataset is None:
util.error('Expected Pandas dataframe(s) or Igraph/NetworkX graph.')
dataset_name = pygraphistry.PyGraphistry._etl(dataset)
viz_url = pygraphistry.PyGraphistry._viz_url(dataset_name, self._url_params)
if util.in_ipython() is True:
from IPython.core.display import HTML
return HTML(self._iframe(viz_url))
else:
print('Url: ', viz_url)
import webbrowser
webbrowser.open(viz_url)
return self
def pandas2igraph(self, edges, directed=True):
"""Convert a pandas edge dataframe to an IGraph graph.
Uses current bindings. Defaults to treating edges as directed.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst')
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
g.bind(point_color='community').plot(ig)
"""
import igraph
self._check_mandatory_bindings(False)
self._check_bound_attribs(edges, ['source', 'destination'], 'Edge')
if self._node is None:
util.warn('"node" is unbound, automatically binding it to "%s".' % Plotter._defaultNodeId)
self._node = self._node or Plotter._defaultNodeId
eattribs = edges.columns.values.tolist()
eattribs.remove(self._source)
eattribs.remove(self._destination)
cols = [self._source, self._destination] + eattribs
etuples = [tuple(x) for x in edges[cols].values]
return igraph.Graph.TupleList(etuples, directed=directed, edge_attrs=eattribs,
vertex_name_attr=self._node)
def igraph2pandas(self, ig):
"""Under current bindings, transform an IGraph into a pandas edges dataframe and a nodes dataframe.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst').edges(es)
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
(es2, vs2) = g.igraph2pandas(ig)
g.nodes(vs2).bind(point_color='community').plot()
"""
def get_edgelist(ig):
idmap = dict(enumerate(ig.vs[self._node]))
for e in ig.es:
t = e.tuple
yield dict({self._source: idmap[t[0]], self._destination: idmap[t[1]]},
**e.attributes())
self._check_mandatory_bindings(False)
if self._node is None:
util.warn('"node" is unbound, automatically binding it to "%s".' % Plotter._defaultNodeId)
ig.vs[Plotter._defaultNodeId] = [v.index for v in ig.vs]
self._node = Plotter._defaultNodeId
elif self._node not in ig.vs.attributes():
util.error('Vertex attribute "%s" bound to "node" does not exist.' % self._node)
edata = get_edgelist(ig)
ndata = [v.attributes() for v in ig.vs]
nodes = pandas.DataFrame(ndata, columns=ig.vs.attributes())
cols = [self._source, self._destination] + ig.es.attributes()
edges = pandas.DataFrame(edata, columns=cols)
return (edges, nodes)
def networkx2pandas(self, g):
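        """Under the current bindings, transform a NetworkX graph into a pandas
        edges dataframe and a nodes dataframe (same contract as igraph2pandas).
        """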
def get_nodelist(g):
for n in g.nodes(data=True):
yield dict({self._node: n[0]}, **n[1])
def get_edgelist(g):
for e in g.edges(data=True):
yield dict({self._source: e[0], self._destination: e[1]}, **e[2])
self._check_mandatory_bindings(False)
vattribs = g.nodes(data=True)[0][1] if g.number_of_nodes() > 0 else []
if self._node is None:
util.warn('"node" is unbound, automatically binding it to "%s".' % Plotter._defaultNodeId)
elif self._node in vattribs:
util.error('Vertex attribute "%s" already exists.' % self._node)
self._node = self._node or Plotter._defaultNodeId
nodes = pandas.DataFrame(get_nodelist(g))
edges = pandas.DataFrame(get_edgelist(g))
return (edges, nodes)
def _check_mandatory_bindings(self, node_required):
if self._source is None or self._destination is None:
util.error('Both "source" and "destination" must be bound before plotting.')
if node_required and self._node is None:
util.error('Node identifier must be bound when using node dataframe.')
def _check_bound_attribs(self, df, attribs, typ):
cols = df.columns.values.tolist()
for a in attribs:
b = getattr(self, '_' + a)
if b not in cols:
util.error('%s attribute "%s" bound to "%s" does not exist.' % (typ, a, b))
def _plot_dispatch(self, graph, nodes):
if isinstance(graph, pandas.core.frame.DataFrame):
return self._pandas2dataset(graph, nodes)
try:
import igraph
if isinstance(graph, igraph.Graph):
(e, n) = self.igraph2pandas(graph)
return self._pandas2dataset(e, n)
except ImportError:
pass
try:
import networkx
if isinstance(graph, networkx.classes.graph.Graph) or \
isinstance(graph, networkx.classes.digraph.DiGraph) or \
isinstance(graph, networkx.classes.multigraph.MultiGraph) or \
isinstance(graph, networkx.classes.multidigraph.MultiDiGraph):
(e, n) = self.networkx2pandas(graph)
return self._pandas2dataset(e, n)
except ImportError:
pass
return None
def _pandas2dataset(self, edges, nodes):
def bind(df, pbname, attrib, default=None):
bound = getattr(self, attrib)
if bound:
if bound in df.columns.tolist():
df[pbname] = df[bound]
else:
util.warn('Attribute "%s" bound to %s does not exist.' % (bound, attrib))
elif default:
df[pbname] = df[default]
self._check_bound_attribs(edges, ['source', 'destination'], 'Edge')
nodeid = self._node or Plotter._defaultNodeId
elist = edges.reset_index(drop=True)
bind(elist, 'edgeColor', '_edge_color')
bind(elist, 'edgeLabel', '_edge_label')
bind(elist, 'edgeTitle', '_edge_title')
bind(elist, 'edgeWeight', '_edge_weight')
if nodes is None:
nodes = pandas.DataFrame()
nodes[nodeid] = pandas.concat([edges[self._source], edges[self._destination]],
ignore_index=True).drop_duplicates()
else:
self._check_bound_attribs(nodes, ['node'], 'Vertex')
nlist = nodes.reset_index(drop=True)
bind(nlist, 'pointColor', '_point_color')
bind(nlist, 'pointLabel', '_point_label')
bind(nlist, 'pointTitle', '_point_title', nodeid)
bind(nlist, 'pointSize', '_point_size')
return self._make_dataset(elist, nlist)
def _make_dataset(self, elist, nlist=None):
edict = elist.where((pandas.notnull(elist)), None).to_dict(orient='records')
name = ''.join(random.choice(string.ascii_uppercase +
string.digits) for _ in range(10))
bindings = {'idField': self._node or Plotter._defaultNodeId,
'destinationField': self._destination, 'sourceField': self._source}
dataset = {'name': pygraphistry.PyGraphistry._dataset_prefix + name,
'bindings': bindings, 'type': 'edgelist', 'graph': edict}
if nlist is not None:
ndict = nlist.where((pandas.notnull(nlist)), None).to_dict(orient='records')
dataset['labels'] = ndict
return dataset
def _iframe(self, url):
tag = '<iframe src="%s" style="width:100%%; height:%dpx; border: 1px solid #DDD"></iframe>'
return tag % (url, self._height)
| bsd-3-clause |
kpei/cs-rating | wl_model/spcl_case.py | 1 | 1599 | import pandas as pd
import pymc3 as pm
import numpy as np
def fix_teams(h_teams):
h_teams.loc[7723, 'Name'] = 'Morior Invictus'
h_teams.loc[8241, 'Name'] = 'ex-Nitrious'
h_teams.loc[8349, 'Name'] = 'Good People'
h_teams.loc[8008, 'Name'] = 'Grayhound'
h_teams.loc[5293, 'Name'] = 'AVANT'
h_teams.loc[8030, 'Name'] = 'Not Academy'
return h_teams
def prep_pymc_model(n_teams, n_maps):
with pm.Model() as rating_model:
omega = pm.HalfCauchy('omega', 0.5)
tau = pm.HalfCauchy('tau', 0.5)
rating = pm.Normal('rating', 0, omega, shape=n_teams)
theta_tilde = pm.Normal('rate_t', mu=0, sd=1, shape=(n_maps, n_teams))
rating_map = pm.Deterministic('rating | map', rating + tau * theta_tilde)
alpha = pm.Normal('alpha', 1., 0.2)
beta = pm.Normal('beta', 0.5, 0.2)
sigma = pm.HalfCauchy('sigma', 0.5)
return rating_model
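def sample_rating_model_sketch(n_teams=8, n_maps=3, draws=50):
    # Hedged usage sketch; the sizes and sampler settings are illustrative
    # only. Since prep_pymc_model defines no observed variables, pm.sample
    # simply draws from the prior over ratings and per-map offsets.
    model = prep_pymc_model(n_teams, n_maps)
    with model:
        trace = pm.sample(draws=draws, tune=50, chains=1, progressbar=False)
    return trace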
def prep_pymc_time_model(n_teams, n_maps, n_periods):
with pm.Model() as rating_model:
rho = pm.Uniform('rho', -1, 1)
omega = pm.HalfNormal('omega', 0.5)
sigma = pm.HalfNormal('sigma', 0.5)
time_rating = [pm.Normal('rating_0', 0, omega, shape=n_teams)]
theta_tilde = pm.Normal('rate_t', mu=0, sd=1, shape=(n_maps, n_teams))
tau = pm.HalfCauchy('tau', 0.5)
time_rating_map = [pm.Deterministic('rating_0 | map', time_rating[0] + tau * theta_tilde)]
gamma = pm.HalfNormal('gamma', 1.5)
for i in np.arange(1, n_periods):
time_rating.append(pm.Normal('rating_'+str(i), rho*time_rating[i-1], sigma, shape=n_teams))
time_rating_map.append(pm.Deterministic('rating_'+str(i)+' | map', time_rating[i] + tau * theta_tilde))
    return rating_model
| gpl-3.0 |
cjqian/incubator-airflow | airflow/contrib/hooks/salesforce_hook.py | 10 | 12120 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a Salesforce Hook
which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file
for other uses.
NOTE: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
import json
import pandas as pd
import time
from airflow.utils.log.logging_mixin import LoggingMixin
class SalesforceHook(BaseHook, LoggingMixin):
def __init__(
self,
conn_id,
*args,
**kwargs
):
"""
Create new connection to Salesforce
and allows you to pull data out of SFDC and save it to a file.
You can then use that file with other
Airflow operators to move the data into another data source
:param conn_id: the name of the connection that has the parameters
we need to connect to Salesforce.
            The connection should be of type `http` and include a
user's security token in the `Extras` field.
.. note::
For the HTTP connection type, you can include a
JSON structure in the `Extras` field.
We need a user's security token to connect to Salesforce.
So we define it in the `Extras` field as:
`{"security_token":"YOUR_SECRUITY_TOKEN"}`
"""
self.conn_id = conn_id
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson
def sign_in(self):
"""
Sign into Salesforce.
If we have already signed it, this will just return the original object
"""
if hasattr(self, 'sf'):
return self.sf
# connect to Salesforce
sf = Salesforce(
username=self.connection.login,
password=self.connection.password,
security_token=self.extras['security_token'],
instance_url=self.connection.host
)
self.sf = sf
return sf
def make_query(self, query):
"""
Make a query to Salesforce. Returns result in dictionary
:param query: The query to make to Salesforce
"""
self.sign_in()
self.log.info("Querying for all objects")
query = self.sf.query_all(query)
self.log.info(
"Received results: Total size: %s; Done: %s",
query['totalSize'], query['done']
)
query = json.loads(json.dumps(query))
return query
def describe_object(self, obj):
"""
Get the description of an object from Salesforce.
This description is the object's schema
and some extra metadata that Salesforce stores for each object
:param obj: Name of the Salesforce object
that we are getting a description of.
"""
self.sign_in()
return json.loads(json.dumps(self.sf.__getattr__(obj).describe()))
def get_available_fields(self, obj):
"""
Get a list of all available fields for an object.
This only returns the names of the fields.
"""
self.sign_in()
desc = self.describe_object(obj)
return [f['name'] for f in desc['fields']]
def _build_field_list(self, fields):
# join all of the fields in a comma separated list
return ",".join(fields)
def get_object_from_salesforce(self, obj, fields):
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
"""
field_string = self._build_field_list(fields)
query = "SELECT {0} FROM {1}".format(field_string, obj)
self.log.info(
"Making query to Salesforce: %s",
query if len(query) < 30 else " ... ".join([query[:15], query[-15:]])
)
return self.make_query(query)
@classmethod
def _to_timestamp(cls, col):
"""
Convert a column of a dataframe to UNIX timestamps if applicable
:param col: A Series object representing a column of a dataframe.
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
col = pd.to_datetime(col)
except ValueError:
log = LoggingMixin().log
log.warning(
"Could not convert field to timestamps: %s", col.name
)
return col
# now convert the newly created datetimes into timestamps
# we have to be careful here
# because NaT cannot be converted to a timestamp
# so we have to return NaN
converted = []
for i in col:
try:
converted.append(i.timestamp())
except ValueError:
converted.append(pd.np.NaN)
except AttributeError:
converted.append(pd.np.NaN)
# return a new series that maintains the same index as the original
return pd.Series(converted, index=col.index)
def write_object_to_file(
self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False
):
"""
Write query results to file.
Acceptable formats are:
- csv:
comma-separated-values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
                JSON array but each element is newline-delimited
                instead of comma-delimited like in `json`
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
Pandas wants to write them as strings in CSV,
        but as millisecond Unix timestamps in JSON.
By default, this function will try and leave all values as
they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes
to become Unix timestamps (UTC).
        This can be greatly beneficial as it will make all of your
datetime fields look the same,
and makes it easier to work with in other database environments
:param query_results: the results from a SQL query
:param filename: the name of the file where the data
should be dumped to
:param fmt: the format you want the output in.
*Default:* csv.
:param coerce_to_timestamp: True if you want all datetime fields to be
converted into Unix timestamps.
False if you want them to be left in the
same format as they were in Salesforce.
Leaving the value as False will result
in datetimes being strings.
*Defaults to False*
:param record_time_added: *(optional)* True if you want to add a
Unix timestamp field to the resulting data
that marks when the data
was fetched from Salesforce.
*Default: False*.
"""
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {0}".format(fmt))
# this line right here will convert all integers to floats if there are
# any None/np.nan values in the column
# that's because None/np.nan cannot exist in an integer column
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [c.lower() for c in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
# features that are DATE or DATETIME
if coerce_to_timestamp and df.shape[0] > 0:
# get the object name out of the query results
# it's stored in the "attributes" dictionary
# for each returned record
object_name = query_results[0]['attributes']['type']
self.log.info("Coercing timestamps for: %s", object_name)
schema = self.describe_object(object_name)
            # possible columns that can be converted to timestamps
# are the ones that are either date or datetime types
# strings are too general and we risk unintentional conversion
possible_timestamp_cols = [
i['name'].lower()
for i in schema['fields']
if i['type'] in ["date", "datetime"] and
i['name'].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(
lambda x: self._to_timestamp(x)
)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
# write the CSV or JSON file depending on the option
# NOTE:
# datetimes here are an issue.
# There is no good way to manage the difference
# for to_json, the options are an epoch or a ISO string
# but for to_csv, it will be a string output by datetime
# For JSON we decided to output the epoch timestamp in seconds
# (as is fairly standard for JavaScript)
# And for csv, we do a string
if fmt == "csv":
# there are also a ton of newline objects
# that mess up our ability to write to csv
# we remove these newlines so that the output is a valid CSV format
self.log.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\r\n", "")
)
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\n", "")
)
# write the dataframe
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df
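def _example_account_export(conn_id='salesforce_default'):
    # Hedged usage sketch: the connection id, object name and field names are
    # illustrative assumptions, not part of this module. It pulls a few
    # Account fields and dumps them to CSV with datetimes coerced to Unix
    # timestamps, as described in the docstring above.
    hook = SalesforceHook(conn_id)
    query = hook.get_object_from_salesforce('Account',
                                            ['Id', 'Name', 'CreatedDate'])
    return hook.write_object_to_file(query['records'],
                                     filename='accounts.csv',
                                     fmt='csv',
                                     coerce_to_timestamp=True,
                                     record_time_added=True)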
| apache-2.0 |
iamshang1/Projects | Advanced_ML/Text_Classification/feature_extraction.py | 1 | 4544 | import sys
import ast
import re
from itertools import groupby
import numpy as np
import collections
from gensim.models import Word2Vec
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
import logging
import pickle
#get json filepath
args = (sys.argv)
if len(args) != 2:
raise Exception("Usage: python feature_extraction.py <path to Yelp json file>")
json_path = args[1]
#logging setup
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
#store records
labels = []
tokens = []
maxsentlen = 0
maxdoclen = 0
#process json one line at a time
with open(json_path,'r') as f:
lineno = 0
for line in f:
lineno += 1
sys.stdout.write("processing line %i of aprox 4.15 million \r" \
% lineno)
sys.stdout.flush()
dic = ast.literal_eval(line)
#only keep records from 2013 (to reduce dataset size)
if dic['date'][:4]!='2013':
continue
text = dic['text']
#process text
text = text.lower()
text = re.sub("'", '', text)
text = re.sub("\.{2,}", '.', text)
text = re.sub('[^\w_|\.|\?|!]+', ' ', text)
text = re.sub('\.', ' . ', text)
text = re.sub('\?', ' ? ', text)
text = re.sub('!', ' ! ', text)
#tokenize
text = text.split()
#drop empty reviews
if len(text) == 0:
continue
#split into sentences
sentences = []
sentence = []
for t in text:
if t not in ['.','!','?']:
sentence.append(t)
else:
sentence.append(t)
sentences.append(sentence)
if len(sentence) > maxsentlen:
maxsentlen = len(sentence)
sentence = []
if len(sentence) > 0:
sentences.append(sentence)
#add split sentences to tokens
tokens.append(sentences)
if len(sentences) > maxdoclen:
maxdoclen = len(sentences)
#add label
labels.append(dic['stars'])
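def split_into_sentences(text_tokens):
    # Hedged helper restating the loop above for a single tokenized review:
    # group tokens into sentences, keeping the terminating '.', '?' or '!'
    # attached to the sentence it closes.
    sentences = []
    sentence = []
    for t in text_tokens:
        sentence.append(t)
        if t in ['.', '!', '?']:
            sentences.append(sentence)
            sentence = []
    if len(sentence) > 0:
        sentences.append(sentence)
    return sentences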
print '\nsaved %i records' % len(tokens)
#generate Word2Vec embeddings
print "generating word2vec embeddings"
#used all processed raw text to train word2vec
allsents = [sent for doc in tokens for sent in doc]
embedding_size = 200
model = Word2Vec(allsents, min_count=5, size=embedding_size, workers=4, iter=5)
model.init_sims(replace=True)
'''
#get most common words
print "getting common words"
allwords = [word for sent in allsents for word in sent]
counts = collections.Counter(allwords).most_common(500)
#reduce embeddings to 2d using tsne
print "reducing embeddings to 2D"
embeddings = np.empty((500,embedding_size))
for i in range(500):
embeddings[i,:] = model[counts[i][0]]
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=7500)
embeddings = tsne.fit_transform(embeddings)
#plot embeddings
print "plotting most common words"
fig, ax = plt.subplots(figsize=(30, 30))
for i in range(500):
ax.scatter(embeddings[i,0],embeddings[i,1])
ax.annotate(counts[i][0], (embeddings[i,0],embeddings[i,1]))
plt.show()
'''
#save all word embeddings to matrix
print "saving word vectors to matrix"
vocab = np.zeros((len(model.wv.vocab)+1,embedding_size))
word2id = {}
#first row of embedding matrix isn't used so that 0 can be masked
for key,val in model.wv.vocab.iteritems():
idx = val.__dict__['index'] + 1
vocab[idx,:] = model[key]
word2id[key] = idx
#normalize embeddings
vocab -= vocab.mean()
vocab /= (vocab.std()*2)
#reset first row to 0
vocab[0,:] = np.zeros((embedding_size))
#add additional word embedding for unknown words
vocab = np.concatenate((vocab, np.random.rand(1,embedding_size)))
#index for unknown words
unk = len(vocab)-1
#convert words to word indices
print "converting words to indices"
data = {}
for idx,doc in enumerate(tokens):
sys.stdout.write('processing %i of %i records \r' % (idx+1,len(tokens)))
sys.stdout.flush()
dic = {}
dic['label'] = labels[idx]
dic['text'] = doc
indicies = []
for sent in doc:
indicies.append([word2id[word] if word in word2id else unk for word in sent])
dic['idx'] = indicies
data[idx] = dic
#save preprocessed data and embeddings to disk
print "\nsaving data to disk"
np.save('embeddings',vocab)
with open('data.pkl', 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
| mit |
prheenan/Research | Perkins/Projects/WetLab/Demos/Dilutions/2016-9-31-SolutionProtocols/2016-9-4PrepGel/main_prep-gel.py | 1 | 1658 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../..")
from Util import DilutionUtil
def run():
"""
    For aliquotting the gel sample: compute the stock volumes needed for the desired final concentrations.
"""
# TCEP is already present (25uL at 4mM),
# assume effectively that we want the full aliquot
    # Stats list is formatted like <name, concentration unit string, stock, desired,
    # already present>
TotalDNADesiredNanograms = 10e3
Volume = 136 # units of vol_units
Stats = [ ["DNA","ng",88.2,TotalDNADesiredNanograms/Volume,0],
["loading buffer","x",6,1,0]]
# get the stocks, desired concntrations, and already-present concentraitons
Stocks = [s[2] for s in Stats]
Desired = [s[3] for s in Stats]
Already = [s[4] for s in Stats]
vol_units = "uL"
Volumes = DilutionUtil.\
GetVolumesNeededByConcentration(Stocks,Desired,Volume,
AlreadyHaveMass=Already)
BufferVolume = Volume - sum(Volumes)
print("In a total solution of {:.1f}uL...".format(Volume))
for (name,conc_units,conc_stock,desired_conc,_),vol_stock in\
zip(Stats,Volumes):
print("\t{:.3g}{:s} of {:.3g}{:s} {:s} for {:.3g}{:s} in solution".\
format(vol_stock,vol_units,conc_stock,conc_units,name,
desired_conc,conc_units))
print("\tRemainder is ({:.3g}{:s}) of buffer".\
format(BufferVolume,vol_units))
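def volume_needed_sketch(stock_conc, desired_conc, total_volume,
                         already_present_conc=0.0):
    # Hedged sketch of the dilution arithmetic the utility call above
    # presumably performs (C_stock * V_stock = C_desired * V_total, less
    # whatever is already present); the real DilutionUtil helper may handle
    # more cases than this.
    return (desired_conc - already_present_conc) * total_volume / stock_conc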
if __name__ == "__main__":
run()
| gpl-3.0 |
websterkgd/PeasantMath | Roots/code/ErrorComparison.py | 2 | 4158 | from __future__ import division
import sys
import os
import matplotlib.pyplot as plt
import scipy
from scipy import special
mydir = os.path.expanduser("~/")
sys.path.append(mydir + "/GitHub/PeasantMath/Roots/code")
import functions as fxn
ks = [2, 3, 4, 5, 6, 7, 8, 9, 10, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5]
ks = [3]
for i, k in enumerate(ks):
fs = 12
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
x = 2.0
xs = []
rts = []
WLs = []
guesses = []
for j in range(100):
xs.append(x)
y = x**(1.0/k)
rts.append(y)
a = fxn.WHL_kth(x, k)
a = abs(y - a)/y
WLs.append(a)
b = fxn.Guess(x, k)
b = abs(y - b)/y
guesses.append(b)
x += 1
#plt.scatter(xs, rts, s=50, color='m', facecolors='none', label='root'+str(k))
plt.scatter(xs, WLs, color='c', alpha=0.9, label='WHL rule')
plt.scatter(xs, guesses, color='grey', alpha=0.9, label='Random guesses \nfor the remainder')
#plt.scatter(xs, guesses, color='grey', alpha=0.9, label='Assuming the remainder\nis 1/2')
#plt.yscale('log')
#plt.xscale('log')
plt.xlabel('x', fontsize=fs)
plt.ylabel('Percent error', fontsize=fs)
plt.xlim(min(xs), max(xs))
#plt.ylim(min(WLs), max(rts))
plt.legend(bbox_to_anchor=(-0.04, 1.1, 2.59, .3), loc=10, ncol=2,
mode="expand",prop={'size':16})
#leg = plt.legend(loc=4,prop={'size':12})
#leg.draw_frame(False)
#plt.text(-50, 14, "How well does the WHL Rule approximate square roots?", fontsize=16)
ax = fig.add_subplot(2,2,2)
x = 2.0
xs = []
rts = []
WLs = []
guesses = []
for j in range(100):
xs.append(x)
y = x**(1.0/k)
rts.append(y)
a = fxn.WHL_kth(x, k)
a = abs(y - a)/y
WLs.append(a)
b = fxn.Guess(x, k)
b = abs(y - b)/y
guesses.append(b)
x += 10
#plt.scatter(xs, rts, s=50, color='m', facecolors='none', label='root'+str(k))
plt.scatter(xs, WLs, color='c', alpha=0.9)
plt.scatter(xs, guesses, color='grey', alpha=0.9)
#plt.yscale('log')
#plt.xscale('log')
plt.xlabel('x', fontsize=fs)
plt.ylabel('Percent error', fontsize=fs)
plt.xlim(min(xs)*0.5, max(xs))
#plt.ylim(min(WLs)*0.5, max(rts)*1.5)
ax = fig.add_subplot(2,2,3)
x = 2.0
xs = []
rts = []
WLs = []
guesses = []
for j in range(30):
xs.append(x)
y = x**(1.0/k)
rts.append(y)
a = fxn.WHL_kth(x, k)
a = abs(y - a)/y
WLs.append(a)
b = fxn.Guess(x, k)
b = abs(y - b)/y
guesses.append(b)
x = x*1.5
#plt.scatter(xs, rts, s=50, color='m', facecolors='none', label='root'+str(k))
plt.scatter(xs, WLs, color='c', alpha=0.9)
plt.scatter(xs, guesses, color='grey', alpha=0.9)
#plt.yscale('log')
plt.xscale('log')
plt.xlabel('x', fontsize=fs)
plt.ylabel('Percent error', fontsize=fs)
plt.xlim(min(xs)*0.5, max(xs)*1.5)
#plt.ylim(min(WLs)*0.5, max(rts)*1.5)
ax = fig.add_subplot(2,2,4)
x = 2.0
xs = []
rts = []
WLs = []
guesses = []
for j in range(30):
xs.append(x)
y = x**(1.0/k)
rts.append(y)
a = fxn.WHL_kth(x, k)
a = abs(y - a)/y
WLs.append(a)
b = fxn.Guess(x, k)
b = abs(y - b)/y
guesses.append(b)
x = x*2
#plt.scatter(xs, rts, s=50, color='m', facecolors='none', label='root'+str(k))
plt.scatter(xs, WLs, color='c', alpha=0.9)
plt.scatter(xs, guesses, color='grey', alpha=0.9)
#plt.yscale('log')
plt.xscale('log')
plt.xlabel('x', fontsize=fs)
plt.ylabel('Percent error', fontsize=fs)
plt.xlim(min(xs)*0.5, max(xs)*1.5)
#plt.ylim(min(WLs)*0.5, max(rts)*1.5)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.subplots_adjust(wspace=0.5, hspace=0.3)
plt.savefig(mydir+'/GitHub/PeasantMath/Roots/figs/ErrorAnalysis/error_and_random_remainder-Root_'+str(k)+'.png', dpi=600, bbox_inches = 'tight')#, pad_inches=0)
print 'finished root',k
#plt.show()
| cc0-1.0 |
mjudsp/Tsallis | examples/gaussian_process/plot_gpr_prior_posterior.py | 104 | 2878 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
Innixma/kaggle2017 | scripts/common/__init__.py | 1 | 2112 | import matplotlib.pyplot as plt
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
import pandas as pd
def plot_slice(img, slice=80):
# Show some slice in the middle
plt.imshow(img[slice])
plt.show()
def plot_3d(image, threshold=-100):
# Position the scan upright,
# so the head of the patient would be at the top facing the camera
# p = image.transpose(2,1,0)
p = image
results = measure.marching_cubes(p, threshold)
verts = results[0]
faces = results[1]
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces], alpha=0.70)
face_color = [0.45, 0.45, 0.75]
mesh.set_facecolor(face_color)
ax.add_collection3d(mesh)
ax.set_xlim(0, p.shape[0])
ax.set_ylim(0, p.shape[1])
ax.set_zlim(0, p.shape[2])
plt.savefig('plot3d.png')
def save(arr, pth):
with open(pth, 'wb+') as fh:
np.savez_compressed(fh, data=arr)
def load(pth):
return np.load(pth)['data']
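def save_load_roundtrip_check(pth='example_volume.npz'):
    # Hedged sketch (the path is illustrative): save()/load() above round-trip
    # a numpy array through a compressed .npz file under the key 'data'.
    arr = np.zeros((4, 4, 4), dtype=np.int16)
    save(arr, pth)
    return np.array_equal(load(pth), arr)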
def read_mapping_file(pth):
return pd.read_csv(pth)
def shuffle_weights(model, weights=None):
"""Randomly permute the weights in `model`, or the given `weights`.
This is a fast approximation of re-initializing the weights of a model.
Assumes weights are distributed independently of the dimensions of the weight tensors
(i.e., the weights have the same distribution along each dimension).
:param Model model: Modify the weights of the given model.
:param list(ndarray) weights: The model's weights will be replaced by a random permutation of these weights.
If `None`, permute the model's current weights.
"""
if weights is None:
weights = model.get_weights()
weights = [np.random.permutation(w.flat).reshape(w.shape) for w in weights]
# Faster, but less random: only permutes along the first dimension
# weights = [np.random.permutation(w) for w in weights]
model.set_weights(weights)
| mit |
ch3ll0v3k/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
mattjj/pylds | examples/diagonal_meanfield.py | 1 | 2372 | from __future__ import division
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from pybasicbayes.distributions import Regression, DiagonalRegression
from pybasicbayes.util.text import progprint_xrange
from pylds.models import LDS, DefaultLDS
npr.seed(0)
# Parameters
D_obs = 1
D_latent = 2
D_input = 0
T = 2000
# Simulate from an LDS
truemodel = DefaultLDS(D_obs, D_latent, D_input)
inputs = np.random.randn(T, D_input)
data, stateseq = truemodel.generate(T, inputs=inputs)
# Fit with an LDS with diagonal observation noise
model = LDS(
dynamics_distn=Regression(nu_0=D_latent + 2,
S_0=D_latent * np.eye(D_latent),
M_0=np.zeros((D_latent, D_latent + D_input)),
K_0=(D_latent + D_input) * np.eye(D_latent + D_input)),
emission_distn=DiagonalRegression(D_obs, D_latent+D_input))
model.add_data(data, inputs=inputs)
# Fit with mean field
def update(model):
return model.meanfield_coordinate_descent_step()
for _ in progprint_xrange(100):
model.resample_model()
N_steps = 100
vlbs = [update(model) for _ in progprint_xrange(N_steps)]
plt.figure(figsize=(3,4))
plt.plot([0, N_steps], truemodel.log_likelihood() * np.ones(2), '--k')
plt.plot(vlbs)
plt.xlabel('iteration')
plt.ylabel('variational lower bound')
# Predict forward in time
T_given = 1800
T_predict = 200
given_data= data[:T_given]
given_inputs = inputs[:T_given]
preds = \
model.sample_predictions(
given_data, inputs=given_inputs,
Tpred=T_predict,
inputs_pred=inputs[T_given:T_given + T_predict])
# Plot the predictions
plt.figure()
plt.plot(np.arange(T), data, 'b-', label="true")
plt.plot(T_given + np.arange(T_predict), preds, 'r--', label="prediction")
ylim = plt.ylim()
plt.plot([T_given, T_given], ylim, '-k')
plt.xlabel('time index')
plt.xlim(max(0, T_given - 200), T)
plt.ylabel('prediction')
plt.ylim(ylim)
plt.legend()
# Smooth the data (TODO: Clean this up)
E_CD,_,_,_ = model.emission_distn.mf_expectations
E_C, E_D = E_CD[:,:D_latent], E_CD[:,D_latent:]
ys = model.states_list[0].smoothed_mus.dot(E_C.T) + inputs.dot(E_D.T)
plt.figure()
plt.plot(data, 'b-', label="true")
plt.plot(ys, 'r-', lw=2, label="smoothed")
plt.xlabel("Time")
plt.xlim(max(0, T_given-200), T)
plt.ylabel("Smoothed Data")
plt.legend()
plt.show()
| mit |
ShyamSS-95/Bolt | example_problems/nonrelativistic_boltzmann/linear_modes/linear_modes_euler/2D/entropy_mode/check_convergence.py | 1 | 5449 | import numpy as np
import h5py
import matplotlib as mpl
mpl.use('agg')
import pylab as pl
import input_files.domain as domain
import input_files.params as params
# Optimized plot parameters to make beautiful plots:
pl.rcParams['figure.figsize'] = 12, 7.5
pl.rcParams['figure.dpi'] = 300
pl.rcParams['image.cmap'] = 'jet'
pl.rcParams['lines.linewidth'] = 1.5
pl.rcParams['font.family'] = 'serif'
pl.rcParams['font.weight'] = 'bold'
pl.rcParams['font.size'] = 20
pl.rcParams['font.sans-serif'] = 'serif'
pl.rcParams['text.usetex'] = True
pl.rcParams['axes.linewidth'] = 1.5
pl.rcParams['axes.titlesize'] = 'medium'
pl.rcParams['axes.labelsize'] = 'medium'
pl.rcParams['xtick.major.size'] = 8
pl.rcParams['xtick.minor.size'] = 4
pl.rcParams['xtick.major.pad'] = 8
pl.rcParams['xtick.minor.pad'] = 8
pl.rcParams['xtick.color'] = 'k'
pl.rcParams['xtick.labelsize'] = 'medium'
pl.rcParams['xtick.direction'] = 'in'
pl.rcParams['ytick.major.size'] = 8
pl.rcParams['ytick.minor.size'] = 4
pl.rcParams['ytick.major.pad'] = 8
pl.rcParams['ytick.minor.pad'] = 8
pl.rcParams['ytick.color'] = 'k'
pl.rcParams['ytick.labelsize'] = 'medium'
pl.rcParams['ytick.direction'] = 'in'
omega = 0
# Defining the functions for the analytical solution:
def n_ana(q1, q2, t):
n_b = params.density_background
pert_real_n = 1
pert_imag_n = 0
pert_n = pert_real_n + 1j * pert_imag_n
n_ana = n_b + params.amplitude * pert_n * \
np.exp( 1j * (params.k_q1 * q1 + params.k_q2 * q2)
+ omega * t
).real
return(n_ana)
def v1_ana(q1, q2, t):
v1_b = params.v1_bulk_background
n_b = params.density_background
pert_real_v1 = 0
pert_imag_v1 = 0
pert_v1 = pert_real_v1 + 1j * pert_imag_v1
v1_ana = v1_b + params.amplitude * pert_v1 * \
np.exp( 1j * (params.k_q1 * q1 + params.k_q2 * q2)
+ omega * t
).real
return(v1_ana)
def v2_ana(q1, q2, t):
v2_b = params.v2_bulk_background
n_b = params.density_background
pert_real_v2 = 0
pert_imag_v2 = 0
pert_v2 = pert_real_v2 + 1j * pert_imag_v2
v2_ana = v2_b + params.amplitude * pert_v2 * \
np.exp( 1j * (params.k_q1 * q1 + params.k_q2 * q2)
+ omega * t
).real
return(v2_ana)
def T_ana(q1, q2, t):
T_b = params.temperature_background
n_b = params.density_background
pert_real_T = -T_b / n_b
pert_imag_T = 0
pert_T = pert_real_T + 1j * pert_imag_T
T_ana = T_b + params.amplitude * pert_T * \
np.exp( 1j * (params.k_q1 * q1 + params.k_q2 * q2)
+ omega * t
).real
return(T_ana)
N_g_q = domain.N_ghost_q
N = np.array([32, 48, 64, 96, 128])
error_n = np.zeros(N.size)
error_v1 = np.zeros(N.size)
error_v2 = np.zeros(N.size)
error_T = np.zeros(N.size)
for i in range(N.size):
dq1 = (domain.q1_end - domain.q1_start) / int(N[i])
dq2 = (domain.q2_end - domain.q2_start) / int(N[i])
q1 = domain.q1_start + (0.5 + np.arange(int(N[i]))) * dq1
q2 = domain.q2_start + (0.5 + np.arange(int(N[i]))) * dq2
q2, q1 = np.meshgrid(q2, q1)
h5f = h5py.File('dump/N_%04d'%(int(N[i])) + '.h5')
mom = h5f['moments'][:]
h5f.close()
n_nls = np.transpose(mom[:, :, 0], (1, 0))
v1_nls = np.transpose(mom[:, :, 2], (1, 0)) / n_nls
v2_nls = np.transpose(mom[:, :, 3], (1, 0)) / n_nls
v3_nls = np.transpose(mom[:, :, 4], (1, 0)) / n_nls
T_nls = (1 / params.p_dim) * ( 2 * np.transpose(mom[:, :, 1], (1, 0))
- n_nls * v1_nls**2
- n_nls * v2_nls**2
- n_nls * v3_nls**2
) / n_nls
n_analytic = n_ana(q1, q2, params.t_final)
v1_analytic = v1_ana(q1, q2, params.t_final)
v2_analytic = v2_ana(q1, q2, params.t_final)
T_analytic = T_ana(q1, q2, params.t_final)
error_n[i] = np.mean(abs(n_nls - n_analytic))
error_v1[i] = np.mean(abs(v1_nls - v1_analytic))
error_v2[i] = np.mean(abs(v2_nls - v2_analytic))
error_T[i] = np.mean(abs(T_nls - T_analytic))
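def observed_convergence_order(N_values, errors):
    # Hedged helper restating the polyfit calls below: the slope of
    # log10(error) against log10(N); a slope close to -2 indicates the
    # expected second-order convergence.
    return np.polyfit(np.log10(N_values), np.log10(errors), 1)[0]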
print('Errors Obtained:')
print('L1 norm of error for density:', error_n)
print('L1 norm of error for velocity-1:', error_v1)
print('L1 norm of error for velocity-2:', error_v2)
print('L1 norm of error for temperature:', error_T)
print('\nConvergence Rates:')
print('Order of convergence for density:', np.polyfit(np.log10(N), np.log10(error_n), 1)[0])
print('Order of convergence for velocity-1:', np.polyfit(np.log10(N), np.log10(error_v1), 1)[0])
print('Order of convergence for velocity-2:', np.polyfit(np.log10(N), np.log10(error_v2), 1)[0])
print('Order of convergence for temperature:', np.polyfit(np.log10(N), np.log10(error_T), 1)[0])
pl.loglog(N, error_n, '-o', label = 'Density')
pl.loglog(N, error_v1, '-o', label = 'Velocity-1')
pl.loglog(N, error_v2, '-o', label = 'Velocity-2')
pl.loglog(N, error_T, '-o', label = 'Temperature')
pl.loglog(N, error_n[0]*32**2/N**2, '--', color = 'black', label = r'$O(N^{-2})$')
pl.xlabel(r'$N$')
pl.ylabel('Error')
pl.legend()
pl.savefig('convergenceplot.png')
| gpl-3.0 |
palful/yambopy | yambopy/dbs/electronsdb.py | 1 | 9254 | # Copyright (c) 2016, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from netCDF4 import Dataset
import numpy as np
from itertools import product
import collections
ha2ev = 27.211396132
max_exp = 50
min_exp =-100.
def abs2(x):
return x.real**2 + x.imag**2
def fermi(e):
""" fermi dirac function
"""
if e > max_exp:
return 0
elif e < -max_exp:
return 1
return 1/(np.exp(e)+1)
def fermi_array(e_array,ef,invsmear):
"""
Fermi dirac function for an array
"""
e_array = (e_array-ef)/invsmear
return [ fermi(e) for e in e_array]
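def fermi_occupation_sketch():
    # Hedged check of the helpers above: at the Fermi energy the occupation is
    # exactly 1/2, and it tends to 1 (0) well below (above) ef on the scale of
    # invsmear.
    energies = np.array([-1.0, 0.0, 1.0])
    return fermi_array(energies, 0.0, 0.1)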
def histogram_eiv(eiv,weights,emin=-5.0,emax=5.0,step=0.01,sigma=0.05,ctype='lorentzian'):
"""
Histogram of eigenvalues
"""
eiv = np.array(eiv)
#sigma = 0.005
x = np.arange(emin,emax,step,dtype=np.float32)
y = np.zeros([len(x)],dtype=np.float32)
if ctype == 'gaussian':
        c = 1.0/(sigma*np.sqrt(2))
a = -1.0/(2*sigma)
else:
#lorentzian stuff
s2 = (.5*sigma)**2
c = (.5*sigma)
eiv = eiv.flatten()
weights = weights.flatten()
weights = weights[emin < eiv]
eiv = eiv[emin < eiv]
weights = weights[eiv < emax]
eiv = eiv[eiv < emax]
if ctype == 'gaussian':
for e,w in zip(eiv,weights):
x1 = (x-e)**2
#add gaussian
y += c*np.exp(a*x1)
else:
#lorentzian stuff
for e,w in zip(eiv,weights):
x1 = (x-e)**2
#add lorentzian
y += w*c/(x1+s2)
return x, y
class YamboElectronsDB():
"""
Class to read information about the electrons from the ``ns.db1`` produced by yambo
Arguments:
``lattice``: instance of YamboLatticeDB or YamboSaveDB
``filename``: netcdf database to read from (default:ns.db1)
"""
def __init__(self,lattice,save='SAVE',filename='ns.db1'):
self.lattice = lattice
self.filename = '%s/%s'%(save,filename)
self.efermi = None
self.readDB()
if self.nkpoints != self.lattice.nkpoints: #sanity check
raise ValueError("The number of k-points in the lattice database and electrons database is different.")
self.expandEigenvalues()
def readDB(self):
try:
database = Dataset(self.filename)
except:
raise IOError("Error opening file %s in YamboElectronsDB"%self.filename)
self.eigenvalues_ibz = database.variables['EIGENVALUES'][0,:]*ha2ev
self.iku_kpoints = database.variables['K-POINTS'][:].T
dimensions = database.variables['DIMENSIONS'][:]
self.nbands = dimensions[5]
self.temperature = dimensions[13]
self.nelectrons = int(dimensions[14])
self.nkpoints = int(dimensions[6])
self.nbands = int(dimensions[5])
self.spin = int(dimensions[11])
self.time_rev = dimensions[9]
database.close()
#spin degeneracy if 2 components degen 1 else degen 2
self.spin_degen = [0,2,1][int(self.spin)]
#number of occupied bands
self.nbandsv = self.nelectrons / self.spin_degen
self.nbandsc = self.nbands-self.nbandsv
def expandEigenvalues(self):
"""
Expand eigenvalues to the full brillouin zone
"""
self.eigenvalues = self.eigenvalues_ibz[self.lattice.kpoints_indexes]
self.nkpoints_ibz = len(self.eigenvalues_ibz)
self.weights_ibz = np.zeros([self.nkpoints_ibz],dtype=np.float32)
self.nkpoints = len(self.eigenvalues)
#counter counts the number of occurences of element in a list
for nk_ibz,inv_weight in collections.Counter(self.lattice.kpoints_indexes).items():
self.weights_ibz[nk_ibz] = float(inv_weight)/self.nkpoints
#kpoints weights
self.weights = np.full((self.nkpoints), 1.0/self.nkpoints,dtype=np.float32)
def getDOS(self,broad=0.1,emin=-10,emax=10,step=0.01):
"""
Calculate the density of states.
Should work for metals as well but untested for that case
"""
eigenvalues = self.eigenvalues_ibz
weights = self.weights_ibz
nkpoints = self.nkpoints_ibz
na = np.newaxis
weights_bands = np.ones(eigenvalues.shape,dtype=np.float32)*weights[:,na]
energies, self.dos = histogram_eiv(eigenvalues,weights_bands,emin=emin,emax=emax,step=step,sigma=broad)
return energies, self.dos
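    def plotDOS(self, broad=0.1, emin=-10, emax=10, step=0.01):
        # Hedged plotting sketch; matplotlib is assumed to be available, as in
        # setLifetimesDOS below, but is not a declared dependency of this
        # module. Draws the density of states returned by getDOS above.
        import matplotlib.pyplot as plt
        energies, dos = self.getDOS(broad=broad, emin=emin, emax=emax, step=step)
        plt.plot(energies, dos)
        plt.xlabel('energy (eV)')
        plt.ylabel('DOS')
        plt.show()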
def setLifetimes(self,broad=0.1):
"""
Add electronic lifetimes using the DOS
"""
self.lifetimes_ibz = np.ones(self.eigenvalues_ibz.shape,dtype=np.float32)*broad
self.lifetimes = np.ones(self.eigenvalues.shape,dtype=np.float32)*broad
def setLifetimesDOS(self,broad=0.1,debug=False):
"""
Approximate the electronic lifetimes using the DOS
"""
eigenvalues = self.eigenvalues_ibz
weights = self.weights_ibz
nkpoints = self.nkpoints_ibz
#get dos
emin = np.min(eigenvalues)-broad
emax = np.max(eigenvalues)+broad
energies, dos = self.getDOS(emin=emin, emax=emax, step=0.1, broad=broad)
#normalize dos to broad
dos = dos/np.max(dos)*broad
#create a interpolation function to get the lifetimes for all the values
from scipy.interpolate import interp1d
f = interp1d(energies, dos, kind='cubic')
if debug:
"""
plot the calculated values for the DOS and the interpolated values
"""
import matplotlib.pyplot as plt
            x = np.arange(emin+broad,emax-broad,0.001)
plt.plot(energies,dos,'o')
plt.plot(x,f(x))
plt.show()
exit()
#add imaginary part to the energies proportional to the DOS
self.lifetimes_ibz = np.array([ [f(eig) for eig in eigk] for eigk in self.eigenvalues_ibz],dtype=np.float32)
self.lifetimes = np.array([ [f(eig) for eig in eigk] for eigk in self.eigenvalues],dtype=np.float32)
def setFermi(self,fermi,invsmear):
"""
Set the fermi energy of the system
"""
self.invsmear = invsmear
self.efermi = fermi
#full brillouin zone
self.eigenvalues -= self.efermi
self.occupations = np.zeros([self.nkpoints,self.nbands],dtype=np.float32)
for nk in xrange(self.nkpoints):
self.occupations[nk] = fermi_array(self.eigenvalues[nk,:],0,self.invsmear)
#for the ibz
self.eigenvalues_ibz -= self.efermi
self.occupations_ibz = np.zeros([self.nkpoints_ibz,self.nbands],dtype=np.float32)
for nk in xrange(self.nkpoints_ibz):
self.occupations_ibz[nk] = fermi_array(self.eigenvalues_ibz[nk,:],0,self.invsmear)
return self.efermi
def setFermiFixed(self,broad=1e-5):
"""
Get fermi level using fixed occupations method
Useful for semi-conductors
"""
eigenvalues = self.eigenvalues_ibz
weights = self.weights_ibz
nkpoints = self.nkpoints_ibz
nbands = self.nelectrons/self.spin_degen
        #top of valence
        top = np.max(eigenvalues[:,nbands-1])
        #bottom of conduction
        bot = np.min(eigenvalues[:,nbands])
self.efermi = (top+bot)/2
self.setFermi(self.efermi,broad)
def energy_gaps(self,GWshift=0.):
"""
Calculate the enegy of the gap (by Fulvio Paleari)
"""
eiv = self.eigenvalues
nv = self.nbandsv
nc = self.nbandsc
homo = np.max(eiv[:,nv-1])
lumo = np.min(eiv[:,nv])
Egap = lumo-homo
for k in eiv:
if k[nv-1]==homo:
lumo_dir=k[nv]
Edir = lumo_dir-homo
eiv[:,nv:]+=GWshift
print('DFT Energy gap: %s eV'%Egap)
print('DFT Direct gap: %s eV'%Edir)
print('GW shift: %s eV'%GWshift)
return np.copy(eiv)
def getFermi(self,invsmear,setfermi=True):
"""
Determine the fermi energy
"""
if self.efermi: return self.efermi
from scipy.optimize import bisect
eigenvalues = self.eigenvalues_ibz
weights = self.weights_ibz
nkpoints = self.nkpoints_ibz
min_eival, max_eival = np.min(eigenvalues), np.max(eigenvalues)
self.invsmear = invsmear
def occupation_minus_ne(ef):
"""
The total occupation minus the total number of electrons
"""
return sum([sum(self.spin_degen*fermi_array(eigenvalues[nk],ef,self.invsmear))*weights[nk] for nk in xrange(nkpoints)])-self.nelectrons
fermi = bisect(occupation_minus_ne,min_eival,max_eival)
if setfermi: self.setFermi(fermi,invsmear)
return self.efermi
def __str__(self):
s = ""
s += "spin_degen: %d\n"%self.spin_degen
s += "nelectrons: %d\n"%self.nelectrons
s += "nbands: %d\n"%self.nbands
s += "nkpoints: %d"%self.nkpoints
return s
| bsd-3-clause |