repo_name (string, 7-79 chars) | path (string, 4-179 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 959-798k chars) | license (15 distinct values)
---|---|---|---|---|---
jm-begon/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
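    # Note: t_start is currently unused; only the elapsed timedelta is converted
    # to fractional seconds.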
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
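    # Draw n_nonzeros Gaussian values at uniformly random (row, column) positions,
    # then return the same matrix twice: as a dense ndarray and as a CSR matrix,
    # so dense and sparse benchmarks run on identical data.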
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
cfe-lab/MiCall | release_test_compare.py | 1 | 29035 | """ Compare result files in shared folder with previous release. """
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import csv
from collections import namedtuple, defaultdict
from concurrent.futures.process import ProcessPoolExecutor
from difflib import Differ
from enum import IntEnum
from functools import partial
from itertools import groupby, zip_longest, chain
from glob import glob
from operator import itemgetter
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import Levenshtein
from micall.core.aln2counts import SeedNucleotide, MAX_CUTOFF
from micall.utils.primer_tracker import PrimerTracker
MICALL_VERSION = '7.14'
MiseqRun = namedtuple('MiseqRun', 'source_path target_path is_done')
MiseqRun.__new__.__defaults__ = (None,) * 3
SampleFiles = namedtuple(
'SampleFiles',
'cascade coverage_scores g2p_summary region_consensus remap_counts conseq')
SampleFiles.__new__.__defaults__ = (None,) * 6
Sample = namedtuple('Sample', 'run name source_files target_files')
ConsensusDistance = namedtuple('ConsensusDistance',
'region cutoff distance pct_diff')
ConsensusDistance.__new__.__defaults__ = (None,) * 4
SampleComparison = namedtuple('SampleComparison',
['diffs', # multi-line string
'scenarios', # {Scenarios: [description]}
'consensus_distances']) # [ConsensusDistance]
class Scenarios(IntEnum):
NONE = 0
REMAP_COUNTS_CHANGED = 1
MAIN_CONSENSUS_CHANGED = 2
OTHER_CONSENSUS_CHANGED = 4
CONSENSUS_DELETIONS_CHANGED = 8
VPR_FRAME_SHIFT_FIXED = 16
CONSENSUS_EXTENDED = 32
INSERTIONS_ADDED = 64
differ = Differ()
def parse_args():
# noinspection PyTypeChecker
parser = ArgumentParser(
description='Compare sample results for testing a new release.',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--denovo',
action='store_true',
help='Compare old remapped results to new assembled results.')
parser.add_argument('source_folder',
help='Main RAWDATA folder with results from previous version.')
parser.add_argument('target_folder',
help='Testing RAWDATA folder to compare with.')
return parser.parse_args()
def find_runs(source_folder, target_folder, use_denovo):
run_paths = glob(os.path.join(target_folder, 'MiSeq', 'runs', '*'))
run_paths.sort()
for run_path in run_paths:
run_name = os.path.basename(run_path)
target_path = os.path.join(run_path,
'Results',
'version_' + MICALL_VERSION)
done_path = os.path.join(target_path, 'doneprocessing')
is_done = os.path.exists(done_path)
if use_denovo:
target_path = os.path.join(target_path, 'denovo')
source_results_path = os.path.join(source_folder,
'MiSeq',
'runs',
run_name,
'Results')
source_versions = os.listdir(source_results_path)
try:
source_versions.sort(key=parse_version)
except ValueError as ex:
message = f'Unexpected results file name in {run_name}.'
raise ValueError(message) from ex
source_path = os.path.join(source_results_path, source_versions[-1])
yield MiseqRun(source_path, target_path, is_done)
def parse_version(version_name):
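    # e.g. 'version_7.13' or 'version_7.13.zip' -> (7, 13)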
version_text = version_name.split('_')[-1]
if version_text.endswith('.zip'):
version_text = version_text[:-4]
return tuple(map(int, version_text.split('.')))
def report_source_versions(runs):
version_runs = defaultdict(list) # {version: [source_path]}
for run in runs:
version = os.path.basename(run.source_path)
version_runs[version].append(run.source_path)
yield run
if version_runs:
max_count = max(len(paths) for paths in version_runs.values())
else:
max_count = 0
for version, paths in sorted(version_runs.items()):
if len(paths) == max_count:
print(version, max_count)
else:
print(version, paths)
def read_samples(runs):
missing_sources = []
missing_targets = []
missing_files = []
bad_files = []
for run in runs:
if run.source_path is None:
missing_sources.append(run.target_path)
continue
if run.target_path is None:
missing_targets.append(run.source_path)
continue
try:
source_cascades = group_samples(os.path.join(run.source_path,
'cascade.csv'))
target_cascades = group_samples(os.path.join(run.target_path,
'cascade.csv'))
source_g2ps = group_samples(os.path.join(run.source_path,
'g2p_summary.csv'))
target_g2ps = group_samples(os.path.join(run.target_path,
'g2p_summary.csv'))
source_coverages = group_samples(os.path.join(run.source_path,
'coverage_scores.csv'))
target_coverages = group_samples(os.path.join(run.target_path,
'coverage_scores.csv'))
source_conseq = group_samples(os.path.join(run.source_path,
'conseq.csv'))
target_conseq = group_samples(os.path.join(run.target_path,
'conseq.csv'))
source_region_consensus = group_nucs(os.path.join(run.source_path,
'nuc.csv'))
target_region_consensus = group_nucs(os.path.join(run.target_path,
'nuc.csv'))
source_counts = group_samples(os.path.join(run.source_path,
'remap_counts.csv'))
target_counts = group_samples(os.path.join(run.target_path,
'remap_counts.csv'))
except FileNotFoundError as ex:
missing_files.append(str(ex))
continue
except Exception as ex:
bad_files.append((ex, run.target_path))
continue
sample_names = sorted(target_cascades.keys())
for sample_name in sample_names:
yield Sample(run,
sample_name,
SampleFiles(source_cascades.get(sample_name),
source_coverages.get(sample_name),
source_g2ps.get(sample_name),
source_region_consensus.get(sample_name),
source_counts.get(sample_name),
source_conseq.get(sample_name)),
SampleFiles(target_cascades.get(sample_name),
target_coverages.get(sample_name),
target_g2ps.get(sample_name),
target_region_consensus.get(sample_name),
target_counts.get(sample_name),
target_conseq.get(sample_name)))
if missing_targets:
print('Missing targets: ', missing_targets)
if missing_sources:
print('Missing sources: ', missing_sources)
print('\n'.join(missing_files))
if bad_files:
print(bad_files)
def group_samples(output_file_name):
with open(output_file_name) as output_file:
return group_samples_file(output_file)
def group_samples_file(output_file):
reader = csv.DictReader(output_file)
return {key: list(rows)
for key, rows in groupby(reader, itemgetter('sample'))}
def group_nucs(output_file_name):
with open(output_file_name) as output_file:
return group_nucs_file(output_file)
def group_nucs_file(output_file):
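    # Returns {sample: {(seed, region): [(consensus_char, row), ...]}}, pairing each
    # nuc.csv row with the single character chosen by choose_consensus().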
reader = csv.DictReader(output_file)
groups = {}
for sample, sample_rows in groupby(reader, itemgetter('sample')):
sample_conseqs = {}
for seed, seed_rows in groupby(sample_rows, itemgetter('seed')):
for region, region_rows in groupby(seed_rows, itemgetter('region')):
consensus = [(choose_consensus(row), row)
for row in region_rows]
sample_conseqs[(seed, region)] = consensus
groups[sample] = sample_conseqs
return groups
def choose_consensus(nuc_row: dict) -> str:
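    # Summarize one nuc.csv row as a single character: 'x' when coverage is below
    # 100, otherwise the MAX-cutoff consensus base, with an 'i' suffix when more
    # than half of the reads report an insertion at this position.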
coverage = int(nuc_row['coverage'])
if coverage < 100:
return 'x'
nuc = SeedNucleotide()
for nuc_seq in nuc.COUNTED_NUCS:
source_nuc = 'del' if nuc_seq == '-' else nuc_seq
nuc.count_nucleotides(nuc_seq, int(nuc_row[source_nuc]))
consensus = nuc.get_consensus(MAX_CUTOFF)
if int(nuc_row['ins']) > coverage / 2:
consensus += 'i'
return consensus
def get_run_name(sample):
dirname = os.path.dirname(os.path.dirname(sample.run.target_path))
run_name = os.path.basename(dirname)
if run_name == 'Results':
run_name = os.path.basename(os.path.dirname(dirname))
return run_name
def compare_g2p(sample, diffs):
source_fields = sample.source_files.g2p_summary
target_fields = sample.target_files.g2p_summary
if source_fields == target_fields:
return
assert source_fields is not None, (sample.run, sample.name)
assert target_fields is not None, (sample.run, sample.name)
assert len(source_fields) == 1, source_fields
assert len(target_fields) == 1, target_fields
run_name = get_run_name(sample)
source_x4_pct = source_fields[0]['X4pct']
target_x4_pct = target_fields[0]['X4pct']
source_final = source_fields[0].get('final')
target_final = target_fields[0].get('final')
if source_final != target_final:
diffs.append('{}:{} G2P: {} {} => {} {}'.format(run_name,
sample.name,
source_final,
source_x4_pct,
target_final,
target_x4_pct))
return
if source_x4_pct == target_x4_pct:
return
try:
x4_pct_diff = abs(float(target_x4_pct) - float(source_x4_pct))
if x4_pct_diff < 2.0:
return
except ValueError:
pass
diffs.append('{}:{} G2P: {} => {}'.format(run_name,
sample.name,
source_x4_pct,
target_x4_pct))
def map_coverage(coverage_scores):
if coverage_scores is None:
return {}
filtered_scores = {(score['project'],
score['region']): (score['on.score'],
score.get('seed'),
score.get('which.key.pos'))
for score in coverage_scores}
if MICALL_VERSION == '7.14':
filtered_scores = {
(project, region): scores
for (project, region), scores in filtered_scores.items()
if project not in ('NFLHIVDNA', 'SARSCOV2')}
return filtered_scores
def map_remap_counts(counts):
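    # Reduce remap_counts rows whose 'type' looks like 'remap-1 <seed>' (assumed
    # format) to {seed: iteration number}; 'prelim' and 'remap-final' rows are
    # skipped, so the dict ends up recording the last numbered pass per seed.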
if counts is None:
return {}
row_parts = [row['type'].split() for row in counts]
count_parts = [(parts[0].split('-'), parts[1])
for parts in row_parts
if len(parts) == 2]
seed_counts = {parts[1]: int(parts[0][1])
for parts in count_parts
if len(parts[0]) == 2 and parts[0][1] != 'final'}
return seed_counts
def compare_coverage(sample, diffs, scenarios_reported, scenarios):
if sample.source_files.coverage_scores == sample.target_files.coverage_scores:
return
source_scores = map_coverage(sample.source_files.coverage_scores)
target_scores = map_coverage(sample.target_files.coverage_scores)
if source_scores == target_scores:
return
source_counts = map_remap_counts(sample.source_files.remap_counts)
target_counts = map_remap_counts(sample.target_files.remap_counts)
run_name = get_run_name(sample)
keys = sorted(set(source_scores.keys()) | target_scores.keys())
for key in keys:
(source_score,
source_seed,
source_key_pos) = source_scores.get(key, ('-', None, None))
(target_score,
target_seed,
target_key_pos) = target_scores.get(key, ('-', None, None))
source_compare = '-' if source_score == '1' else source_score
target_compare = '-' if target_score == '1' else target_score
if (source_compare == '-' and
sample.name.startswith('90308A') and
MICALL_VERSION == '7.11'):
# One sample failed in 7.10.
pass
elif source_compare != target_compare:
project, region = key
message = '{}:{} coverage: {} {} {} => {}'.format(
run_name,
sample.name,
project,
region,
source_score,
target_score)
scenario = ' ' + message + '\n'
if (source_counts != target_counts and
scenarios_reported & Scenarios.REMAP_COUNTS_CHANGED):
scenarios[Scenarios.REMAP_COUNTS_CHANGED].append(scenario)
else:
diffs.append(message)
def adjust_region(region):
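    # Collapse genotype-specific HCV names (e.g. 'HCV1A-H77-NS5a', assumed naming)
    # to 'HCV-<region>' so regions can be compared across genotypes.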
if region.startswith('HCV'):
parts = region.split('-')
return 'HCV-' + parts[-1]
return region
def is_consensus_interesting(region, cutoff):
if region.startswith('HLA-'):
return cutoff == '0.250'
return cutoff == 'MAX'
def display_consensus(sequence):
if sequence is None:
return []
return [sequence + '\n']
def calculate_distance(region, cutoff, sequence1, sequence2):
if sequence1 is None or sequence2 is None:
return
if len(sequence1) > len(sequence2):
sequence2 += '-' * (len(sequence1) - len(sequence2))
elif len(sequence2) > len(sequence1):
sequence1 += '-' * (len(sequence2) - len(sequence1))
distance = Levenshtein.distance(sequence1, sequence2)
return ConsensusDistance(region=region,
cutoff=cutoff,
distance=distance,
pct_diff=distance/len(sequence1)*100)
def has_big_prevalence_change(nuc: str, old_counts: dict, new_counts: dict):
""" Check if a nucleotide's prevalence has changed significantly.
True if it has changed by more than 5 percentage points.
Ignore deletions and low coverage. Ties in the MAX consensus are always
significant.
"""
if nuc in 'x-':
return False
if nuc not in 'ACTG':
return True
if old_counts['coverage'] == '0' or new_counts['coverage'] == '0':
return True
old_prevalence = int(old_counts[nuc]) / int(old_counts['coverage'])
new_prevalence = int(new_counts[nuc]) / int(new_counts['coverage'])
return 0.05 < abs(new_prevalence - old_prevalence)
def compare_consensus(sample: Sample,
diffs,
scenarios_reported,
scenarios,
use_denovo=False):
consensus_distances = []
source_seqs = filter_consensus_sequences(sample.source_files, use_denovo)
target_seqs = filter_consensus_sequences(sample.target_files, use_denovo)
run_name = get_run_name(sample)
source_counts = map_remap_counts(sample.source_files.remap_counts)
target_counts = map_remap_counts(sample.target_files.remap_counts)
keys = sorted(set(source_seqs.keys()) | target_seqs.keys())
primer_trackers = {}
for row in sample.target_files.conseq or ():
if row['consensus-percent-cutoff'] == 'MAX':
conseq = row['sequence']
offset = int(row['offset'])
conseq = 'x' * offset + conseq
seed_name = row['region']
primer_trackers[seed_name] = PrimerTracker(conseq, seed_name)
cutoff = MAX_CUTOFF
for key in keys:
seed, region = key
primer_tracker = primer_trackers.get(seed)
source_details = source_seqs.get(key)
target_details = target_seqs.get(key)
source_nucs = []
target_nucs = []
if source_details is None:
has_big_change = True
target_nucs = [nuc for nuc, row in target_details]
elif target_details is None:
has_big_change = True
source_nucs = [nuc for nuc, row in source_details]
else:
has_big_change = False
dummy_row = {'refseq.nuc.pos': '-1',
'coverage': '0'}
for source_item, target_item in zip_longest(source_details,
target_details,
fillvalue=('', dummy_row)):
source_consensus, source_row = source_item
target_consensus, target_row = target_item
if source_consensus != target_consensus:
if has_big_prevalence_change(source_consensus,
source_row,
target_row):
has_big_change = True
else:
source_consensus = source_consensus.lower()
if has_big_prevalence_change(target_consensus,
target_row,
source_row):
has_big_change = True
else:
target_consensus = target_consensus.lower()
if not has_big_change and 'x' in (source_consensus,
target_consensus):
pos = int(target_row['query.nuc.pos'])
is_ignored = primer_tracker and primer_tracker.is_ignored(pos)
is_dropping = target_consensus == 'x'
if MICALL_VERSION == '7.14' and is_ignored and is_dropping:
pass
else:
old_coverage = int(source_row['coverage'])
new_coverage = int(target_row['coverage'])
has_big_change = (old_coverage < new_coverage/2 or
new_coverage < old_coverage/2)
source_nucs.append(source_consensus)
target_nucs.append(target_consensus)
source_seq = ''.join(source_nucs) or None
target_seq = ''.join(target_nucs) or None
consensus_distance = calculate_distance(region,
cutoff,
source_seq,
target_seq)
if False and consensus_distance.pct_diff > 5:
print(sample.run.source_path, sample.name, consensus_distance)
print(source_seq)
print(target_seq)
if consensus_distance is not None:
consensus_distances.append(consensus_distance)
if not has_big_change:
continue
is_main = is_consensus_interesting(region, cutoff)
trimmed_source_seq = (source_seq and
source_seq.replace('-', '').replace('x', ''))
trimmed_target_seq = (target_seq and
target_seq.replace('-', '').replace('x', ''))
if (trimmed_source_seq == trimmed_target_seq and
(scenarios_reported & Scenarios.CONSENSUS_DELETIONS_CHANGED)):
scenarios[Scenarios.CONSENSUS_DELETIONS_CHANGED].append('.')
continue
if source_seq and target_seq:
stripped_source_seq = source_seq.strip('x')
stripped_target_seq = target_seq.strip('x')
if stripped_source_seq in stripped_target_seq:
scenarios[Scenarios.CONSENSUS_EXTENDED].append('.')
continue
if (use_denovo and
is_main and
seed == 'HIV1-B' and
region == 'HIV1B-vpr' and
(scenarios_reported & Scenarios.VPR_FRAME_SHIFT_FIXED)):
if source_seq and source_seq[212] == '-':
source_seq = source_seq[:212] + source_seq[213:273]
target_seq = target_seq and target_seq[:272]
if source_seq == target_seq:
scenarios[Scenarios.VPR_FRAME_SHIFT_FIXED].append('.')
continue
if (source_counts != target_counts and
is_main and
scenarios_reported & Scenarios.REMAP_COUNTS_CHANGED):
scenario = f' {run_name}:{sample.name} consensus {region}\n'
scenarios[Scenarios.REMAP_COUNTS_CHANGED].append(scenario)
elif is_main and scenarios_reported & Scenarios.MAIN_CONSENSUS_CHANGED:
scenarios[Scenarios.MAIN_CONSENSUS_CHANGED].append('.')
elif (not is_main and
scenarios_reported & Scenarios.OTHER_CONSENSUS_CHANGED):
scenarios[Scenarios.OTHER_CONSENSUS_CHANGED].append('.')
else:
diffs.append('{}:{} consensus: {} {} {}'.format(run_name,
sample.name,
seed,
region,
cutoff))
diff = list(differ.compare(display_consensus(source_seq),
display_consensus(target_seq)))
diffs.extend(line.rstrip() for line in diff)
return consensus_distances
def filter_consensus_sequences(files, use_denovo):
region_consensus = files.region_consensus
coverage_scores = files.coverage_scores
if not (region_consensus and coverage_scores):
return {}
if MICALL_VERSION == '7.14':
coverage_scores = [row
for row in coverage_scores
if row['project'] != 'SARSCOV2']
covered_regions = {(row.get('seed'), row.get('region'))
for row in coverage_scores
if row['on.score'] == '4'}
return {adjust_seed(seed, region, use_denovo): consensus
for (seed, region), consensus in region_consensus.items()
if (seed, region) in covered_regions}
def adjust_seed(seed: str, region: str, use_denovo: bool):
if use_denovo:
if region == 'V3LOOP':
seed = 'some-HIV-seed'
elif seed.startswith('HIV'):
seed = '-'.join(seed.split('-')[:2]) + '-?-seed'
return seed, region
def compare_sample(sample,
scenarios_reported=Scenarios.NONE,
use_denovo=False):
scenarios = defaultdict(list)
diffs = []
compare_g2p(sample, diffs)
compare_coverage(sample, diffs, scenarios_reported, scenarios)
consensus_distances = compare_consensus(sample,
diffs,
scenarios_reported,
scenarios,
use_denovo)
diffs.append('')
return SampleComparison(diffs='\n'.join(diffs),
scenarios=scenarios,
consensus_distances=consensus_distances)
def format_cutoff(row):
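    # e.g. {'cutoff': '0.250', 'count': 7} -> '25_7'; non-numeric cutoffs such as
    # 'MAX' pass through unchanged ('MAX_7').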
cutoff = row['cutoff']
count = row['count']
try:
cutoff = float(cutoff)
cutoff = int(100 * cutoff)
cutoff = str(cutoff)
except ValueError:
pass
return cutoff + '_' + str(count)
def plot_distances(distance_data, filename, title, plot_variable='distance'):
seeds = sorted(set(distance_data['region']))
distance_data = distance_data.sort_values(['region', 'cutoff'])
sns.set()
num_plots = len(seeds)
figure, axes_sets = plt.subplots(nrows=num_plots, ncols=1, squeeze=False)
axes_sets = list(chain(*axes_sets)) # 2-dim array -> 1-dim list
for ax, seed in zip(axes_sets, seeds):
seed_data = distance_data[distance_data['region'] == seed]
seed_data = seed_data.assign(
count=lambda df: df['cutoff'].map(
df.groupby(by=['cutoff'])[plot_variable].count()))
seed_data['cutoff_n'] = seed_data.apply(format_cutoff, 'columns')
sns.violinplot(x='cutoff_n',
y=plot_variable,
data=seed_data,
cut=0,
alpha=0.7,
ax=ax)
plt.setp(ax.lines, zorder=100)
plt.setp(ax.collections, zorder=100)
sns.swarmplot(x='cutoff_n',
y=plot_variable,
data=seed_data,
color='k',
ax=ax)
ax.set_ylabel(seed + '\n' + plot_variable)
axes_sets[0].set_title(title)
plt.savefig(filename)
def main():
print('Starting.')
args = parse_args()
with ProcessPoolExecutor() as pool:
runs = find_runs(args.source_folder, args.target_folder, args.denovo)
runs = report_source_versions(runs)
samples = read_samples(runs)
# noinspection PyTypeChecker
scenarios_reported = (Scenarios.OTHER_CONSENSUS_CHANGED |
Scenarios.CONSENSUS_DELETIONS_CHANGED |
Scenarios.VPR_FRAME_SHIFT_FIXED |
Scenarios.CONSENSUS_EXTENDED |
Scenarios.REMAP_COUNTS_CHANGED)
results = pool.map(partial(compare_sample,
scenarios_reported=scenarios_reported,
use_denovo=args.denovo),
samples,
chunksize=50)
scenario_summaries = defaultdict(list)
i = 0
all_consensus_distances = []
report_limit = 1000
report_count = 0
for i, (report, scenarios, consensus_distances) in enumerate(results, 1):
if report:
report_count += 1
if report_count > report_limit:
print(f'More than {report_limit} reports, skipping the rest.')
break
print(report, end='')
all_consensus_distances.extend(consensus_distances)
for key, messages in scenarios.items():
scenario_summaries[key] += scenarios[key]
for key, messages in sorted(scenario_summaries.items()):
if messages:
sample_names = {message.split()[0] for message in messages}
summary = [key, len(messages), 'changes']
body = ''.join(messages).rstrip('.')
if body:
summary.extend(['in', len(sample_names), 'samples'])
print(*summary, end='.\n')
print(body, end='')
report_distances(all_consensus_distances)
print('Finished {} samples.'.format(i))
def report_distances(all_consensus_distances):
if not all_consensus_distances:
return
distance_data = pd.DataFrame(all_consensus_distances)
non_zero_distances = distance_data[distance_data['distance'] != 0]
region_names = sorted(non_zero_distances['region'].unique())
names_iter = iter(region_names)
for page_num, region_group in enumerate(zip_longest(names_iter, names_iter, names_iter), 1):
group_distances = distance_data[distance_data['region'].isin(region_group)]
plot_distances(group_distances,
'consensus_distances_{}.svg'.format(page_num),
'Consensus Distances Between Previous and v' + MICALL_VERSION)
plot_distances(group_distances,
'consensus_diffs_{}.svg'.format(page_num),
'Consensus Differences Between Previous and v' + MICALL_VERSION,
'pct_diff')
if __name__ == '__main__':
main()
| agpl-3.0 |
wzbozon/statsmodels | statsmodels/sandbox/tests/test_predict_functional.py | 29 | 12873 | from statsmodels.sandbox.predict_functional import predict_functional
import numpy as np
import pandas as pd
import statsmodels.api as sm
from numpy.testing import dec
# If true, the output is written to a multi-page pdf file.
pdf_output = False
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
def pctl(q):
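    # pctl(0.75)(x) is equivalent to np.percentile(x, 75)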
return lambda x : np.percentile(x, 100 *q)
class TestPredFunc(object):
@classmethod
def setup_class(cls):
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
cls.pdf = PdfPages("predict_functional.pdf")
@classmethod
def teardown_class(cls):
if pdf_output:
cls.pdf.close()
def close_or_save(self, fig):
if pdf_output:
self.pdf.savefig(fig)
else:
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_formula(self):
np.random.seed(542)
n = 500
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
x4 = np.random.randint(0, 5, size=n)
x4 = np.asarray(["ABCDE"[i] for i in x4])
x5 = np.random.normal(size=n)
y = 0.3*x2**2 + (x4 == "B") + 0.1*(x4 == "B")*x2**2 + x5 + np.random.normal(size=n)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3, "x4": x4, "x5": x5})
fml = "y ~ x1 + bs(x2, df=4) + x3 + x2*x3 + I(x1**2) + C(x4) + C(x4)*bs(x2, df=4) + x5"
model = sm.OLS.from_formula(fml, data=df)
result = model.fit()
summaries = {"x1": np.mean, "x3": pctl(0.75), "x5": np.mean}
values = {"x4": "B"}
pr1, ci1, fvals1 = predict_functional(result, "x2", summaries, values)
values = {"x4": "C"}
pr2, ci2, fvals2 = predict_functional(result, "x2", summaries, values)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x4=B')
plt.plot(fvals2, pr2, '-', label='x4=C')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x4=B')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals2, pr2, '-', label='x4=C')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_lm_contrast(self):
np.random.seed(542)
n = 200
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
y = x1 + 2*x2 + x3 - x1*x2 + x2*x3 + np.random.normal(size=n)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + x2 + x3 + x1*x2 + x2*x3"
model = sm.OLS.from_formula(fml, data=df)
result = model.fit()
values = {"x2": 1, "x3": 1} # y = 4
values2 = {"x2": 0, "x3": 0} # y = x1
pr, cb, fvals = predict_functional(result, "x1", values=values,
values2=values2, ci_method='scheffe')
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.67, 0.8])
plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
plt.plot(fvals, 4 - fvals, '-', label="Truth", color='lime', lw=4)
plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Mean contrast", size=15)
plt.title("Linear model contrast")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_glm_formula_contrast(self):
np.random.seed(542)
n = 50
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
mn = 5 + 0.1*x1 + 0.1*x2 + 0.1*x3 - 0.1*x1*x2
y = np.random.poisson(np.exp(mn), size=len(mn))
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + x2 + x3 + x1*x2"
model = sm.GLM.from_formula(fml, data=df, family=sm.families.Poisson())
result = model.fit()
values = {"x2": 1, "x3": 1} # y = 5.2
values2 = {"x2": 0, "x3": 0} # y = 5 + 0.1*x1
pr, cb, fvals = predict_functional(result, "x1", values=values,
values2=values2, ci_method='simultaneous')
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.67, 0.8])
plt.plot(fvals, pr, '-', label="Estimate", color='orange', lw=4)
plt.plot(fvals, 0.2 - 0.1*fvals, '-', label="Truth", color='lime', lw=4)
plt.fill_between(fvals, cb[:, 0], cb[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Linear predictor contrast", size=15)
plt.title("Poisson regression contrast")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_scb(self):
np.random.seed(473)
n = 100
x = np.random.normal(size=(n,4))
x[:, 0] = 1
for fam_name in "poisson", "binomial", "gaussian":
if fam_name == "poisson":
y = np.random.poisson(20, size=n)
fam = sm.families.Poisson()
true_mean = 20
true_lp = np.log(20)
elif fam_name == "binomial":
y = 1 * (np.random.uniform(size=n) < 0.5)
fam = sm.families.Binomial()
true_mean = 0.5
true_lp = 0
elif fam_name == "gaussian":
y = np.random.normal(size=n)
fam = sm.families.Gaussian()
true_mean = 0
true_lp = 0
model = sm.GLM(y, x, family=fam)
result = model.fit()
# CB is for linear predictor or mean response
for linear in False, True:
true = true_lp if linear else true_mean
values = {'const': 1, "x2": 0}
summaries = {"x3": np.mean}
pred1, cb1, fvals1 = predict_functional(result, "x1",
values=values, summaries=summaries, linear=linear)
pred2, cb2, fvals2 = predict_functional(result, "x1",
values=values, summaries=summaries,
ci_method='simultaneous', linear=linear)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.58, 0.8])
plt.plot(fvals1, pred1, '-', color='black', label='Estimate')
plt.plot(fvals1, true * np.ones(len(pred1)), '-', color='purple',
label='Truth')
plt.plot(fvals1, cb1[:, 0], color='blue', label='Pointwise CB')
plt.plot(fvals1, cb1[:, 1], color='blue')
plt.plot(fvals2, cb2[:, 0], color='green', label='Simultaneous CB')
plt.plot(fvals2, cb2[:, 1], color='green')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Linear predictor", size=15)
else:
plt.ylabel("Fitted mean", size=15)
plt.title("%s family prediction" % fam_name.capitalize())
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_glm_formula(self):
np.random.seed(542)
n = 500
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.randint(0, 3, size=n)
x3 = np.asarray(["ABC"[i] for i in x3])
lin_pred = -1 + 0.5*x1**2 + (x3 == "B")
prob = 1 / (1 + np.exp(-lin_pred))
y = 1 * (np.random.uniform(size=n) < prob)
df = pd.DataFrame({"y": y, "x1": x1, "x2": x2, "x3": x3})
fml = "y ~ x1 + I(x1**2) + x2 + C(x3)"
model = sm.GLM.from_formula(fml, family=sm.families.Binomial(), data=df)
result = model.fit()
summaries = {"x2": np.mean}
for linear in False, True:
values = {"x3": "B"}
pr1, ci1, fvals1 = predict_functional(result, "x1", summaries, values, linear=linear)
values = {"x3": "C"}
pr2, ci2, fvals2 = predict_functional(result, "x1", summaries, values, linear=linear)
exact1 = -1 + 0.5*fvals1**2 + 1
exact2 = -1 + 0.5*fvals2**2
if not linear:
exact1 = 1 / (1 + np.exp(-exact1))
exact2 = 1 / (1 + np.exp(-exact2))
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x3=B')
plt.plot(fvals2, pr2, '-', label='x3=C')
plt.plot(fvals1, exact1, '-', label='x3=B (exact)')
plt.plot(fvals2, exact2, '-', label='x3=C (exact)')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Fitted linear predictor", size=15)
else:
plt.ylabel("Fitted probability", size=15)
plt.title("Binomial GLM prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x3=B', color='orange')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals2, pr2, '-', label='x3=C', color='lime')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
if linear:
plt.ylabel("Fitted linear predictor", size=15)
else:
plt.ylabel("Fitted probability", size=15)
plt.title("Binomial GLM prediction")
self.close_or_save(fig)
@dec.skipif(not have_matplotlib)
def test_noformula_prediction(self):
np.random.seed(6434)
n = 200
x1 = np.random.normal(size=n)
x2 = np.random.normal(size=n)
x3 = np.random.normal(size=n)
y = x1 - x2 + np.random.normal(size=n)
exog = np.vstack((x1, x2, x3)).T
model = sm.OLS(y, exog)
result = model.fit()
summaries = {"x3": pctl(0.75)}
values = {"x2": 1}
pr1, ci1, fvals1 = predict_functional(result, "x1", summaries, values)
values = {"x2": -1}
pr2, ci2, fvals2 = predict_functional(result, "x1", summaries, values)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x2=1', lw=4, alpha=0.6, color='orange')
plt.plot(fvals2, pr2, '-', label='x2=-1', lw=4, alpha=0.6, color='lime')
ha, lb = ax.get_legend_handles_labels()
leg = plt.figlegend(ha, lb, "center right")
leg.draw_frame(False)
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
plt.clf()
fig = plt.figure()
ax = plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(fvals1, pr1, '-', label='x2=1', lw=4, alpha=0.6, color='orange')
plt.fill_between(fvals1, ci1[:, 0], ci1[:, 1], color='grey')
plt.plot(fvals1, pr2, '-', label='x2=1', lw=4, alpha=0.6, color='lime')
plt.fill_between(fvals2, ci2[:, 0], ci2[:, 1], color='grey')
ha, lb = ax.get_legend_handles_labels()
plt.figlegend(ha, lb, "center right")
plt.xlabel("Focus variable", size=15)
plt.ylabel("Fitted mean", size=15)
plt.title("Linear model prediction")
self.close_or_save(fig)
| bsd-3-clause |
billy-inn/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
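    # Convolving with each one-hot 3x3 kernel shifts every 8x8 image by one pixel
    # in that direction; mode='constant' fills the vacated edge with zeros.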
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
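# Illustrative shapes (hypothetical run): digits.data is (1797, 64), so after
# nudging X becomes (8985, 64) and Y is repeated to length 8985.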
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
dougalsutherland/skl-groups | docs/conf.py | 1 | 8992 | # -*- coding: utf-8 -*-
#
# skl-groups documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 9 15:38:37 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.dirname(os.path.abspath('.')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'skl-groups'
copyright = u'2014, Dougal J. Sutherland'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'skl-groupsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'skl-groups.tex', u'skl-groups Documentation',
u'Dougal J. Sutherland', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'skl-groups', u'skl-groups Documentation',
[u'Dougal J. Sutherland'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'skl-groups', u'skl-groups Documentation',
u'Dougal J. Sutherland', 'skl-groups', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
numpydoc_show_class_members = False
autosummary_generate = True
autodoc_default_flags = ['members']
mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
todo_include_todos = True
intersphinx_mapping = {
'python': ('http://docs.python.org/3', None),
'sklearn': ('http://scikit-learn.org/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| bsd-3-clause |
vantares/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
from pandas import DataFrame, Index
import numpy as np
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self,parent=None):
super(DataFrameModel,self).__init__(parent)
self.df = DataFrame()
self.columnFormat = {} # format columns
def setFormat(self,fmt):
"""
set string formatting for the output
example : format = {'close':"%.2f"}
"""
self.columnFormat = fmt
def setDataFrame(self,dataFrame):
self.df = dataFrame
self.signalUpdate()
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not efficient)'''
self.layoutChanged.emit()
def __repr__(self):
return str(self.df)
def setData(self,index,value, role=Qt.EditRole):
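        # Convert the incoming QVariant to the column's dtype (float, int or str)
        # before writing the value back into the underlying DataFrame.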
if index.isValid():
row,column = index.row(), index.column()
dtype = self.df.dtypes.tolist()[column] # get column dtype
if np.issubdtype(dtype,np.float):
val,ok = value.toFloat()
elif np.issubdtype(dtype,np.int):
val,ok = value.toInt()
else:
val = value.toString()
ok = True
if ok:
self.df.iloc[row,column] = val
return True
return False
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(
QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def appendRow(self, index, data=0):
self.df.loc[index,:] = data
self.signalUpdate()
def deleteRow(self, index):
idx = self.df.index[index]
#self.beginRemoveRows(QModelIndex(), index,index)
#self.df = self.df.drop(idx,axis=0)
#self.endRemoveRows()
#self.signalUpdate()
#------------- table display functions -----------------
def headerData(self,section,orientation,role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
#return self.df.index.tolist()
return str(self.df.index.tolist()[section])
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
elm = col[index.row()]
#elm = self.df.ix[index.row(),index.column()]
if self.df.columns[index.column()] in self.columnFormat.keys():
return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
else:
return QVariant(str(elm))
def sort(self,nCol,order):
self.layoutAboutToBeChanged.emit()
if order == Qt.AscendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
elif order == Qt.DescendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
self.layoutChanged.emit()
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("delete row")
Action.triggered.connect(self.deleteRow)
menu.exec_(event.globalPos())
def deleteRow(self):
print "Action triggered from " + self.name
print 'Selected rows:'
for idx in self.selectionModel().selectedRows():
print idx.row()
# self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self,name='DataFrameTable1', parent=None):
super(DataFrameWidget,self).__init__(parent)
self.name = name
self.dataModel = DataFrameModel()
self.dataModel.setDataFrame(DataFrame())
self.dataTable = QTableView()
#self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setSortingEnabled(True)
self.dataTable.setModel(self.dataModel)
self.dataModel.signalUpdate()
#self.dataTable.setFont(QFont("Courier New", 8))
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
def setFormat(self,fmt):
""" set non-default string formatting for a column """
for colName, f in fmt.iteritems():
self.dataModel.columnFormat[colName]=f
def fitColumns(self):
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
def setDataFrame(self,df):
self.dataModel.setDataFrame(df)
def resizeColumnsToContents(self):
self.dataTable.resizeColumnsToContents()
def insertRow(self,index, data=None):
self.dataModel.appendRow(index,data)
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
df = testDf() # make up some data
self.table = DataFrameWidget(parent=self)
self.table.setDataFrame(df)
#self.table.resizeColumnsToContents()
self.table.fitColumns()
self.table.setFormat({'float': '%.2f'})
#buttons
#but_add = QPushButton('Add')
but_test = QPushButton('Test')
but_test.clicked.connect(self.testFcn)
hbox = QHBoxLayout()
#hbox.addself.table(but_add)
hbox.addWidget(but_test)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(hbox)
self.setLayout(layout)
def testFcn(self):
print 'test function'
self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
12AngryMen/votca-scripts | xtp/xtp_Molpol_pattern.py | 2 | 2265 | #!/usr/bin/env python
import sqlite3
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
import os.path
import numpy.linalg as lg
import argparse as ap
parser=ap.ArgumentParser(description="reads in pdb or gro file and creates a molpol pattern from it")
parser.add_argument("-f","--strucfile",required=True,help=".pdb or grofile file to read residues from")
parser.add_argument("--type",type=str,required=True,nargs="+",help="Residue names which should not be scaled")
args=parser.parse_args()
filetype=os.path.splitext(args.strucfile)[1]
if type(args.type)==str:
args.type=[args.type]
print args.type
print "The residues {} are not scaled".format(" ".join(args.type))
print "Opening {}".format(args.strucfile)
if filetype==".pdb":
with open(args.strucfile,"r") as f:
lines=f.readlines()
pattern=[]
usedresidue=[]
lastresname=None
for line in lines:
if "ATOM" in line or "HETATM" in line:
resname=str(line[17:20]).strip()
if lastresname!=resname and lastresname!=None:
usedresidue.append(lastresname)
if resname in usedresidue:
break
if resname in args.type:
pattern.append("N")
else:
pattern.append("Y")
lastresname=resname
elif filetype==".gro":
with open(args.strucfile,"r") as f:
lines=f.readlines()
pattern=[]
usedresidue=[]
lastresname=None
for line in lines:
if len(line.split())>4:
resname=str(line[5:11]).strip()
if lastresname!=resname and lastresname!=None:
usedresidue.append(lastresname)
#print usedresidue
if resname in usedresidue:
break
if resname in args.type:
pattern.append("N")
else:
pattern.append("Y")
lastresname=resname
else:
print "Format {} not known, use either pdb or gro".format(filetype)
sys.exit()
print "Pattern is:"
print " ".join(pattern)
| apache-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/indexes/multi/test_get_set.py | 1 | 15757 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import range
import pandas as pd
from pandas import CategoricalIndex, Index, MultiIndex
import pandas.util.testing as tm
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
def test_get_level_number_integer(idx):
idx.names = [1, 0]
assert idx._get_level_number(1) == 0
assert idx._get_level_number(0) == 1
pytest.raises(IndexError, idx._get_level_number, 2)
with pytest.raises(KeyError, match='Level fourth not found'):
idx._get_level_number('fourth')
def test_get_level_values(idx):
result = idx.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = idx.get_level_values('first')
expected = idx.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
codes=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_value_duplicates():
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_level_values_all_na():
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_int_with_na():
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na():
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_set_name_methods(idx, index_names):
# so long as these are synonyms, we don't need to test set_names
assert idx.rename == idx.set_names
new_names = [name + "SUFFIX" for name in index_names]
ind = idx.set_names(new_names)
assert idx.names == index_names
assert ind.names == new_names
with pytest.raises(ValueError, match="^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = idx.set_names(new_names[0], level=0)
assert idx.names == index_names
assert ind.names == [new_names[0], index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], index_names[1]]
# set names for multiple levels
ind = idx.set_names(new_names, level=[0, 1])
assert idx.names == index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
def test_set_levels_codes_directly(idx):
# setting levels/codes directly raises AttributeError
levels = idx.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
codes = idx.codes
major_codes, minor_codes = codes
major_codes = [(x + 1) % 3 for x in major_codes]
minor_codes = [(x + 1) % 1 for x in minor_codes]
new_codes = [major_codes, minor_codes]
with pytest.raises(AttributeError):
idx.levels = new_levels
with pytest.raises(AttributeError):
idx.codes = new_codes
def test_set_levels(idx):
# side note - you probably wouldn't want to use levels and codes
# directly like this - but it is possible.
levels = idx.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
# level changing [w/o mutation]
ind2 = idx.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# level changing [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = idx.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(idx.levels, levels)
ind2 = idx.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(idx.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = idx.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# level changing specific level [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(idx.levels, levels)
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(idx.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = idx.copy()
for inplace in [True, False]:
with pytest.raises(ValueError, match="^On"):
idx.set_levels(['c'], level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels,
check_dtype=True)
with pytest.raises(ValueError, match="^On"):
idx.set_codes([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(idx.codes, original_index.codes,
check_dtype=True)
with pytest.raises(TypeError, match="^Levels"):
idx.set_levels('c', level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels,
check_dtype=True)
with pytest.raises(TypeError, match="^Codes"):
idx.set_codes(1, level=0, inplace=inplace)
assert_matching(idx.codes, original_index.codes,
check_dtype=True)
def test_set_codes(idx):
# side note - you probably wouldn't want to use levels and codes
# directly like this - but it is possible.
codes = idx.codes
major_codes, minor_codes = codes
major_codes = [(x + 1) % 3 for x in major_codes]
minor_codes = [(x + 1) % 1 for x in minor_codes]
new_codes = [major_codes, minor_codes]
# changing codes w/o mutation
ind2 = idx.set_codes(new_codes)
assert_matching(ind2.codes, new_codes)
assert_matching(idx.codes, codes)
# changing label w/ mutation
ind2 = idx.copy()
inplace_return = ind2.set_codes(new_codes, inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, new_codes)
# codes changing specific level w/o mutation
ind2 = idx.set_codes(new_codes[0], level=0)
assert_matching(ind2.codes, [new_codes[0], codes[1]])
assert_matching(idx.codes, codes)
ind2 = idx.set_codes(new_codes[1], level=1)
assert_matching(ind2.codes, [codes[0], new_codes[1]])
assert_matching(idx.codes, codes)
# codes changing multiple levels w/o mutation
ind2 = idx.set_codes(new_codes, level=[0, 1])
assert_matching(ind2.codes, new_codes)
assert_matching(idx.codes, codes)
# label changing specific level w/ mutation
ind2 = idx.copy()
inplace_return = ind2.set_codes(new_codes[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, [new_codes[0], codes[1]])
assert_matching(idx.codes, codes)
ind2 = idx.copy()
inplace_return = ind2.set_codes(new_codes[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, [codes[0], new_codes[1]])
assert_matching(idx.codes, codes)
# codes changing multiple levels [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_codes(new_codes, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.codes, new_codes)
assert_matching(idx.codes, codes)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_codes = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_codes])
# [w/o mutation]
result = ind.set_codes(codes=new_codes, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_codes(codes=new_codes, level=1, inplace=True)
assert result.equals(expected)
with tm.assert_produces_warning(FutureWarning):
ind.set_codes(labels=new_codes, level=1)
def test_set_labels_deprecated():
# GH23752
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
with tm.assert_produces_warning(FutureWarning):
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
with tm.assert_produces_warning(FutureWarning):
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_codes_names_bad_input(idx):
levels, codes = idx.levels, idx.codes
names = idx.names
with pytest.raises(ValueError, match='Length of levels'):
idx.set_levels([levels[0]])
with pytest.raises(ValueError, match='Length of codes'):
idx.set_codes([codes[0]])
with pytest.raises(ValueError, match='Length of names'):
idx.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_codes(codes[0])
# shouldn't scalar data error, instead should demand list-like
with pytest.raises(TypeError, match='list-like'):
idx.set_names(names[0])
# should have equal lengths
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_levels(levels[0], level=[0, 1])
with pytest.raises(TypeError, match='list-like'):
idx.set_levels(levels, level=0)
# should have equal lengths
with pytest.raises(TypeError, match='list of lists-like'):
idx.set_codes(codes[0], level=[0, 1])
with pytest.raises(TypeError, match='list-like'):
idx.set_codes(codes, level=0)
# should have equal lengths
with pytest.raises(ValueError, match='Length of names'):
idx.set_names(names[0], level=[0, 1])
with pytest.raises(TypeError, match='Names must be a'):
idx.set_names(names, level=0)
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
codes=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('ordered', [True, False])
def test_set_levels_categorical(ordered):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
codes=index.codes)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_set_value_keeps_names():
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_set_levels_with_iterable():
# GH23273
sizes = [1, 2, 3]
colors = ['black'] * 3
index = pd.MultiIndex.from_arrays([sizes, colors], names=['size', 'color'])
result = index.set_levels(map(int, ['3', '2', '1']), level='size')
expected_sizes = [3, 2, 1]
expected = pd.MultiIndex.from_arrays([expected_sizes, colors],
names=['size', 'color'])
tm.assert_index_equal(result, expected)
| bsd-3-clause |
fabriziocosta/EDeN | setup.py | 2 | 3484 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import subprocess
import re
from setuptools import setup
from setuptools.command.sdist import sdist as _sdist
from setuptools.command.install import install as _install
VERSION_PY = """
# This file is originally generated from Git information by running 'setup.py
# version'. Distribution tarballs contain a pre-generated copy of this file.
__version__ = '%s'
"""
def update_version_py():
if not os.path.isdir(".git"):
print("This does not appear to be a Git repository.")
return
try:
# p = subprocess.Popen(["git", "describe","--tags", "--always"],
# stdout=subprocess.PIPE)
p = subprocess.Popen("git rev-list HEAD --count".split(),
stdout=subprocess.PIPE)
except EnvironmentError:
print("unable to run git, leaving eden/_version.py alone")
return
stdout = p.communicate()[0]
if p.returncode != 0:
print("unable to run git, leaving eden/_version.py alone")
return
ver = "0.3."+stdout.strip()
# ver = str(int(ver,16)) # pypi doesnt like base 16
f = open("eden/_version.py", "w")
f.write(VERSION_PY % ver)
f.close()
print("set eden/_version.py to '%s'" % ver)
def get_version():
try:
f = open("eden/_version.py")
except EnvironmentError:
return None
for line in f.readlines():
mo = re.match("__version__ = '([^']+)'", line)
if mo:
ver = mo.group(1)
return ver
return None
class sdist(_sdist):
def run(self):
update_version_py()
self.distribution.metadata.version = get_version()
return _sdist.run(self)
class install(_install):
def run(self):
_install.run(self)
def checkProgramIsInstalled(self, program, args, where_to_download,
affected_tools):
try:
subprocess.Popen([program, args], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
return True
except EnvironmentError:
# handle file not found error.
# the config file is installed in:
msg = "\n**{0} not found. This " \
"program is needed for the following "\
"tools to work properly:\n"\
" {1}\n"\
"{0} can be downloaded from here:\n " \
" {2}\n".format(program, affected_tools,
where_to_download)
sys.stderr.write(msg)
except Exception as e:
sys.stderr.write("Error: {}".format(e))
setup(
name='eden-kernel',
version=get_version(),
author='Fabrizio Costa',
author_email='[email protected]',
packages=['eden',
'eden.ml',
'eden.display',
'eden.io'
],
scripts=[],
include_package_data=True,
package_data={},
url='https://github.com/smautner/',
license='LICENSE',
description='Explicit Decomposition with Neighborhoods',
long_description='',
install_requires=[
"dill",
"future",
"joblib",
"toolz",
"matplotlib",
"networkx >= 2.0",
"numpy >= 1.10.4",
"requests",
"scikit-learn >= 0.18.2",
"scipy >= 0.14.0",
],
cmdclass={'sdist': sdist, 'install': install}
)
| mit |
eickenberg/sklearn-theano | sklearn_theano/sandbox/logistic_regression.py | 9 | 1526 | import numpy as np
from theano import tensor as T
import theano
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
rng = np.random.RandomState(1999)
X, y = make_classification(n_samples=400, n_features=25, n_informative=10,
n_classes=2, n_clusters_per_class=2,
random_state=1999)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.8)
n_samples, n_features = X_train.shape
x = T.matrix('x')
y = T.vector('y')
w = theano.shared(rng.randn(n_features), name='w')
b = theano.shared(0., name='b')
print("Initial model")
print(w.get_value(), b.get_value())
learning_rate = 0.01
reg = .1
n_iter = 10000
prob = 1 / (1 + T.exp(-T.dot(x, w) - b))
pred = prob > 0.5
loss = -y * T.log(prob) - (1 - y) * T.log(1 - prob)
# l2
# penalty = reg * (w ** 2).sum()
# l1
penalty = reg * abs(w).sum()
# l0
# penalty = reg * T.neq(w, 0).sum()
cost = loss.mean() + penalty
gw, gb = T.grad(cost, [w, b])
train = theano.function(inputs=[x, y], outputs=[pred, loss],
updates=((w, w - learning_rate * gw),
(b, b - learning_rate * gb)))
predict = theano.function(inputs=[x], outputs=pred)
for i in range(n_iter):
pred, err = train(X_train, y_train)
print("Final model:")
print(w.get_value(), b.get_value())
print("Report:")
y_pred = predict(X_test)
report = classification_report(y_test, y_pred)
print(report)
| bsd-3-clause |
inasio/Inverse-Sierpinski | hex-sierpinsky.py | 1 | 4984 | import pylab as pb
import numpy as np
import matplotlib.pyplot as plt
#from pycallgraph import PyCallGraph
#from pycallgraph.output import GraphvizOutput
from time import time
def plot_triangle(x,y):
#side = 2*np.sqrt((x[1]-x[0])**2 + (y[1]-y[0])**2)
#side = 1 - np.sqrt(x[1]**2 + y[1]**2)
side = 0.2
pb.plot(x,y,'k',linewidth=side)
pb.plot(x[2::-2],y[2::-2],'k',linewidth=side)
def compute_triangle_inverse(triangles, params):
    """Map every vertex (x, y) to (x, y)/(|x|**a + |y|**a)**b, a circle-inversion-like transform."""
    i1, i2, i3 = triangles.shape
    tr_inverse = np.zeros([i1, i2, i3])
    a, b = params
    for i in xrange(i3):
        # scale each triangle's x and y rows by the generalised norm of its vertices
        tr_inverse[:, :, i] = triangles[:, :, i]/(abs(triangles[0, :, i])**a + abs(triangles[1, :, i])**a)**b
    return tr_inverse
# norms = (abs(x)**a + abs(y)**a)**b
# x = x/norms
# y = y/norms
# x0 = np.concatenate([x, [x[0]]])
# y0 = np.concatenate([y, [y[0]]])
# pb.plot(y0,x0,'k',linewidth=0.4)
def midpoint(x1, x2):
return [(x1[0] + x1[1])/2., (x2[0] + x2[1])/2.]
def reproduce_triangles(triangles):
X, Y, Z = triangles.shape
counter = 0
new_triangles = np.zeros([2, 4, Z*3])
for i in xrange(Z):
x = triangles[0,:3,i]
y = triangles[1,:3,i]
midp01 = midpoint(x[0:2], y[0:2])
midp02 = midpoint(x[0::2], y[0::2])
midp12 = midpoint(x[1:], y[1:])
new_triangles[:,:3,counter] = np.array([[x[0], midp01[0], midp02[0]],[y[0], midp01[1], midp02[1]]])
new_triangles[:,3, counter] = x[0], y[0]
new_triangles[:,:3,counter+1] = np.array([[x[1], midp12[0], midp01[0]],[y[1], midp12[1], midp01[1]]])
new_triangles[:,3, counter+1] = x[1], y[1]
new_triangles[:,:3,counter+2] = np.array([[x[2], midp02[0], midp12[0]],[y[2], midp02[1], midp12[1]]])
new_triangles[:,3, counter+2] = x[2], y[2]
counter += 3
#new_triangles[1,:3,counter] = y[0], midp01[1], midp02[1]
#new_triangles[1,:,:] = [np.array([x[1], midp12[0], midp01[0]]), np.array([y[1], midp12[1], midp01[1]])]
#new_triangles[2,:,:] = [np.array([x[2], midp02[0], midp12[0]]), np.array([y[2], midp02[1], midp12[1]])]
return new_triangles
if __name__=="__main__":
# Start with a single triangle
#x = np.zeros(3)
#y = np.zeros(3)
#for i in range(3):
#num = np.pi/2 + 2*np.pi*i/3.
#x[i] = np.cos(num)
#y[i] = np.sin(num)
#triangles = []
#triangles.append([x,y])
# Start with a hexagon split into six triangles
triangles = np.zeros([2,4,6])
for i in range(6):
# The figure is shifted in y by h in order to center the figure
# in an empty space and avoid div by 0, looks better.
h=np.sin(np.pi/3)*.5*1.5
triangles[0,:3,i] = 0, np.cos(2*np.pi*i/6), np.cos(2*np.pi*(i+1)/6)
triangles[1,:3,i] = h, h+np.sin(2*np.pi*i/6), h+np.sin(2*np.pi*(i+1)/6)
triangles[0,3,i] = triangles[0,0,i]
triangles[1,3,i] = triangles[1,0,i]
generations = 8
for i in range(generations):
triangles = reproduce_triangles(triangles)
### Uncomment to view the original Sierpinski figure
#plt.figure('siers normal')
#plt.plot(triangles[0,:,:], triangles[1,:,:])
params = [0.45, 0.65]
t3 = time()
tr_inverse = compute_triangle_inverse(triangles, params)
t4 = time()
print 't4 ', t4-t3
plt.figure('siers inverse')
plt.plot(tr_inverse[0,:,:], tr_inverse[1,:,:], 'k', linewidth=0.3)
plt.text(0,0,str(params[0])+', '+str(params[1]), color='blue')
ax = plt.gca()
ax.autoscale(tight=True)
plt.show()
# key parameter: how many generations of triangles in the fractal
#Uncomment for uninverted Sierpinski fractal
#plt.figure('siers normal')
#plt.clf()
#for t in triangles:
# x = np.concatenate([t[0],[t[0][0]]])
# y = np.concatenate([t[1],[t[1][0]]])
# plt.plot(x,y)
    # The block below is legacy plotting code: plot_triangle_inverse() is not
    # defined in this script, so it is kept commented out for reference.
    #t3 = time()
    #plt.figure('sierps')
    #plt.clf()
    #for t in triangles:
    #    plot_triangle_inverse(t[0], t[1], params)
    #t4 = time()
    #print 't4 ', t4-t3
    #plt.show()
# #for i in newts:
# #plot_triangle(newts[i][0], newts[i][1])
# #plot_triangle(x,y)
# plt.plot(0,0,'r.')
# plt.text(0,0,str(a)+', '+str(b), color='blue')
# ax = plt.gca()
# ax.autoscale(tight=True)
# lx,rx = ax.get_xlim()
# ly,hy = ax.get_ylim()
# xmax = max(abs(lx), abs(rx))
# ymax = max(abs(ly), abs(hy))
# plt.axis([-xmax, xmax, -ymax, ymax])
# #plt.axis([-1, 1, -1, 1])
# #plt.axis('equal')
# plt.axis('off')
# counter_figs += 1
# #pb.savefig('gif_figs/inverse_sierpinski'+str(counter_figs).zfill(4)+'.png',dpi = 300)
# #pb.savefig('inverse_sierpinski2.pdf',dpi = 1200)
# #pb.savefig('inverse_sierpinski2.raw',dpi = 1200)
# #pb.savefig('inverse_sierpinski2.svg',dpi = 1200)
# plt.show()
# plt.pause(0.1)
| mit |
datapythonista/pandas | pandas/tests/indexing/multiindex/test_iloc.py | 3 | 4837 | import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
@pytest.fixture
def simple_multiindex_dataframe():
"""
    Fixture that creates a simple 3 x 3 DataFrame with both
    column and row MultiIndexes, filled with random data.
"""
data = np.random.randn(3, 3)
return DataFrame(
data, columns=[[2, 2, 4], [6, 8, 10]], index=[[4, 4, 8], [8, 10, 12]]
)
@pytest.mark.parametrize(
"indexer, expected",
[
(
lambda df: df.iloc[0],
lambda arr: Series(arr[0], index=[[2, 2, 4], [6, 8, 10]], name=(4, 8)),
),
(
lambda df: df.iloc[2],
lambda arr: Series(arr[2], index=[[2, 2, 4], [6, 8, 10]], name=(8, 12)),
),
(
lambda df: df.iloc[:, 2],
lambda arr: Series(arr[:, 2], index=[[4, 4, 8], [8, 10, 12]], name=(4, 10)),
),
],
)
def test_iloc_returns_series(indexer, expected, simple_multiindex_dataframe):
df = simple_multiindex_dataframe
arr = df.values
result = indexer(df)
expected = expected(arr)
tm.assert_series_equal(result, expected)
def test_iloc_returns_dataframe(simple_multiindex_dataframe):
df = simple_multiindex_dataframe
result = df.iloc[[0, 1]]
expected = df.xs(4, drop_level=False)
tm.assert_frame_equal(result, expected)
def test_iloc_returns_scalar(simple_multiindex_dataframe):
df = simple_multiindex_dataframe
arr = df.values
result = df.iloc[2, 2]
expected = arr[2, 2]
assert result == expected
def test_iloc_getitem_multiple_items():
# GH 5528
tup = zip(*[["a", "a", "b", "b"], ["x", "y", "x", "y"]])
index = MultiIndex.from_tuples(tup)
df = DataFrame(np.random.randn(4, 4), index=index)
result = df.iloc[[2, 3]]
expected = df.xs("b", drop_level=False)
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labels():
# this is basically regular indexing
arr = np.random.randn(4, 3)
df = DataFrame(
arr,
columns=[["i", "i", "j"], ["A", "A", "B"]],
index=[["i", "i", "j", "k"], ["X", "X", "Y", "Y"]],
)
result = df.iloc[2, 2]
expected = arr[2, 2]
assert result == expected
def test_frame_getitem_slice(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.iloc[:4]
expected = df[:4]
tm.assert_frame_equal(result, expected)
def test_frame_setitem_slice(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
df.iloc[:4] = 0
assert (df.values[:4] == 0).all()
assert (df.values[4:] != 0).all()
def test_indexing_ambiguity_bug_1678():
# GH 1678
columns = MultiIndex.from_tuples(
[("Ohio", "Green"), ("Ohio", "Red"), ("Colorado", "Green")]
)
index = MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)])
df = DataFrame(np.arange(12).reshape((4, 3)), index=index, columns=columns)
result = df.iloc[:, 1]
expected = df.loc[:, ("Ohio", "Red")]
tm.assert_series_equal(result, expected)
def test_iloc_integer_locations():
# GH 13797
data = [
["str00", "str01"],
["str10", "str11"],
["str20", "srt21"],
["str30", "str31"],
["str40", "str41"],
]
index = MultiIndex.from_tuples(
[("CC", "A"), ("CC", "B"), ("CC", "B"), ("BB", "a"), ("BB", "b")]
)
expected = DataFrame(data)
df = DataFrame(data, index=index)
result = DataFrame([[df.iloc[r, c] for c in range(2)] for r in range(5)])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, indexes, values, expected_k",
[
# test without indexer value in first level of MultiIndex
([[2, 22, 5], [2, 33, 6]], [0, -1, 1], [2, 3, 1], [7, 10]),
# test like code sample 1 in the issue
([[1, 22, 555], [1, 33, 666]], [0, -1, 1], [200, 300, 100], [755, 1066]),
# test like code sample 2 in the issue
([[1, 3, 7], [2, 4, 8]], [0, -1, 1], [10, 10, 1000], [17, 1018]),
# test like code sample 3 in the issue
([[1, 11, 4], [2, 22, 5], [3, 33, 6]], [0, -1, 1], [4, 7, 10], [8, 15, 13]),
],
)
def test_iloc_setitem_int_multiindex_series(data, indexes, values, expected_k):
# GH17148
df = DataFrame(data=data, columns=["i", "j", "k"])
df = df.set_index(["i", "j"])
series = df.k.copy()
for i, v in zip(indexes, values):
series.iloc[i] += v
df["k"] = expected_k
expected = df.k
tm.assert_series_equal(series, expected)
def test_getitem_iloc(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.iloc[2]
expected = df.xs(df.index[2])
tm.assert_series_equal(result, expected)
| bsd-3-clause |
harisbal/pandas | pandas/io/formats/style.py | 2 | 44025 | """
Module for applying conditional formatting to
DataFrames and Series.
"""
from functools import partial
from itertools import product
from contextlib import contextmanager
from uuid import uuid1
import copy
from collections import defaultdict, MutableMapping
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
raise ImportError("pandas.Styler requires jinja2. "
"Please install with `conda install Jinja2`\n"
"or `pip install Jinja2`")
from pandas.core.dtypes.common import is_float, is_string_like
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like
from pandas.compat import range
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
import pandas.core.common as com
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
from pandas.util._decorators import Appender
from pandas.core.dtypes.generic import ABCSeries
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
class Styler(object):
"""
Helps style a DataFrame or Series according to the
data with HTML and CSS.
Parameters
----------
data: Series or DataFrame
precision: int
precision to round floats to, defaults to pd.options.display.precision
table_styles: list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid: str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption: str, default None
caption to attach to the table
cell_ids: bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
      * ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
See Also
--------
pandas.DataFrame.style
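    Examples
    --------
    A minimal sketch with hypothetical data: build a Styler through
    ``DataFrame.style`` and attach a cell-wise CSS rule.
    >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
    >>> df.style.applymap(lambda v: 'color: red' if v > 2 else '')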
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
caption=None, table_attributes=None, cell_ids=True):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("``data`` must be a Series or DataFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indices.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = get_option('display.precision')
self.precision = precision
self.table_attributes = table_attributes
self.hidden_index = False
self.hidden_columns = []
self.cell_ids = cell_ids
# display_funcs maps (row, col) -> formatting function
def default_display_func(x):
if is_float(x):
return '{:>.{precision}g}'.format(x, precision=self.precision)
else:
return x
self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
"""Hooks into Jupyter notebook rich display system."""
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
axes='index, columns', klass='Styler',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='\n .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def _translate(self):
"""
        Convert the DataFrame in `self.data` and the styles collected in
        `self.ctx` into a dictionary of {head, body, uuid, cellstyle}.
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
hidden_index = self.hidden_index
hidden_columns = self.hidden_columns
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns, hidden_columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [{"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": not hidden_index,
"class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
"level{lvl}".format(lvl=r)]
name = BLANK_VALUE if name is None else name
row_es.append({"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": not hidden_index})
if clabels:
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if (self.data.index.names and
com._any_not_none(*self.data.index.names) and
not hidden_index):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS,
"level{lvl}".format(lvl=c)]
name = '' if name is None else name
index_header_row.append({"type": "th", "value": name,
"class": " ".join(cs)})
index_header_row.extend(
[{"type": "th",
"value": BLANK_VALUE,
"class": " ".join([BLANK_CLASS])
}] * (len(clabels[0]) - len(hidden_columns)))
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
"row{row}".format(row=r)]
es = {
"type": "th",
"is_visible": (_is_visible(r, c, idx_lengths) and
not hidden_index),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid)
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row{row}".format(row=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_dict = {"type": "td",
"value": value,
"class": " ".join(cs),
"display_value": formatter(value),
"is_visible": (c not in hidden_columns)}
# only add an id if the cell has a style
if (self.cell_ids or
not(len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
row_dict["id"] = "_".join(cs[1:])
row_es.append(row_dict)
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(['', ''])
cellstyle.append({'props': props,
'selector': "row{row}_col{col}"
.format(row=r, col=c)})
body.append(row_es)
table_attr = self.table_attributes
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
table_attr = table_attr or ''
if 'class="' in table_attr:
table_attr = table_attr.replace('class="',
'class="tex2jax_ignore ')
else:
table_attr += ' class="tex2jax_ignore"'
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
precision=precision, table_styles=table_styles,
caption=caption, table_attributes=table_attr)
def format(self, formatter, subset=None):
"""
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter: str, callable, or dict
subset: IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'c': str.upper})
"""
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if isinstance(formatter, MutableMapping):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
locs = product(*(row_locs, col_locs))
for i, j in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
def render(self, **kwargs):
"""Render the built up styles to HTML
Parameters
----------
`**kwargs`:
Any additional keyword arguments are passed through
to ``self.template.render``. This is useful when you
need to provide additional variables for a custom
template.
.. versionadded:: 0.20
Returns
-------
rendered: str
the rendered HTML
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* precision
* table_styles
* caption
* table_attributes
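        Examples
        --------
        A minimal sketch with a hypothetical frame; the return value is a
        plain HTML string.
        >>> df = pd.DataFrame({'A': [1, 2]})
        >>> html = df.style.render()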
"""
self._compute()
# TODO: namespace all the pandas keys
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
        # so we have the nested any() calls below
trimmed = [x for x in d['cellstyle']
if any(any(y) for y in x['props'])]
d['cellstyle'] = trimmed
d.update(kwargs)
return self.template.render(**d)
def _update_ctx(self, attrs):
"""
update the state of the Styler. Collects a mapping
of {index_label: ['<property>: <value>']}
attrs: Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
styler = Styler(self.data, precision=self.precision,
caption=self.caption, uuid=self.uuid,
table_styles=self.table_styles)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
def __copy__(self):
"""
Deep copy by default.
"""
return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
return self._copy(deepcopy=True)
def clear(self):
""""Reset" the styler, removing any previously applied styles.
Returns None.
"""
self.ctx.clear()
self._todo = []
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
Relies on the conventions that all style functions go through
.apply or .applymap. The append styles to apply as tuples of
(application method, *args, **kwargs)
"""
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(self, func, axis=0, subset=None, **kwargs):
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis,
result_type='expand', **kwargs)
result.columns = data.columns
else:
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
raise TypeError(
"Function {func!r} must return a DataFrame when "
"passed to `Styler.apply` with axis=None"
.format(func=func))
if not (result.index.equals(data.index) and
result.columns.equals(data.columns)):
msg = ('Result of {func!r} must have identical index and '
'columns as the input'.format(func=func))
raise ValueError(msg)
result_shape = result.shape
expected_shape = self.data.loc[subset].shape
if result_shape != expected_shape:
msg = ("Function {func!r} returned the wrong shape.\n"
"Result has shape: {res}\n"
"Expected shape: {expect}".format(func=func,
res=result.shape,
expect=expected_shape))
raise ValueError(msg)
self._update_ctx(result)
return self
def apply(self, func, axis=0, subset=None, **kwargs):
"""
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
axis : int, str or None
apply to each column (``axis=0`` or ``'index'``)
or to each row (``axis=1`` or ``'columns'``) or
to the entire DataFrame at once with ``axis=None``
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
... return ['background-color: yellow' if v == x.max() else ''
for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
"""
self._todo.append((lambda instance: getattr(instance, '_apply'),
(func, axis, subset), kwargs))
return self
def _applymap(self, func, subset=None, **kwargs):
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
See Also
--------
Styler.where
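        Examples
        --------
        A small sketch with hypothetical data: color negative values red.
        >>> df = pd.DataFrame(np.random.randn(5, 2))
        >>> df.style.applymap(lambda v: 'color: red' if v < 0 else '')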
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self
def where(self, cond, value, other=None, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with a style which is selected in
accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean
value : str
applied when ``cond`` returns true
other : str
applied when ``cond`` returns false
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``cond``
Returns
-------
self : Styler
See Also
--------
Styler.applymap
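        Examples
        --------
        A sketch with hypothetical data, equivalent to an ``applymap`` with a
        conditional expression; ``value`` and ``other`` are plain CSS strings.
        >>> df = pd.DataFrame(np.random.randn(5, 2))
        >>> df.style.where(lambda v: v < 0, 'color: red', other='color: black')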
"""
if other is None:
other = ''
return self.applymap(lambda val: value if cond(val) else other,
subset=subset, **kwargs)
def set_precision(self, precision):
"""
Set the precision used to render.
Parameters
----------
precision: int
Returns
-------
self : Styler
"""
self.precision = precision
return self
def set_table_attributes(self, attributes):
"""
Set the table attributes. These are the items
that show up in the opening ``<table>`` tag in addition
        to the automatic (by default) id.
Parameters
----------
attributes : string
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"')
# ... <table class="pure-table"> ...
"""
self.table_attributes = attributes
return self
def export(self):
"""
        Export the styles applied to the current Styler.
Can be applied to a second style with ``Styler.use``.
Returns
-------
styles: list
See Also
--------
Styler.use
"""
return self._todo
def use(self, styles):
"""
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
Parameters
----------
styles: list
list of style functions
Returns
-------
self : Styler
See Also
--------
Styler.export
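        Examples
        --------
        A sketch with hypothetical frames: re-apply the styles exported from
        one Styler to another.
        >>> styler1 = pd.DataFrame(np.random.randn(5, 2)).style.highlight_null()
        >>> styler2 = pd.DataFrame(np.random.randn(5, 2)).style
        >>> styler2.use(styler1.export())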
"""
self._todo.extend(styles)
return self
def set_uuid(self, uuid):
"""
Set the uuid for a Styler.
Parameters
----------
uuid: str
Returns
-------
self : Styler
"""
self.uuid = uuid
return self
def set_caption(self, caption):
"""
Set the caption on a Styler
Parameters
----------
caption: str
Returns
-------
self : Styler
"""
self.caption = caption
return self
def set_table_styles(self, table_styles):
"""
Set the table styles on a Styler. These are placed in a
``<style>`` tag before the generated HTML table.
Parameters
----------
table_styles: list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
"""
self.table_styles = table_styles
return self
def hide_index(self):
"""
Hide any indices from rendering.
.. versionadded:: 0.23.0
Returns
-------
self : Styler
"""
self.hidden_index = True
return self
def hide_columns(self, subset):
"""
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset: IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
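        Examples
        --------
        A sketch with hypothetical column labels:
        >>> df = pd.DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
        >>> df.style.hide_columns(['a', 'b'])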
"""
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
"""
Shade the background ``null_color`` for missing values.
Parameters
----------
null_color: str
Returns
-------
self : Styler
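        Examples
        --------
        A sketch with a hypothetical missing value:
        >>> df = pd.DataFrame({'A': [1, None], 'B': [3, 4]})
        >>> df.style.highlight_null(null_color='lightgray')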
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
subset=None, text_color_threshold=0.408):
"""
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
Parameters
----------
cmap: str or colormap
matplotlib colormap
low, high: float
compress the range by these values.
axis: int or str
1 or 'columns' for columnwise, 0 or 'index' for rowwise
subset: IndexSlice
a valid slice for ``data`` to limit the style application to
text_color_threshold: float or int
luminance threshold for determining text color. Facilitates text
visibility across varying background colors. From 0 to 1.
0 = all text is dark colored, 1 = all text is light colored.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
Notes
-----
Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the
text legible by not using the entire range of the color map. The range
of the data is extended by ``low * (x.max() - x.min())`` and ``high *
(x.max() - x.min())`` before normalizing.
Raises
------
ValueError
If ``text_color_threshold`` is not a value from 0 to 1.
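        Examples
        --------
        A sketch with hypothetical data (any matplotlib colormap name works):
        >>> df = pd.DataFrame(np.random.randn(5, 2))
        >>> df.style.background_gradient(cmap='viridis', axis=0)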
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset,
axis=axis, low=low, high=high,
text_color_threshold=text_color_threshold)
return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0,
text_color_threshold=0.408):
"""Color background in a range according to the data."""
if (not isinstance(text_color_threshold, (float, int)) or
not 0 <= text_color_threshold <= 1):
msg = "`text_color_threshold` must be a value from 0 to 1."
raise ValueError(msg)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = s.values.min()
smax = s.values.max()
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
# matplotlib colors.Normalize modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
rgbas = plt.cm.get_cmap(cmap)(norm(s.values))
def relative_luminance(rgba):
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
color : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
                x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba):
dark = relative_luminance(rgba) < text_color_threshold
text_color = '#f1f1f1' if dark else '#000000'
return 'background-color: {b};color: {c};'.format(
b=colors.rgb2hex(rgba), c=text_color
)
if s.ndim == 1:
return [css(rgba) for rgba in rgbas]
else:
return pd.DataFrame(
[[css(rgba) for rgba in row] for row in rgbas],
index=s.index, columns=s.columns
)
def set_properties(self, subset=None, **kwargs):
"""
Convenience method for setting one or more non-data dependent
        properties for each cell.
Parameters
----------
subset: IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs: dict
property: value pairs to be set for each cell
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
"""
values = ';'.join('{p}: {v}'.format(p=p, v=v)
for p, v in kwargs.items())
f = lambda x: values
return self.applymap(f, subset=subset)
@staticmethod
def _bar(s, align, colors, width=100, vmin=None, vmax=None):
"""Draw bar chart in dataframe cells"""
# Get input value range.
smin = s.min() if vmin is None else vmin
if isinstance(smin, ABCSeries):
smin = smin.min()
smax = s.max() if vmax is None else vmax
if isinstance(smax, ABCSeries):
smax = smax.max()
if align == 'mid':
smin = min(0, smin)
smax = max(0, smax)
elif align == 'zero':
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.values - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
def css_bar(start, end, color):
"""Generate CSS code to draw a bar from start to end."""
css = 'width: 10em; height: 80%;'
if end > start:
css += 'background: linear-gradient(90deg,'
if start > 0:
css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format(
s=start, c=color
)
css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format(
e=min(end, width), c=color,
)
return css
def css(x):
if pd.isna(x):
return ''
# avoid deprecated indexing `colors[x > zero]`
color = colors[1] if x > zero else colors[0]
if align == 'left':
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if s.ndim == 1:
return [css(x) for x in normed]
else:
return pd.DataFrame(
[[css(x) for x in row] for row in normed],
index=s.index, columns=s.columns
)
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
align='left', vmin=None, vmax=None):
"""
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : IndexSlice, optional
A valid slice for `data` to limit the style application to.
axis : int, str or None, default 0
Apply to each column (`axis=0` or `'index'`)
or to each row (`axis=1` or `'columns'`) or
to the entire DataFrame at once with `axis=None`.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
width : float, default 100
A number between 0 or 100. The largest value will cover `width`
percent of the cell's width.
align : {'left', 'zero',' mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
.. versionadded:: 0.20.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
"""
if align not in ('left', 'zero', 'mid'):
            raise ValueError("`align` must be one of {'left', 'zero', 'mid'}")
if not (is_list_like(color)):
color = [color, color]
elif len(color) == 1:
color = [color[0], color[0]]
elif len(color) > 2:
raise ValueError("`color` must be string or a list-like"
" of length 2: [`color_neg`, `color_pos`]"
" (eg: color=['#d65f5f', '#5fba7d'])")
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._bar, subset=subset, axis=axis,
align=align, colors=color, width=width,
vmin=vmin, vmax=vmax)
return self
def highlight_max(self, subset=None, color='yellow', axis=0):
"""
Highlight the maximum by shading the background
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
axis: int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
"""
Highlight the minimum by shading the background
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
axis: int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
max_=True):
subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
self.apply(self._highlight_extrema, color=color, axis=axis,
subset=subset, max_=max_)
return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
"""Highlight the min or max in a Series or DataFrame"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
        else:  # DataFrame from .apply(axis=None)
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
"""
Factory function for creating a subclass of ``Styler``
with a custom template and Jinja environment.
Parameters
----------
searchpath : str or list
Path or paths of directories containing the templates
name : str
Name of your custom template to use for rendering
Returns
-------
MyStyler : subclass of Styler
has the correct ``env`` and ``template`` class attributes set.
"""
loader = ChoiceLoader([
FileSystemLoader(searchpath),
cls.loader,
])
class MyStyler(cls):
env = Environment(loader=loader)
template = env.get_template(name)
return MyStyler
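# Illustrative sketch (added, not part of the original module): rough usage of
# the ``from_custom_template`` factory defined above.  The search path
# "templates" and the template name "myhtml.tpl" are hypothetical; the
# directory must actually contain that template for this to run.
def _demo_from_custom_template():
    MyStyler = Styler.from_custom_template("templates", "myhtml.tpl")
    return MyStyler(pd.DataFrame({'x': [1, 2]}))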
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool})
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index, hidden_elements=None):
"""
Given an index, find the level length for each element.
Optional argument is a list of index positions which
should not be visible.
    Result is a dictionary of (level, initial_position): span
"""
sentinel = com.sentinel_factory()
levels = index.format(sparsify=sentinel, adjoin=False, names=False)
if hidden_elements is None:
hidden_elements = []
lengths = {}
if index.nlevels == 1:
for i, value in enumerate(levels):
            if i not in hidden_elements:
lengths[(0, i)] = 1
return lengths
for i, lvl in enumerate(levels):
for j, row in enumerate(lvl):
if not get_option('display.multi_sparse'):
lengths[(i, j)] = 1
elif (row != sentinel) and (j not in hidden_elements):
last_label = j
lengths[(i, last_label)] = 1
elif (row != sentinel):
                # even if it's hidden, keep track of it in case
# length >1 and later elements are visible
last_label = j
lengths[(i, last_label)] = 0
            elif j not in hidden_elements:
lengths[(i, last_label)] += 1
non_zero_lengths = {}
for element, length in lengths.items():
        if length >= 1:
non_zero_lengths[element] = length
return non_zero_lengths
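# Illustrative sketch (added, not part of the original module): what
# ``_get_level_lengths`` computes for a small MultiIndex.  Spans greater than
# one mark header cells that get merged when 'display.multi_sparse' is on.
def _demo_get_level_lengths():
    idx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
    # expected (with sparsification enabled):
    # {(0, 0): 2, (0, 2): 2, (1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1}
    return _get_level_lengths(idx)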
def _maybe_wrap_formatter(formatter):
if is_string_like(formatter):
return lambda x: formatter.format(x)
elif callable(formatter):
return formatter
else:
msg = ("Expected a template string or callable, got {formatter} "
"instead".format(formatter=formatter))
raise TypeError(msg)
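# Usage sketch (added, illustrative only): how the styling methods defined
# above are typically chained on a throwaway DataFrame.  ``render`` returns
# the generated HTML; data and CSS values are arbitrary.
if __name__ == "__main__":
    import numpy as _np
    _np.random.seed(0)
    _demo_df = pd.DataFrame(_np.random.randn(5, 3), columns=list('abc'))
    _html = (_demo_df.style
             .set_properties(**{'font-family': 'monospace'})
             .bar(subset=['a'], align='mid', color=['#d65f5f', '#5fba7d'])
             .highlight_max(axis=0)
             .render())
    print(_html[:60] + '...')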
| bsd-3-clause |
BiaDarkia/scikit-learn | sklearn/ensemble/tests/test_base.py | 36 | 5277 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from numpy.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble.base import _set_random_states
from sklearn.linear_model import Perceptron
from collections import OrderedDict
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectFromModel
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(
base_estimator=Perceptron(tol=1e-3, random_state=None), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
random_state = np.random.RandomState(3)
ensemble._make_estimator(random_state=random_state)
ensemble._make_estimator(random_state=random_state)
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
assert_equal(ensemble[0].random_state, None)
assert_true(isinstance(ensemble[1].random_state, int))
assert_true(isinstance(ensemble[2].random_state, int))
assert_not_equal(ensemble[1].random_state, ensemble[2].random_state)
np_int_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
n_estimators=np.int32(3))
np_int_ensemble.fit(iris.data, iris.target)
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
def test_base_not_int_n_estimators():
# Check that instantiating a BaseEnsemble with a string as n_estimators
# raises a ValueError demanding n_estimators to be supplied as an integer.
string_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
n_estimators='3')
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be an integer",
string_ensemble.fit, iris.data, iris.target)
float_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
n_estimators=3.0)
assert_raise_message(ValueError,
"n_estimators must be an integer",
float_ensemble.fit, iris.data, iris.target)
def test_set_random_states():
# Linear Discriminant Analysis doesn't have random state: smoke test
_set_random_states(LinearDiscriminantAnalysis(), random_state=17)
clf1 = Perceptron(tol=1e-3, random_state=None)
assert_equal(clf1.random_state, None)
# check random_state is None still sets
_set_random_states(clf1, None)
assert_true(isinstance(clf1.random_state, int))
# check random_state fixes results in consistent initialisation
_set_random_states(clf1, 3)
assert_true(isinstance(clf1.random_state, int))
clf2 = Perceptron(tol=1e-3, random_state=None)
_set_random_states(clf2, 3)
assert_equal(clf1.random_state, clf2.random_state)
# nested random_state
def make_steps():
return [('sel', SelectFromModel(Perceptron(tol=1e-3,
random_state=None))),
('clf', Perceptron(tol=1e-3, random_state=None))]
est1 = Pipeline(make_steps())
_set_random_states(est1, 3)
assert_true(isinstance(est1.steps[0][1].estimator.random_state, int))
assert_true(isinstance(est1.steps[1][1].random_state, int))
assert_not_equal(est1.get_params()['sel__estimator__random_state'],
est1.get_params()['clf__random_state'])
# ensure multiple random_state parameters are invariant to get_params()
# iteration order
class AlphaParamPipeline(Pipeline):
def get_params(self, *args, **kwargs):
params = Pipeline.get_params(self, *args, **kwargs).items()
return OrderedDict(sorted(params))
class RevParamPipeline(Pipeline):
def get_params(self, *args, **kwargs):
params = Pipeline.get_params(self, *args, **kwargs).items()
return OrderedDict(sorted(params, reverse=True))
for cls in [AlphaParamPipeline, RevParamPipeline]:
est2 = cls(make_steps())
_set_random_states(est2, 3)
assert_equal(est1.get_params()['sel__estimator__random_state'],
est2.get_params()['sel__estimator__random_state'])
assert_equal(est1.get_params()['clf__random_state'],
est2.get_params()['clf__random_state'])
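# Illustrative sketch (added, not one of the original tests): two estimators
# seeded from identically-seeded RandomState sources receive the same integer
# seed from _set_random_states, which is what the checks above rely on.
def _demo_set_random_states_sketch():
    rng_a = np.random.RandomState(42)
    rng_b = np.random.RandomState(42)
    clf_a = Perceptron(tol=1e-3, random_state=None)
    clf_b = Perceptron(tol=1e-3, random_state=None)
    _set_random_states(clf_a, rng_a)
    _set_random_states(clf_b, rng_b)
    assert clf_a.random_state == clf_b.random_state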
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/core/panel.py | 7 | 55818 | """
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import numpy as np
import warnings
from pandas.core.dtypes.cast import (
infer_dtype_from_scalar,
maybe_cast_item)
from pandas.core.dtypes.common import (
is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.core.dtypes.missing import notnull
import pandas.core.computation.expressions as expressions
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.missing as missing
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict, OrderedDefaultdict)
from pandas.compat.numpy import function as nv
from pandas.core.common import _try_sort, _default_index
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.io.formats.printing import pprint_thing
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.core.reshape.util import cartesian_product
from pandas.util._decorators import (deprecate, Appender)
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one"
"of\n%s" %
_shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
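# Illustrative sketch (added, not part of the original module): how
# ``_ensure_like_indices`` tiles/repeats its inputs so the (time, panel)
# pairs form a full cross-product.  Values are arbitrary.
def _demo_ensure_like_indices():
    time, panels = _ensure_like_indices([1999, 2000], ['A', 'B', 'C'])
    # time   -> [1999, 2000, 1999, 2000, 1999, 2000]
    # panels -> ['A', 'A', 'B', 'B', 'C', 'C']
    return time, panels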
def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
if names is None:
names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
# deprecation GH13563
warnings.warn("\nPanel is deprecated and will be removed in a "
"future version.\nThe recommended way to represent "
"these types of 3-dimensional data are with a "
"MultiIndex on a DataFrame, via the "
"Panel.to_frame() method\n"
"Alternatively, you can use the xarray package "
"http://xarray.pydata.org/en/stable/.\n"
"Pandas provides a `.to_xarray()` method to help "
"automate this conversion.\n",
DeprecationWarning, stacklevel=3)
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
elif is_scalar(data) and all(x is not None for x in passed_axes):
if dtype is None:
dtype, data = infer_dtype_from_scalar(data)
values = np.empty([len(x) for x in passed_axes], dtype=dtype)
values.fill(data)
mgr = self._init_matrix(values, passed_axes, dtype=dtype,
copy=False)
copy = False
else: # pragma: no cover
raise ValueError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v)
for k, v in compat.iteritems(data)
if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i) if a is None else a
for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
"""
orient = orient.lower()
if orient == 'minor':
new_data = OrderedDefaultdict(dict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if not (is_list_like(key) or isinstance(key, slice)):
return super(Panel, self).__getitem__(key)
return self.loc[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in self._info_axis:
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
shape = self.shape
dims = u('Dimensions: %s') % ' x '.join(
["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('%s axis: %s to %s') % (a.capitalize(),
pprint_thing(v[0]),
pprint_thing(v[-1]))
else:
return u('%s axis: None') % a.capitalize()
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
        Get the names of my plane axes, i.e. the axes of the DataFrame
        obtained by slicing along ``axis`` (as compared with higher
        dimensional planes).
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
if axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
        Get my plane axes, i.e. the axis Index objects of the DataFrame
        obtained by slicing along ``axis`` (as compared with higher
        dimensional planes).
"""
return [self._get_axis(axi)
for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
def to_sparse(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as sparsifying is not
supported for Panel objects and will raise an error.
Convert to SparsePanel
"""
raise NotImplementedError("sparsifying is not supported "
"for Panel objects")
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in self.iteritems():
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower.get_value(*args[1:], takeable=takeable)
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower.set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
maybe_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError('shape of value must be {0}, shape of given '
'object was {1}'.format(
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif is_scalar(value):
dtype, value = infer_dtype_from_scalar(value)
mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in Panel to a specified number of decimal places.
.. versionadded:: 0.18.0
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Panel object
See Also
--------
numpy.around
"""
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
return False
def align(self, other, **kwargs):
raise NotImplementedError
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D from panel, holding passed axis constant
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = notnull(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif is_scalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError("%s is not supported in combine "
"operation with %s" %
(str(type(other)), str(type(self))))
def _combine_const(self, other, func):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
with np.errstate(all='ignore'):
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
with np.errstate(all='ignore'):
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of major_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of minor_xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor}, default 1/'major'
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels and is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction
# (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
            Mapping function for chosen axis
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def apply(self, func, axis='major', **kwargs):
"""
Applies function along axis (or axes) of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'),
DataFrames of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
axes
Additional keyword arguments will be passed as keywords to the function
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4,3,2))
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1)
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='minor')
Return the shapes of each DataFrame over axis 2 (i.e the shapes of
items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1))
Returns
-------
result : Panel, DataFrame, or Series
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple([p[i] for p in points])
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError('Panel.{0} does not implement '
'numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
with np.errstate(all='ignore'):
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
""" return the type for the ndim of the result """
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
# return the construction dictionary for these axes
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise ValueError('invalid _construct_return_type [self->%s] '
'[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# check if a list of axes was passed in instead as a
# single *args element
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not is_string_like(args[0])):
axes = args[0]
else:
axes = args
if 'axes' in kwargs and axes:
raise TypeError("transpose() got multiple values for "
"keyword argument 'axes'")
elif not axes:
axes = kwargs.pop('axes', ())
return super(Panel, self).transpose(*axes, **kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
def shift(self, periods=1, freq=None, axis='major'):
"""
Shift index by desired number of periods with an optional time freq.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original. This is different
from the behavior of DataFrame.shift()
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
        Join items with other Panel on the major and minor axes.
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.core.reshape.concat import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
join : How to join individual DataFrames
{'left', 'right', 'outer', 'inner'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indicies """
return [self._extract_axis(self, data, axis=i, **kwargs)
for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(
self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
        dict of aligned results & indices
"""
result = dict()
        # the caller may pass a plain dict or an OrderedDict; preserve the type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
indexes.append(v._get_axis(axis))
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
# doc strings substitors
_agg_doc = """
Wrapper method for %%s
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True,
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
equiv = 'panel ' + op_desc['op'] + ' other'
_op_doc = """
%%s of series and other, element-wise (binary operator `%%s`).
Equivalent to ``%%s``.
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__,
cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + """
See also
--------
""" + cls.__name__ + ".%s\n"
doc = _op_doc % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
else:
doc = _agg_doc % name
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
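# Illustrative sketch (added, not part of the original module): the setup
# calls above attach flex arithmetic (add, sub, mul, ...) and the numeric
# reductions to Panel.  A rough example of what that enables, with arbitrary
# data:
def _demo_panel_flex_arithmetic():
    p = Panel(np.ones((2, 3, 4)))
    # broadcast the first item (a DataFrame) across every item of the Panel
    return p.add(p[0], axis='items')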
# legacy
class WidePanel(Panel):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
super(LongPanel, self).__init__(*args, **kwargs)
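# Usage sketch (added, illustrative only): the migration path suggested by the
# deprecation warning in ``Panel.__init__`` -- build a Panel, then flatten it
# to a MultiIndexed DataFrame with ``to_frame``.  Shapes and labels are
# arbitrary.
if __name__ == "__main__":
    import warnings as _warnings
    with _warnings.catch_warnings():
        _warnings.simplefilter("ignore", DeprecationWarning)
        _demo_panel = Panel(np.random.randn(2, 3, 4),
                            items=['i0', 'i1'],
                            major_axis=range(3),
                            minor_axis=list('wxyz'))
    _long = _demo_panel.to_frame()
    print(_long.shape)  # (12, 2): major x minor rows, one column per item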
| mit |
gnuradio/gnuradio | gr-fec/python/fec/polar/channel_construction_awgn.py | 2 | 8128 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
Based on 2 papers:
[1] Ido Tal, Alexander Vardy: 'How To Construct Polar Codes', 2013
for an in-depth description of a widely used algorithm for channel construction.
[2] Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
for an overview of different approaches
"""
from scipy.optimize import fsolve
from scipy.special import erfc
import numpy as np
from .helper_functions import (
bhattacharyya_parameter,
bit_reverse_vector,
power_of_2_int,
show_progress_bar,
)
from .channel_construction_bec import bhattacharyya_bounds
def solver_equation(val, s):
cw_lambda = codeword_lambda_callable(s)
ic_lambda = instantanious_capacity_callable()
return lambda y: ic_lambda(cw_lambda(y)) - val
def solve_capacity(a, s):
eq = solver_equation(a, s)
res = fsolve(eq, 1)
return np.abs(res[0]) # only positive values needed.
def codeword_lambda_callable(s):
return lambda y: np.exp(-2 * y * np.sqrt(2 * s))
def codeword_lambda(y, s):
return codeword_lambda_callable(s)(y)
def instantanious_capacity_callable():
return lambda x: 1 - np.log2(1 + x) + (x * np.log2(x) / (1 + x))
def instantanious_capacity(x):
return instantanious_capacity_callable()(x)
def q_function(x):
# Q(x) = (1 / sqrt(2 * pi) ) * integral (x to inf) exp(- x ^ 2 / 2) dx
return 0.5 * erfc(x / np.sqrt(2))
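# Illustrative check (added): sanity values for the Q-function defined above,
# Q(0) = 0.5 and Q(1.96) ~ 0.025, the familiar Gaussian tail probabilities.
def _demo_q_function_values():
    return q_function(0.0), q_function(1.96)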
def discretize_awgn(mu, design_snr):
"""
needed for Binary-AWGN channels.
in [1] described in Section VI
in [2] described as a function of the same name.
in both cases reduce infinite output alphabet to a finite output alphabet of a given channel.
idea:
1. instantaneous capacity C(x) in interval [0, 1]
2. split into mu intervals.
3. find corresponding output alphabet values y of likelihood ratio function lambda(y) inserted into C(x)
    4. Calculate the probability for each value given that a '0' or '1' was transmitted.
"""
s = 10 ** (design_snr / 10)
a = np.zeros(mu + 1, dtype=float)
a[-1] = np.inf
for i in range(1, mu):
a[i] = solve_capacity(1.0 * i / mu, s)
factor = np.sqrt(2 * s)
tpm = np.zeros((2, mu))
for j in range(mu):
tpm[0][j] = q_function(factor + a[j]) - q_function(factor + a[j + 1])
tpm[1][j] = q_function(-1.0 * factor + a[j]) - q_function(
-1.0 * factor + a[j + 1]
)
tpm = tpm[::-1]
tpm[0] = tpm[0][::-1]
tpm[1] = tpm[1][::-1]
return tpm
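# Illustrative sketch (added): example call for the discretisation described
# above.  A design SNR of 0 dB and mu = 8 bins give a 2 x 8 transition
# probability matrix; the two rows together sum to ~1, and
# tal_vardy_tpm_algorithm below rescales the result by 2 before convolving.
def _demo_discretize_awgn():
    tpm = discretize_awgn(mu=8, design_snr=0.0)
    assert tpm.shape == (2, 8)
    return tpm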
def instant_capacity_delta_callable():
return (
lambda a, b: -1.0 * (a + b) * np.log2((a + b) / 2)
+ a * np.log2(a)
+ b * np.log2(b)
)
def capacity_delta_callable():
c = instant_capacity_delta_callable()
return lambda a, b, at, bt: c(a, b) + c(at, bt) - c(a + at, b + bt)
def quantize_to_size(tpm, mu):
# This is a degrading merge, compare [1]
calculate_delta_I = capacity_delta_callable()
L = np.shape(tpm)[1]
if not mu < L:
print("WARNING: This channel gets too small!")
# lambda works on vectors just fine. Use Numpy vector awesomeness.
delta_i_vec = calculate_delta_I(tpm[0, 0:-1], tpm[1, 0:-1], tpm[0, 1:], tpm[1, 1:])
for i in range(L - mu):
d = np.argmin(delta_i_vec)
ap = tpm[0, d] + tpm[0, d + 1]
bp = tpm[1, d] + tpm[1, d + 1]
if d > 0:
delta_i_vec[d - 1] = calculate_delta_I(tpm[0, d - 1], tpm[1, d - 1], ap, bp)
if d < delta_i_vec.size - 1:
delta_i_vec[d + 1] = calculate_delta_I(ap, bp, tpm[0, d + 1], tpm[1, d + 1])
delta_i_vec = np.delete(delta_i_vec, d)
tpm = np.delete(tpm, d, axis=1)
tpm[0, d] = ap
tpm[1, d] = bp
return tpm
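# Illustrative sketch (added): the degrading merge above repeatedly fuses the
# adjacent pair of output symbols whose merger loses the least capacity, until
# only mu symbols remain.  A toy 4-symbol channel reduced to 2 symbols:
def _demo_quantize_to_size():
    toy_tpm = np.array([[0.4, 0.3, 0.2, 0.1],
                        [0.1, 0.2, 0.3, 0.4]])
    reduced = quantize_to_size(toy_tpm, mu=2)
    assert reduced.shape == (2, 2)
    return reduced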
def upper_bound_z_params(z, block_size, design_snr):
upper_bound = bhattacharyya_bounds(design_snr, block_size)
z = np.minimum(z, upper_bound)
return z
def tal_vardy_tpm_algorithm(block_size, design_snr, mu):
mu = mu // 2 # make sure algorithm uses only as many bins as specified.
block_power = power_of_2_int(block_size)
channels = np.zeros((block_size, 2, mu))
channels[0] = discretize_awgn(mu, design_snr) * 2
print("Constructing polar code with Tal-Vardy algorithm")
print(
"(block_size = {0}, design SNR = {1}, mu = {2}".format(
block_size, design_snr, 2 * mu
)
)
show_progress_bar(0, block_size)
for j in range(0, block_power):
u = 2 ** j
for t in range(u):
show_progress_bar(u + t, block_size)
# print("(u={0}, t={1}) = {2}".format(u, t, u + t))
ch1 = upper_convolve(channels[t], mu)
ch2 = lower_convolve(channels[t], mu)
channels[t] = quantize_to_size(ch1, mu)
channels[u + t] = quantize_to_size(ch2, mu)
z = np.zeros(block_size)
for i in range(block_size):
z[i] = bhattacharyya_parameter(channels[i])
z = z[bit_reverse_vector(np.arange(block_size), block_power)]
z = upper_bound_z_params(z, block_size, design_snr)
show_progress_bar(block_size, block_size)
print("")
print("channel construction DONE")
return z
def merge_lr_based(q, mu):
lrs = q[0] / q[1]
vals, indices, inv_indices = np.unique(lrs, return_index=True, return_inverse=True)
# compare [1] (20). Ordering of representatives according to LRs.
temp = np.zeros((2, len(indices)), dtype=float)
if vals.size < mu:
return q
for i in range(len(indices)):
merge_pos = np.where(inv_indices == i)[0]
sum_items = q[:, merge_pos]
if merge_pos.size > 1:
sum_items = np.sum(q[:, merge_pos], axis=1)
temp[0, i] = sum_items[0]
temp[1, i] = sum_items[1]
return temp
def upper_convolve(tpm, mu):
q = np.zeros((2, mu ** 2))
idx = -1
for i in range(mu):
idx += 1
        q[0, idx] = (tpm[0, i] ** 2 + tpm[1, i] ** 2) / 2
q[1, idx] = tpm[0, i] * tpm[1, i]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j] + tpm[1, i] * tpm[1, j]
q[1, idx] = tpm[0, i] * tpm[1, j] + tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def lower_convolve(tpm, mu):
q = np.zeros((2, mu * (mu + 1)))
idx = -1
for i in range(0, mu):
idx += 1
        q[0, idx] = (tpm[0, i] ** 2) / 2
        q[1, idx] = (tpm[1, i] ** 2) / 2
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, i]
q[1, idx] = q[0, idx]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j]
q[1, idx] = tpm[1, i] * tpm[1, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, j]
q[1, idx] = tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
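# Hedged usage sketch, not part of the original module: one W -> (W-, W+) polarization
# step on the discretized AWGN channel, mirroring the inner loop of
# tal_vardy_tpm_algorithm above, which stores upper_convolve's result at index t and
# lower_convolve's at index u + t, each re-quantized back to mu output symbols.
def _demo_single_polarization_step(mu=16, design_snr=0.0):
    w = discretize_awgn(mu, design_snr) * 2
    w_minus = quantize_to_size(upper_convolve(w, mu), mu)
    w_plus = quantize_to_size(lower_convolve(w, mu), mu)
    return w_minus, w_plus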
def swap_values(first, second):
return second, first
def normalize_q(q, tpm):
original_factor = np.sum(tpm)
next_factor = np.sum(q)
factor = original_factor / next_factor
return q * factor
def main():
print("channel construction AWGN main")
n = 8
m = 2 ** n
design_snr = 0.0
mu = 16
z_params = tal_vardy_tpm_algorithm(m, design_snr, mu)
print(z_params)
if 0:
import matplotlib.pyplot as plt
plt.plot(z_params)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
rongzh/unbiased-pscale | code/Jamieson1982Analysis/eval_Pt_eos.py | 1 | 9546 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as optimize
from scipy.interpolate import interp1d
import collections
from eos_mod import MGD_PowerLaw,debye_fun,Ptot_powerlaw,press_vinet
def set_dic(a):
param_d['V0'] = a[0]
param_d['K0'] = a[1]
param_d['K0p'] = a[2]
param_d['theta0'] = np.exp(a[3])#
param_d['gamma0'] = np.exp(a[4])
param_d['q'] = a[5]
def set_const():
param_d['const']['Natom'] = 4
param_d['const']['kB'] = 8.6173324e-5 #eV per K
param_d['const']['P_factor'] = 160.217657#GPa in 1 eV/Ang^3
param_d['const']['R'] = 8.314462/1.6021764e-19 # eV/K per mol
# 1eV/ang^3 = 160.217657GPa, 1eV = 1.6021764e-19Joules, 1Ang3 = e-30m^3
param_d['const']['C_DP'] = 3*param_d['const']['R']#Dulong-Petit limit for Cv
#read data from file
dat = np.loadtxt(fname='UpUs-Pt.md', delimiter='|', skiprows=3)
print dat
Up = dat[:,0]
Us = dat[:,1]
plt.plot(Up,Us,'rx')
plt.xlabel('Up[km/s]')
plt.ylabel('Us[km/s]')
plt.show()
print Up,Us
#set Pt density
rho1original = 21.472 #unit grams/cm3
atompermol = 6.022*(10**23) #at/mol
unitcell = 4 #at/unitcell
v_unitcell = 60.38*10**(-24) #unit cm3
rho1 = rho1original/10**24 #grams/ang^3
#compute rho2 based on conservation of mass
rho2 = rho1*Us/(Us-Up) #unit grams/Ang^3
print "rho2:",rho2
#Atmospheric pressure is
p1 = 101325*10**(-9) #unit GPa
p2 = rho1*Us*Up*10**(24) + p1 #unit GPa, and this is properly unit analyzed
#edit units here: km and m
print "p2: ",p2
#let the initial internal energy E1 be 0.
m_cell = 195*4/atompermol # (g/unitcell)
f_conv_E = m_cell/160.217657 # (g/cell)*(eV/(GPa*Ang^3))
E2 = 0.5*(p1+p2) * f_conv_E *(1/rho1-1/rho2)#unit eV/unitcell
print "E2: ", E2
param_d = collections.OrderedDict()
param_d['const'] = collections.OrderedDict()
set_const()
V_a = 195*param_d['const']['Natom']/atompermol/rho2 #unit Ang^3/unitcell
print "V_a" , V_a
# 1eV/ang^3 = 160.217657GPa, 1eV = 1.6021764e-19Joules, 1Ang3 = e-30m^3
fei_report = [60.38, 277,5.08,np.log(230),np.log(2.72),0.5]
set_dic(fei_report)
#compute Vinet energy
def energy_vinet( V_a, param_d ):
V0 = param_d['V0']
K0 = param_d['K0']
K0p = param_d['K0p']
P_factor = param_d['const']['P_factor']
x = (V_a/V0)**(1.0/3)
eta = 3.0*(K0p- 1)/2.0
energy_a = V0*K0/P_factor*9.0/(eta**2.0) * (1 + (eta*(1-x)-1) * np.exp(eta*(1-x)))
#print "vinet: " , energy_a
return energy_a
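#Hedged check, not part of the original script: by construction the Vinet energy above is
#zero at the reference volume, since with x = (V/V0)**(1.0/3) and eta = 3*(K0p - 1)/2 the
#bracket (1 + (eta*(1-x) - 1)*exp(eta*(1-x))) vanishes at x = 1.
def check_vinet_reference_energy(param_d):
    return energy_vinet(param_d['V0'], param_d) #expected to be ~0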
#compute thermal part energy
def energy_mgd_powlaw( V_a, T_a, param_d ):
# get parameter values
theta0 = param_d['theta0']
C_DP = param_d['const']['C_DP']
P_factor = param_d['const']['P_factor']
gamma= param_d['gamma0']*(V_a/param_d['V0'])**param_d['q']
theta = param_d['theta0']*np.exp((-1)*(gamma-param_d['gamma0'])/param_d['q'])
T_ref_a = 300 # here we use isothermal reference compression curve
energy_therm_a = C_DP/atompermol*param_d['const']['Natom']*(T_a*debye_fun(theta/T_a) - T_ref_a*debye_fun(theta/T_ref_a ))
return energy_therm_a
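#Hedged note, not part of the original script: the thermal term above uses the
#Mie-Grueneisen-Debye power law gamma(V) = gamma0*(V/V0)**q with the Debye temperature
#following theta(V) = theta0*exp(-(gamma - gamma0)/q), exactly as coded in
#energy_mgd_powlaw; the helper below just exposes those two relations.
def mgd_gamma_theta(V_a, param_d):
    gamma = param_d['gamma0']*(V_a/param_d['V0'])**param_d['q']
    theta = param_d['theta0']*np.exp((-1)*(gamma - param_d['gamma0'])/param_d['q'])
    return gamma, theta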
#return model total energy
def energy_mod_total(T_a,V_a,param_d):
return energy_vinet(V_a,param_d) + energy_mgd_powlaw(V_a,T_a,param_d)
def findroot(T_a,V_a,param_d,E):
return energy_mod_total(T_a,V_a,param_d) - E
#find the temperature
result = []
for ind in range(len(V_a)):
T_root = optimize.brentq(findroot, a=300,b=3000,args = (V_a[ind],param_d,E2[ind]),full_output = False)
result.append(T_root)
print "result: ", result
#edit the dictionary
#plot energy from 300K to 3000K, compare with E2,
#eos_mod.py & eval_Pt_eos.py
P_resi = MGD_PowerLaw(V_a, T_root, param_d) - p2
print P_resi
plt.figure()
plt.plot(p2,P_resi,'x')
plt.xlabel('absolute P')
plt.ylabel('resi_P')
plt.show()
plt.plot(result,P_resi,'rx')
plt.xlabel('T')
plt.ylabel('resi_P')
plt.show()
Pthermal = Ptot_powerlaw(V_a,result,param_d,300)
plt.plot(Pthermal,P_resi+Pthermal,'rx')
plt.xlabel('thermal')
plt.ylabel('resi_P')
plt.show()
##conversion from PVT to vs vp.
def mass_conserv(us,up,rho1, rho2):
return rho1*us - rho2*(us-up)
def momentum_conserv(us,up,rho1,p1,p2):
#return rho1*us*up + p1 - p2
p1 = 101325*10**(-9) #unit GPa
    return p2 - rho1*us*up*10**(24) - p1 #unit GPa
def energy_conserv(us,up,p1,p2,rho1,rho2,E2):
#return (p1+p2)*(1/rho1 + 1/rho2)/2 - E2
m_cell = 195*4/atompermol # (g/unitcell)
f_conv_E = m_cell/160.217657 # (g/cell)*(eV/(GPa*Ang^3))
return E2 - 0.5*(p1+p2) * f_conv_E *(1/rho1-1/rho2)#unit eV/unitcell
mass_range = max(rho2*(Us-Up))
momentum_range = max(p2-p1)
energy_range = max(E2)
print "testhere: ",mass_range, momentum_range,energy_range
def fitfunc(u,rho1,rho2,p1,p2,E2):
us = u[0]
up = u[1]
return np.array(mass_conserv(us,up,rho1,rho2)*mass_conserv(us,up,rho1,rho2)/mass_range/mass_range+
momentum_conserv(us,up,rho1,p1,p2) * momentum_conserv(us,up,rho1,p1,p2)/momentum_range/momentum_range +
energy_conserv(us,up,p1,p2,rho1,rho2,E2) * energy_conserv(us,up,p1,p2,rho1,rho2,E2)/energy_range/energy_range,dtype = 'f8')
guess = [0.3,4]
#set other parameters
#print "test three functions: ", mass_conserv(Us,Up,rho1,rho2), momentum_conserv(Us,Up,rho1,p1,p2), energy_conserv(Us,Up,p1,p2,rho1,rho2,E2)
#rho1new = rho1 * np.ones(len(rho2))
#p1new = p1* np.ones(len(p2))
#for i in range(len(rho2)):
# popt = optimize.minimize(fitfunc,guess[:],args=(rho1new[i],rho2[i],p1new[i],p2[i],E2[i]),full_output=1)
# print popt
#print"done"
##ployfit rho and T
#print "temperaure",result #unit K
#print "density and its corresponding volume", rho2, V_a# unit grams/Ang^3
##get Pmod and Emod
#Emod = energy_mod_total(result,V_a,param_d)
#Pmod = MGD_PowerLaw(V_a, result, param_d)
#print Emod, Pmod
#for i in range(len(rho2)):
# popt = optimize.leastsq(fitfunc,guess[:],args=(rho1new[i],rho2[i],p1new[i],Pmod[i],Emod[i]),full_output=1)
#print popt
#modfit = np.polyfit(result,rho2,3)
#print modfit
#p = np.poly1d(modfit)
#tem_range = np.array([300,500,700,900,1100,1300,1500,1700,1900,2100,2300,2500,2700,2900,3000])
#print "using polyfit: ", p(tem_range),tem_range
#print "computed density and its corresponding temperature: ", rho2, result# unit grams/Ang^3
#####Compute Jamieson's Temperature
param_d['theta0'] = 200
param_d['gamma0'] = 2.40
param_d['q'] = 1
#compute thermal part energy
def energy_Jam_mgd_powlaw( V_a, T_a, param_d ):
# get parameter values
theta0 = param_d['theta0']
gamma= param_d['gamma0']*(V_a/param_d['V0'])**param_d['q']
theta = param_d['theta0']*np.exp((-1)*(gamma-param_d['gamma0'])/param_d['q'])
T_ref_a = 298 # here we use isothermal reference compression curve
energy_therm_a = 0.12786*(T_a*debye_fun(theta/T_a) - T_ref_a*debye_fun(theta/T_ref_a ))
return energy_therm_a
def Jam_fit(T_a,P,param_d):
return P - (21.449 * 2.40) * energy_Jam_mgd_powlaw(195*param_d['const']['Natom']/atompermol/21.449,T_a,param_d)
r = []
for ind in range(len(p2)):
T_root = optimize.brentq(Jam_fit, a=300,b=3000,args = (p2[ind],param_d),full_output = False)
r.append(T_root)
print "r: ", r
####
rho0 = 21.4449/10**24 #grams/Ang^3
rho2 = rho0*Us/(Us-Up) #unit grams/Ang^3
V_0 = 195*param_d['const']['Natom']/atompermol/rho0 #unit Ang^3/unitcell
#read Jam's data here and fit the vinet model for 300K
dat = np.loadtxt("Fig.txt", delimiter = ",", skiprows = 1)
V300_a = (1 - dat[:,2]) * V_0
P300_a = dat[:,1]
def V_fit(param_a, P_a=P300_a, V_a=V300_a):
param_d = {'V0':param_a[0],'K0':param_a[1],'K0p':param_a[2]}
Pmod_a = press_vinet(V_a,param_d)
resid_a = P_a-Pmod_a
return resid_a
param0_a = np.array([V_0, 100.0, 4.0])
print V_fit(param0_a)
paramfit_a = optimize.leastsq( V_fit, param0_a )
print "%%%%%%%%%%%%"
print "paramfit_a"
print paramfit_a
paramtrue_a = paramfit_a[0]
print "true params: ", paramtrue_a
#set true dictionary for Jam's vinet model
paramtrue = dict()
paramtrue = {'V0':paramtrue_a[0],'K0':paramtrue_a[1],'K0p':paramtrue_a[2]}
#using computed V_a to find the corresponding P_vinet
V_a = 195*param_d['const']['Natom']/atompermol/rho2 #unit Ang^3/unitcell
print "V_a: " , V_a
P300 = press_vinet(V_a, paramtrue)
print "P300 is: ", P300
#get the thermal pressure
Pth = p2-P300
print "pth ", Pth
#plt.plot(p2,V_a)
#plt.show()
mask_a = p2 > P300
print "now p2 is: ", p2
def findT(T, Pth,rho2):
atompermol = 6.022*(10**23) #at/mol
rho0 = 21.4449 #unit g/cm^3
V_0 = 195*param_d['const']['Natom']/atompermol/rho0*1e24 #unit Ang^3/unitcell
g0 = 2.40
thetaD = 200 #unit K
thetaV = thetaD * np.exp(g0*(1-rho0/rho2))
T0 = 300
fcv = 0.12786/0.12664 #the ratio of Cvmax/DP
kT0 = 1.0/40 #unit eV
unit_conv = 160.217657
return Pth - g0/V_0 * 3 * fcv * kT0 * (T/T0 * debye_fun(thetaV/T) - debye_fun(thetaV/T0)) * unit_conv
#from IPython import embed; embed(); import ipdb; ipdb.set_trace()
print "findT: ", findT(300,Pth,rho2*1e24), findT(3000,Pth,rho2*1e24)
thetaV = 200 * np.exp(2.4*(1-rho0/(rho2*1e24)))
print "thetaV", thetaV
Tfit = np.nan*np.ones(p2.shape)
for ind in range(len(Pth)):
if mask_a[ind]:
T_root = optimize.brentq(findT, a=300,b=10000,args = (Pth[ind],rho2[ind]*1e24),full_output = False)
Tfit[ind] = T_root
#print "Tfit",Tfit
#print "eta",(V_0-V_a)/V_0
#print "p2:",p2
pvt_a = np.vstack((p2,(V_0-V_a)/V_0,Tfit))
print pvt_a.T
plt.plot(Tfit/1000,p2,'rx')
plt.xlim(0,2)
plt.ylim(0,100)
plt.xlabel("temperature [kK]")
plt.ylabel("P [GPa]")
plt.show()
a = V_0*np.linspace(1,0.5,num = 100)
p = press_vinet(a,paramtrue)
plt.plot(p2,V_a,'rx', p,a,'b')
plt.xlim(0,200)
plt.show() | mit |
elhuhdron/emdrp | neon3/data/parseEMdata.py | 1 | 111047 | # The MIT License (MIT)
#
# Copyright (c) 2016 Paul Watkins, National Institutes of Health / NINDS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Generate EM data for cuda-convnet2 and neon.
# Also added "unpackager" routine for recreating output probabilities convnet outputs.
# Data is parsed out of hdf5 files for raw EM data and for labels.
# Each batch is then generated on-demand (in parallel with training previous batch).
# This process should not take more than a few seconds per batch (use verbose flag to print run times).
# The process must be shorter than the GPU batch time, so that the parsing is not the speed bottleneck.
# New feature allows for batches from multiple areas of the dataset, each area (given by size parameter and list of
# "chunk" ranges) is loaded once per batch.
#
# The hdf5 inputs can be C-order or F-order (specified in ini). hdf5 outputs always written in F-order.
import h5py
import numpy as np
from operator import add #, sub
import time
import numpy.random as nr
import os, sys
from configobj import ConfigObj, flatten_errors
from validate import Validator, ValidateError
import random
import pickle as myPickle
import io as myStringIO
# for elastic transform
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
# http://stackoverflow.com/questions/15704010/write-data-to-hdf-file-using-multiprocessing
import multiprocessing as mp
import sharedmem
def handle_hdf5_prob_output(start_queue, done_queue, probs_out, ind, label_names, outpath):
outfile = h5py.File(outpath, 'r+')
while True:
args = start_queue.get()
if args:
for n in range(len(label_names)):
d = probs_out[:,:,:,n].transpose((2,1,0)); dset = outfile[label_names[n]]
dset[ind[0]:ind[0]+d.shape[0],ind[1]:ind[1]+d.shape[1],ind[2]:ind[2]+d.shape[2]] = d
done_queue.put(1)
else:
done_queue.put(1)
break
outfile.close()
def handle_knossos_prob_output(start_queue, done_queue, probs_out, ind, label_names, outpath, strnetid):
while True:
args = start_queue.get()
if args:
curpath = os.path.join(outpath, 'x%04d' % ind[0], 'y%04d' % ind[1], 'z%04d' % ind[2])
try: os.makedirs(curpath)
except: pass
for n in range(len(label_names)):
d = probs_out[:,:,:,n].transpose((2,1,0))
d.tofile(os.path.join(curpath, label_names[n] + strnetid + '.f32'))
done_queue.put(1)
else:
done_queue.put(1)
break
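# Hedged usage sketch, not part of the original module: the two handlers above are
# intended to run as worker processes driven by a start/done queue pair, with probs_out
# allocated in shared memory so the worker sees the parent's updates. Names below are
# illustrative only.
def _example_prob_output_worker(probs_out, ind, label_names, outpath):
    start_q, done_q = mp.Queue(), mp.Queue()
    worker = mp.Process(target=handle_hdf5_prob_output,
                        args=(start_q, done_q, probs_out, ind, label_names, outpath))
    worker.start()
    start_q.put(True)   # request a write of the current probs_out contents
    done_q.get()        # block until the hdf5 write has completed
    start_q.put(None)   # any falsy value tells the worker loop to exit
    done_q.get()
    worker.join()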
class EMDataParser():
# Constants
# Numpy type to be used for cube subscripts. Throwback to parsing cube on GPU memory, but kept type for numpy.
# Very unlikely to need more than 16 bits per index (also supports loads in imagej from hdf5).
cubeSubType = np.uint16; cubeSubLim = 65536
# optional output file names / dataset names
INFO_FILE = 'batch.info'
OUTPUT_H5_CVIN = 'batch_input_data.h5'
OUTPUT_H5_CVOUT = 'batch_output_data.h5'
PRIOR_DATASET = 'prior_train'
# Where a batch is within these ranges allows for different types of batches to be selected:
# 1 to FIRST_RAND_NOLOOKUP_BATCH-1 are label lookup table randomized examples from all training cubes
# FIRST_RAND_NOLOOKUP_BATCH - FIRST_TILED_BATCH-1 are randomized examples from all training cubes
# FIRST_TILED_BATCH - (batch with max test chunk / zslice) are tiled examples from the rand then test cubes
# or all sequential cubes in chunk list or range mode
FIRST_RAND_NOLOOKUP_BATCH = 100001
FIRST_TILED_BATCH = 200001
# others
NAUGS = 32 # total number of simple augmentations (including reflections in z, 8 for xy augs only)
HDF5_CLVL = 5 # compression level in hdf5
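    # Hedged illustration, not part of the original parser: how a raw batch number maps
    # onto the three batch types described above, mirroring the range checks in getBatch.
    @staticmethod
    def _example_batch_type(batchnum):
        if batchnum >= EMDataParser.FIRST_TILED_BATCH:
            return 'tiled'
        elif batchnum >= EMDataParser.FIRST_RAND_NOLOOKUP_BATCH:
            return 'rand_nolookup'
        else:
            return 'rand_lookup'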
def __init__(self, cfg_file, write_outputs=False, init_load_path='', save_name=None, append_features=False,
chunk_skip_list=[], dim_ordering='', image_in_size=None, isTest=False):
self.cfg_file = cfg_file
self.write_outputs = write_outputs; self.save_name = save_name; self.append_features = append_features
        print("EMDataParser: config file '%s'" % cfg_file)
# retrieve / save options from ini files, see definitions parseEMdata.ini
opts = EMDataParser.get_options(cfg_file)
for k, v in list(opts.items()):
if type(v) is list and k not in ['chunk_skip_list','aug_datasets']:
if len(v)==1:
setattr(self,k,v[0]) # save single element lists as first element
elif len(v)>0 and type(v[0]) is int: # convert the sizes and offsets to numpy arrays
setattr(self,k,np.array(v,dtype=np.int32))
else:
setattr(self,k,v) # store other list types as usual (floats, empties)
else:
setattr(self,k,v)
# Options / Inits
self.isTest = isTest # added this for allowing test/train to use same ini file in chunk_list_all mode
# added in another "sub-mode" of append features to write knossos-style raw outputs instead
# xxx - guh, this has to be set externally due to the many overlapping feature adds / backcompat done here
self.append_features_knossos = False
self.strnetid = ''; # unique integer for different trained nets for use with knossos-style output format
# Previously had these as constants, but moved label data type to ini file and special labels are defined
# depending on the data type.
self.cubeLblType = eval('np.' + self.cubeLblTypeStr)
self.EMPTY_LABEL = np.iinfo(self.cubeLblType).max
#self.ECS_LABEL = np.iinfo(self.cubeLblType).max-1 # xxx - likely not using this, but keep here for now
self.ECS_LABEL = self.EMPTY_LABEL # makes default for ECS not select any ECS no matter how it's labeled
self.EMPTY_PROB = -1.0
# the manner in which the zreslice is defined, define sort from data -> re-order and from re-order -> data.
# only 3 options because data can be automatically augmented to transpose the first two dims (in each "z-slice")
# these orders were chosen because the "unsort" is the same as the "sort" indexing, so re-order->data not needed
if dim_ordering: self.dim_ordering = dim_ordering # allow command line override
if self.dim_ordering=='xyz':
self.zreslice_dim_ordering = [0,1,2]
self.zreslice_dim_ordering_index = 0
elif self.dim_ordering=='xzy':
self.zreslice_dim_ordering = [0,2,1]
self.zreslice_dim_ordering_index = 1
elif self.dim_ordering=='zyx':
self.zreslice_dim_ordering = [2,1,0]
self.zreslice_dim_ordering_index = 2
else:
assert(False) # bad dim_ordering parameter given
# immediately re-order any arguments that need it because of reslice. this prevents from having to do this on
# command line, which ended up being annoying.
# originally reading the hdf5 was done using arguments that were re-ordered on command line, so those needed
# during read are un-re-ordered (back to normal order) in readCubeToBuffers.
# considered changing this, but calculations are more intuitive after the re-order, so left for the sizes
self.size_rand = self.size_rand[self.zreslice_dim_ordering]
self.read_size = self.read_size[self.zreslice_dim_ordering]
self.read_border = self.read_border[self.zreslice_dim_ordering]
if self.nz_tiled < 0: self.nz_tiled = self.size_rand[2]
# initialize for "chunkrange" or "chunklist" mode if these parameters are not empty
self.use_chunk_list = (len(self.chunk_range_beg) > 0); self.use_chunk_range = False
assert( self.use_chunk_list or self.chunk_list_all ) # no chunk_list_all if not chunk_list mode
if self.use_chunk_list:
assert( self.nz_tiled == 0 ) # do not define tiled cube for chunklist mode
self.chunk_range_beg = self.chunk_range_beg.reshape(-1,3); self.chunk_list_index = -1
self.nchunk_list = self.chunk_range_beg.shape[0]
if len(self.chunk_range_end) > 0:
# "chunkrange" mode, chunks are selected based on defined beginning and end of ranges in X,Y,Z
# range is open ended (python-style, end is not included in range)
self.chunk_range_end = self.chunk_range_end.reshape(-1,3);
assert( self.chunk_range_end.shape[0] == self.nchunk_list )
self.chunk_range_index = -1; self.use_chunk_range = True
self.chunk_range_rng = self.chunk_range_end - self.chunk_range_beg
assert( (self.chunk_range_rng >= 0).all() ) # some bad ranges
self.chunk_range_size = self.chunk_range_rng.prod(axis=1)
self.chunk_range_cumsize = np.concatenate((np.zeros((1,),dtype=self.chunk_range_size.dtype),
self.chunk_range_size.cumsum()))
self.chunk_range_nchunks = self.chunk_range_cumsize[-1]
self.nchunks = self.chunk_range_nchunks
else:
# regular chunklist mode, chunk_range_beg just contains the list of the chunks to use
self.nchunks = self.nchunk_list
# this code is shared by defining max number of chunks depending on chunk list or chunk range mode.
# default for the chunk_range_rand is the max number of chunks.
if self.chunk_range_rand < 0: self.chunk_range_rand = self.nchunks
assert( self.chunk_range_rand <= self.nchunks )
# offsets are either per chunk or per range, depending on above mode (whether chunk_range_end empty or not)
if len(self.offset_list) > 0:
self.offset_list = self.offset_list.reshape(-1,3)
assert( self.offset_list.shape[0] == self.nchunk_list )
else:
self.offset_list = np.zeros_like(self.chunk_range_beg)
# create a list for random chunks in chunk_range_rand based on the chunk_skip_list, if provided.
# let command line override definition in ini file.
if len(chunk_skip_list) > 0: self.chunk_skip_list = chunk_skip_list
if len(self.chunk_skip_list) > 0:
mask = np.zeros((self.nchunks,), dtype=np.bool)
mask[:self.chunk_range_rand] = 1
mask[np.array(self.chunk_skip_list, dtype=np.int64)] = 0
self.chunk_rand_list = np.nonzero(mask)[0].tolist()
self.chunk_range_rand = len(self.chunk_rand_list)
# the tiled chunks default to all the chunks.
# if the chunk_skip_list is specified, then the chunk_skip_is_test parameter makes the tiled chunks
# only the chunks that are not rand chunks.
if self.chunk_skip_is_test:
self.chunk_tiled_list = np.nonzero(np.logical_not(mask))[0].tolist()
else:
self.chunk_tiled_list = list(range(self.nchunks))
else:
# in the old mode, typically the test chunks are put at the end of the chunk list,
# and all chunks are in the tiled chunk list.
                # this was annoying because a separate ini file had to be made for each cross-validation.
self.chunk_rand_list = list(range(self.chunk_range_rand))
self.chunk_tiled_list = list(range(self.nchunks))
# xxx - not an easy way not to call initBatches in the beginning without breaking everything,
# so just load the first chunk, if first batch is an incremental chunk rand batch, it should not reload
self.chunk_rand = self.chunk_range_beg[0,:]; self.offset_rand = self.offset_list[0,:]
# print out info for chunklist / chunkrange modes so that input data is logged
print(('EMDataParser: Chunk mode with %d ' % self.nchunk_list) + \
('ranges' if self.use_chunk_range else 'chunks') + \
(' of size %d %d %d:' % tuple(self.size_rand[self.zreslice_dim_ordering].tolist())))
fh = myStringIO.BytesIO()
if self.use_chunk_range:
np.savetxt(fh, np.concatenate((np.arange(self.nchunk_list).reshape((self.nchunk_list,1)),
self.chunk_range_beg, self.chunk_range_end, self.chunk_range_size.reshape((self.nchunk_list,1)),
self.offset_list), axis=1),
fmt='\t(%d) range %d %d %d to %d %d %d (%d chunks), offset %d %d %d',
delimiter='', newline='\n', header='', footer='', comments='')
else:
np.savetxt(fh, np.concatenate((np.arange(self.nchunk_list).reshape((self.nchunk_list,1)),
self.chunk_range_beg, self.offset_list), axis=1), fmt='\t(%d) chunk %d %d %d, offset %d %d %d',
delimiter='', newline='\n', header='', footer='', comments='')
cstr = fh.getvalue(); fh.close(); print(cstr.decode('UTF-8'))
#print '\tchunk_list_rand %d, chunk_range_rand %d' % (self.chunk_list_rand, self.chunk_range_rand)
print('\tchunk_skip_list: ' + str(self.chunk_skip_list))
print('\tchunk_list_all: ' + str(self.chunk_list_all))
# need these for appending features in chunklist mode, otherwise they do nothing
#self.last_chunk_rand = self.chunk_rand; self.last_offset_rand = self.offset_rand
            # need special case for first chunk in case there is no next ever loaded (if only one chunk written)
self.last_chunk_rand = None; self.last_offset_rand = None
self.cur_chunk = None # started needing this for chunk_list_all mode
# some calculated values based on constants and input arguments for tiled batches
# allow command line override of image size
if image_in_size: self.image_size = image_in_size
# few checks here on inputs if not checkable by ini specifications
assert( self.nzslices == 1 or self.nzslices == 3 ) # xxx - 1 or 3 (or multiple of 4?) only supported by convnet
# to be certain things don't get off with augmentations, in and out size need to both be even or odd
assert( (self.image_size % 2) == (self.image_out_size % 2) )
assert( self.independent_labels or self.image_out_size == 1 ) # need independent labels for multiple pixels out
assert( not self.no_labels or self.no_label_lookup ) # must have no_label_lookup in no_labels mode
#assert( not self.chunk_list_all or self.no_label_lookup ) # must have no_label_lookup for all chunks loaded
assert( not self.chunk_list_all or not self.write_outputs ) # write_outputs not supported for all chunks loaded
# optionally allow tile_size to specify size for all three orthogonal directions, pick the one we're using
if self.tile_size.size > 3:
self.tile_size_all = self.tile_size.reshape((3,-1))
self.tile_size = self.tile_size_all[self.zreslice_dim_ordering_index,:]
print(('EMDataParser: tile_size %d %d %d' % tuple(self.tile_size.tolist())))
# number of cases per batch should be kept lower than number of rand streams in convnet (128*128 = 16384)
self.num_cases_per_batch = self.tile_size.prod()
self.shape_per_batch = self.tile_size.copy(); self.shape_per_batch[0:2] *= self.image_out_size
# kept this here so I'm not tempted to do it again. tile_size is NOT re-ordered, too confusing that way
#self.shape_per_batch[self.zreslice_dim_ordering[0:2]] *= self.image_out_size
if self.verbose: print("size rand %d %d %d, shape per batch %d %d %d" % \
tuple(np.concatenate((self.size_rand, self.shape_per_batch)).tolist()))
self.size_total = self.size_rand.copy(); self.size_total[2] += self.nz_tiled
assert( ((self.size_total % self.shape_per_batch) == 0).all() )
self.tiles_per_zslice = self.size_rand // self.shape_per_batch
self.pixels_per_image = self.nzslices*self.image_size**2;
self.pixels_per_out_image = self.image_out_size**2;
# need data for slices above and below labels if nzslices > 1, keep data and labels aligned
self.nrand_zslice = self.size_rand[2] + self.nzslices - 1
self.ntiled_zslice = self.nz_tiled + self.nzslices - 1
self.ntotal_zslice = self.nrand_zslice + self.ntiled_zslice
# x/y dims on tiled zslices are the same size as for selecting rand indices
self.size_tiled = np.array((self.size_rand[0], self.size_rand[1], self.nz_tiled), dtype=np.int32)
if self.image_size % 2 == 1:
self.data_slice_size = (self.size_rand[0] + self.image_size - 1, self.size_rand[1] + self.image_size - 1,
self.ntotal_zslice);
else:
self.data_slice_size = (self.size_rand[0] + self.image_size, self.size_rand[1] + self.image_size,
self.ntotal_zslice);
self.labels_slice_size = (self.size_rand[0], self.size_rand[1], self.ntotal_zslice)
# xxx - not sure why I didn't include the nzslice into the labels offset, throwback to old provider only?
self.labels_offset = (self.image_size//2, self.image_size//2, 0)
# these were previously hidden passing to GPU provider, introduce new variables
self.batches_per_zslice = self.tiles_per_zslice[0] * self.tiles_per_zslice[1]
self.num_inds_tiled = self.num_cases_per_batch * self.batches_per_zslice
self.zslices_per_batch = self.tile_size[2]
# there can either be multiple batches per zslice or multiple zslices per batch
assert( (self.batches_per_zslice == 1 and self.zslices_per_batch >= 1) or \
(self.batches_per_zslice > 1 and self.zslices_per_batch == 1) );
self.batches_per_rand_cube = self.nrand_zslice * self.batches_per_zslice // self.zslices_per_batch
self.getLabelMap() # setup for label types, need before any other inits referencing labels
self.segmented_labels_slice_size = tuple(map(add,self.labels_slice_size,
(2*self.segmented_labels_border).tolist()))
#assert( self.segmented_labels_border[0] == self.segmented_labels_border[1] and \
# self.segmented_labels_border[2] == 0 ); # had to add this for use in GPU labels slicing
# because of the affinity graphs, segmented labels can have a border around labels.
# plot and save this way for validation.
self.seg_out_size = self.image_out_size + 2*self.segmented_labels_border[0]
self.pixels_per_seg_out = self.seg_out_size**2
# optional border around "read-size"
# this is used so that densely labeled front-end cubes do not require label merging, instead just don't select
# training examples from around some border of each of these "read-size" cubes.
if (self.read_size < 0).any():
self.read_size = self.size_rand
assert( ((self.size_rand % self.read_size) == 0).all() )
# use the read border to prevent randomized image out patches from going outside of rand size
self.read_border[0:2] += self.image_out_size//2;
# additionally image out patches can be offset from selected pixel (label lookup), so remove this also
# if label lookup is enabled. this randomized offset is used to reduce output patch correlations.
if not self.no_label_lookup:
self.read_border[0:2] += self.image_out_offset//2
self.pixels_per_out_offset = self.image_out_offset**2
# default for label train prior probabilities is uniform for all label types
if type(self.label_priors) is list:
assert( len(self.label_priors) == self.nlabels )
self.initial_label_priors = np.array(self.label_priors,dtype=np.double)
else:
self.initial_label_priors = 1.0/self.nlabels * np.ones((self.nlabels,),dtype=np.double)
# these are used for making the output probability cubes
# xxx - the tiling procedure is confusing, see comments on this in makeTiledIndices
self.output_size = list(self.labels_slice_size)
self.output_size[0] //= self.image_out_size; self.output_size[1] //= self.image_out_size
# for neon output mode that does not actually pickle the output batches
self.batch_outputs = [None] * self.batches_per_rand_cube
self.batch_outputs_ind = 0
# variables containing actual number of convnet outputs depending on label config
self.nclass = self.noutputs if self.independent_labels else self.nIndepLabels
self.oshape = (self.image_out_size, self.image_out_size, self.nIndepLabels)
# augmented data cubes can be presented in parallel with raw EM data
self.naug_data = len(self.aug_datasets)
assert( len(self.aug_mean) >= self.naug_data )
assert( len(self.aug_std) >= self.naug_data )
# Originally these were not arrays, but changed so same code can be used without lots of conditionals to
# support chunk_list_all mode which loads all chunks into system memory at once.
n = self.nchunks if self.chunk_list_all else 1
self.aug_data = [[None]*self.naug_data for i in range(n)]
self.data_cube = [None]*n
self.segmented_labels_cube = [None]*n
self.labels_cube = [None]*n
# need a copy of initial label priors, incase priors needs to be modified because of missing labels
# when creating the rand label lookup (makeRandLabelLookup)
self.label_priors = [self.initial_label_priors.copy() for i in range(n)]
self.inds_label_lookup = [self.nlabels*[None] for i in range(n)]
self.label_lookup_lens = [self.nlabels*[0] for i in range(n)]
# print out all initialized variables in verbose mode
if self.verbose:
tmp = vars(self); #tmp['indep_label_names_out'] = 'removed from print for brevity'
print('EMDataParser, vars after init:\n'); print(tmp)
# other inits
self.rand_priors = self.nlabels * [0.0]; self.tiled_priors = self.nlabels * [0.0]
# the prior to use to reweight exported probabilities.
# training prior is calculated on-the-fly by summing output label targets.
        # this is done because, for multiple outputs and label selection schemes different from the labels,
        # label_priors does not give a good indication of the labels that actually end up getting selected.
        # xxx - might be better to rename label_priors and other priors since they are not actually used as priors
self.prior_test = np.array(self.prior_test, dtype=np.double)
def initBatches(self, silent=False):
# turns off printouts during runs if in chunklist or chunkrange mode
self.silent = silent
if self.write_outputs:
if not os.path.exists(self.outpath): os.makedirs(self.outpath)
outfile = open(os.path.join(self.outpath, self.INFO_FILE), 'w'); outfile.close(); # truncate
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVIN), 'w'); outfile.close(); # truncate
if self.chunk_list_all:
# This allows different test and train parsers that both do not have to load all chunks but can still use
# the same ini file.
cl = self.chunk_tiled_list if self.isTest else self.chunk_rand_list
for c in range(len(cl)):
self.setChunkList(c, cl)
self.readCubeToBuffers(cl[c])
self.setupAllLabels(cl[c])
if not self.no_label_lookup: self.makeRandLabelLookup(cl[c])
else:
self.readCubeToBuffers()
self.setupAllLabels()
if not self.no_label_lookup: self.makeRandLabelLookup()
if self.write_outputs: self.enumerateTiledLabels()
# for chunklist or chunkrange modes, the tiled indices do not change, so no need to regenerate them
# xxx - use hasattr for this in a number of spots, maybe change to a single boolean for readability?
if not hasattr(self, 'inds_tiled'): self.makeTiledIndices()
if self.write_outputs: self.writeH5Cubes()
self.makeBatchMeta()
self.silent = False
def makeRandLabelLookup(self, chunkind=0):
#assert( not self.chunk_list_all ) # random batches with lookup not intended for all chunks loaded mode
if not self.silent: print('EMDataParser: Creating rand label lookup for specified zslices')
self.label_priors[chunkind][:] = self.initial_label_priors # incase prior was modified below in chunk mode
max_total_voxels = self.size_rand.prod() # for heuristic below - xxx - rethink this, or add param?
total_voxels = 0
#self.inds_label_lookup = self.nlabels*[None]
for i in range(self.nlabels):
inds = np.transpose(np.nonzero(self.labels_cube[chunkind][:,:,0:self.nrand_zslice] == i))
if inds.shape[0] > 0:
# don't select from end slots for multiple zslices per case
inds = inds[np.logical_and(inds[:,2] >= self.nzslices//2,
inds[:,2] < self.nrand_zslice-self.nzslices//2),:]
inds = self.rand_inds_remove_border(inds)
# if after removing borders a label is missing, do not hard error, just force the prior to zero
# xxx - heuristic for removing labels with very few members, 1/32^3, use param?
if inds.shape[0] == 0 or float(inds.shape[0])/max_total_voxels < 3.0517578125e-05:
if not self.silent: print('EMDataParser: no voxels with label %d forcing prior to zero' % i)
# redistribute current prior amongst remaining nonzero priors
prior = self.label_priors[chunkind][i]; self.label_priors[chunkind][i] = 0
pinds = np.arange(self.nlabels)[self.label_priors[chunkind] > 0]
self.label_priors[chunkind][pinds] += prior/pinds.size
#assert( self.label_priors.sum() - 1.0 < 1e-5 )
inds += self.labels_offset
assert( np.logical_and(inds >= 0, inds < self.cubeSubLim).all() )
self.label_lookup_lens[chunkind][i] = inds.shape[0]; total_voxels += inds.shape[0]
self.inds_label_lookup[chunkind][i] = inds.astype(self.cubeSubType, order='C')
if self.write_outputs:
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVIN), 'a');
outfile.create_dataset('inds_label_lookup_%d' % i,data=inds,compression='gzip',
compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
outfile.close();
else:
# if a label is missing, must specify label priors on command line to handle this.
# xxx - maybe do the same as above for this, just remove and redistribute this prior?
if not self.silent: print('EMDataParser: no voxels with label %d' % i)
assert(self.label_priors[chunkind][i] == 0) # prior must be zero if label is missing
assert(total_voxels > 0);
self.rand_priors = [float(x)/total_voxels for x in self.label_lookup_lens[chunkind]]
if self.write_outputs:
outfile = open(os.path.join(self.outpath, self.INFO_FILE), 'a')
outfile.write('\nTotal voxels included for random batches %u\n' % (total_voxels,))
for i in range(self.nlabels):
outfile.write('label %d %s percentage of voxels = %.8f , count = %d, use prior %.8f\n' %\
(i,self.label_names[i],self.rand_priors[i],self.label_lookup_lens[0][i],self.label_priors[0][i]))
outfile.write('Sum percentage of allowable rand voxels = %.3f\n' % sum(self.rand_priors))
outfile.close();
# border is used to not select training examples from areas of "read-size" cubes between which the labels are
# potentially not consistent (because they were densely labeled separately).
def rand_inds_remove_border(self, inds):
nread_cubes = (self.size_rand // self.read_size)
for d in range(3):
bmin = self.read_border[d]; inds = inds[inds[:,d] >= bmin,:]
for i in range(1,nread_cubes[d]):
bmin = i*self.read_size[d] - self.read_border[d]; bmax = i*self.read_size[d] + self.read_border[d];
inds = inds[np.logical_or(inds[:,d] < bmin, inds[:,d] >= bmax),:]
bmax = self.size_rand[d] - self.read_border[d]; inds = inds[inds[:,d] < bmax,:]
return inds
def enumerateTiledLabels(self):
if not self.silent: print('EMDataParser: Enumerating tiled labels (for prior probabilities)')
#total_voxels = self.size_tiled.prod() # because of potential index selects or missing labels, sum instead
tiled_count = self.nlabels * [0]; total_voxels = 0
for i in range(self.nlabels):
inds = np.transpose(np.nonzero(self.labels_cube[0][:,:,self.nrand_zslice:self.ntotal_zslice] == i))
if inds.shape[0] > 0:
# don't select from end slots for multiple zslices per case
inds = inds[np.logical_and(inds[:,2] >= self.nzslices//2,
inds[:,2] < self.ntiled_zslice-self.nzslices//2),:]
tiled_count[i] += inds.shape[0]; total_voxels += inds.shape[0]
outfile = open(os.path.join(self.outpath, self.INFO_FILE), 'a')
outfile.write('\nTotal voxels included for tiled %u\n' % (total_voxels,))
if total_voxels > 0:
self.tiled_priors = [float(x)/total_voxels for x in tiled_count]
for i in range(self.nlabels):
outfile.write('label %d %s percentage of voxels = %.8f , count = %d, use prior %.8f\n' \
% (i,self.label_names[i],self.tiled_priors[i],tiled_count[i],self.label_priors[0][i]))
outfile.write('Sum percentage of allowable tiled voxels = %.3f\n\n' % sum(self.tiled_priors))
# priors again for copy / paste convenience (if using in convnet param file)
outfile.write('Priors train: %s\n' % ','.join('%.8f' % i for i in self.label_priors[0]))
if total_voxels > 0:
outfile.write('Priors test: %s\n' % ','.join('%.8f' % i for i in self.tiled_priors))
if not self.no_label_lookup:
outfile.write('Priors rand: %s\n\n' % ','.join('%.8f' % i for i in self.rand_priors))
# other useful info (for debugging / validating outputs)
outfile.write('data_shape %dx%dx%d ' % self.data_cube[0].shape)
outfile.write('labels_shape %dx%dx%d\n' % self.labels_cube[0].shape)
outfile.write('num_rand_zslices %d, num_tiled_zslices %d, zslice size %dx%d\n' %\
(self.size_rand[2], self.nz_tiled, self.size_rand[0], self.size_rand[1]))
outfile.write('num_cases_per_batch %d, tiles_per_zslice %dx%dx%d\n' %\
(self.num_cases_per_batch, self.tiles_per_zslice[0], self.tiles_per_zslice[1],
self.tiles_per_zslice[2]))
outfile.write('image_out_size %d, tile_size %dx%dx%d, shape_per_batch %dx%dx%d\n' %\
(self.image_out_size, self.tile_size[0], self.tile_size[1], self.tile_size[2],
self.shape_per_batch[0], self.shape_per_batch[1], self.shape_per_batch[2]))
outfile.close()
def makeTiledIndices(self):
# xxx - realized that after writing it this way, just evenly dividing the total number of pixels per zslice
# would have also probably worked fine. this code is a bit confusing, but it works. basically just makes it
# so that each batch is a rectangular tile of a single zslice, instead of some number of pixels in the zslice.
# this method has also been extended for the case of image_out patches to get multiple z-slices per batch.
# the method is quite confusing, but it is working so leaving it as is for now, might consider revising this.
if not self.silent: print('EMDataParser: Creating tiled indices (typically for test and writing outputs)')
# create the indices for the tiled output - multiple tiles per zslice
# added the z dimension into the indices so multiple outputs we can have multiple z-slices per batch.
# tile_size is the shape in image output patches for each batch
# shape_per_batch is the shape in voxels for each batch
# if more than one tile fits in a zslice, the first two dimensions of tiles_per_zslice gives this shape.
# if one tile is a single zslice, then the third dim of tiles_per_zslice gives number of zslices in a batch.
# swapped the dims so that z is first (so it changes the slowest), need to re-order below when tiling.
inds_tiled = np.zeros((3,self.tile_size[2],self.tiles_per_zslice[1],self.tiles_per_zslice[0],
self.tile_size[0],self.tile_size[1]), dtype=self.cubeSubType, order='C')
for x in range(self.tiles_per_zslice[0]):
xbeg = self.labels_offset[0] + x*self.shape_per_batch[0] + self.image_out_size//2
for y in range(self.tiles_per_zslice[1]):
ybeg = self.labels_offset[1] + y*self.shape_per_batch[1] + self.image_out_size//2
inds = np.require(np.mgrid[0:self.shape_per_batch[2], xbeg:(xbeg+self.shape_per_batch[0]):\
self.image_out_size, ybeg:(ybeg+self.shape_per_batch[1]):self.image_out_size], requirements='C')
assert( np.logical_and(inds >= 0, inds < self.cubeSubLim).all() )
inds_tiled[:,:,y,x,:,:] = inds # yes, dims are swapped, this is correct (meh)
# unswap the dims, xxx - again, maybe this should be re-written in a simpler fashion?
self.inds_tiled = inds_tiled.reshape((3,self.num_inds_tiled))[[1,2,0],:]
# create another copy that is used for generating the output probabilities (used to be unpackager).
# this is also confusing using this method of tiling, see comments above.
# xxx - the transpose is a throwback to how it was written previously in unpackager.
# could change subscript order here and in makeOutputCubes, did not see a strong need for this, view is fine
self.inds_tiled_out = self.inds_tiled.copy().T
self.inds_tiled_out[:,0] -= self.labels_offset[0]; self.inds_tiled_out[:,1] -= self.labels_offset[1];
self.inds_tiled_out[:,0:2] //= self.image_out_size
# xxx - these are from the old unpackager, should be self-consistent now, so removed this assert
#assert( ((self.inds_tiled_out[:,0] >= 0) & (self.inds_tiled_out[:,0] < self.labels_slice_size[0]) & \
# (self.inds_tiled_out[:,1] >= 0) & (self.inds_tiled_out[:,1] < self.labels_slice_size[1]) & \
# (self.inds_tiled_out[:,2] >= 0)).all() )
#assert( self.batches_per_zslice*self.num_cases_per_batch == self.inds_tiled_out.shape[0] )
if self.write_outputs:
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVIN), 'a');
outfile.create_dataset('tiled_indices',(3,self.tiles_per_zslice[0]*self.tile_size[0],
self.tiles_per_zslice[1]*self.tile_size[1]*self.tile_size[2]),data=inds_tiled,compression='gzip',
compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
outfile.close();
def writeH5Cubes(self):
print('EMDataParser: Exporting raw data / labels to hdf5 for validation at "%s"' % (self.outpath,))
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVIN), 'a');
outfile.create_dataset('data',data=self.data_cube[0].transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
# copy the attributes over
for name,value in list(self.data_attrs.items()):
outfile['data'].attrs.create(name,value)
if self.labels_cube[0].size > 0:
outfile.create_dataset('labels',data=self.labels_cube[0].transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
outfile.create_dataset('segmented_labels',data=self.segmented_labels_cube[0].transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
for name,value in list(self.labels_attrs.items()):
outfile['segmented_labels'].attrs.create(name,value)
outfile.close();
# this is the main interface method for fetching batches from the memory cached chunk from the hdf5 file.
# in normal mode this fetches batches that have been loaded to memory already (see initBatches).
# in chunklist mode, this can initiate loading a new chunk from a separate location in the hdf5 file.
# the total processing time here needs to be kept under the network batch time, as the load happens in parallel.
#def getBatch(self, batchnum, plot_outputs=False, tiledAug=0, do_preprocess=True):
def getBatch(self, batchnum, plot_outputs=False, tiledAug=0):
t = time.time()
# allocate batch
data = np.zeros((self.pixels_per_image, self.num_cases_per_batch), dtype=np.single, order='C')
aug_data = [None] * self.naug_data
for i in range(self.naug_data):
aug_data[i] = np.zeros((self.pixels_per_image, self.num_cases_per_batch), dtype=np.single, order='C')
if self.no_labels or self.zero_labels:
labels = np.zeros((0, self.num_cases_per_batch), dtype=np.single, order='C')
seglabels = np.zeros((0, self.num_cases_per_batch), dtype=self.cubeLblType, order='C')
else:
labels = np.zeros((self.noutputs, self.num_cases_per_batch), dtype=np.single, order='C')
seglabels = np.zeros((self.pixels_per_seg_out, self.num_cases_per_batch), dtype=self.cubeLblType, order='C')
# get data, augmented data and labels depending on batch type
if batchnum >= self.FIRST_TILED_BATCH:
augs = [tiledAug]
self.getTiledBatch(data,aug_data,labels,seglabels,batchnum,tiledAug)
elif batchnum >= self.FIRST_RAND_NOLOOKUP_BATCH:
augs = self.generateRandNoLookupBatch(data,aug_data,labels,seglabels)
else:
augs = self.generateRandBatch(data,aug_data,labels,seglabels)
# option to return zero labels, need this when convnet is expecting labels but not using them.
# this is useful for dumping features over a large area that does not contain labels.
if self.zero_labels:
labels = np.zeros((self.noutputs, self.num_cases_per_batch), dtype=np.single, order='C')
# replaced preprocessing with scalar mean subtraction and scalar std division.
# For means: < 0 and >= -1 for mean over batch, < -1 for mean over current loaded chunk, 0 to do nothing
# For stds: <= 0 and >= -1 for std over batch, < -1 for std over current loaded chunk, 1 to do nothing
if not plot_outputs:
data -= self.EM_mean if self.EM_mean >= 0 else data.mean()
data /= self.EM_std if self.EM_std > 0 else data.std()
for i in range(self.naug_data):
aug_data[i] -= self.aug_mean[i] if self.aug_mean[i] >= 0 else aug_data[i].mean()
aug_data[i] /= self.aug_std[i] if self.aug_std[i] > 0 else aug_data[i].std()
if self.verbose and not self.silent:
print('EMDataParser: Got batch ', batchnum, ' (%.3f s)' % (time.time()-t,))
# xxx - add another parameter here for different plots?
if plot_outputs: self.plotDataLbls(data,labels,seglabels,augs,pRand=(batchnum < self.FIRST_TILED_BATCH))
#if plot_outputs: self.plotData(data,dataProc,batchnum < self.FIRST_TILED_BATCH)
#time.sleep(5) # useful for "brute force" memory leak debug
#return data, labels
return ([data] if self.no_labels else [data, labels]) + aug_data
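    # Hedged usage sketch, not part of the original parser: fetch all tiled batches of the
    # current cube in order, as the training / feature-dump harness typically would after
    # construction and initBatches(). Each element is the list returned by getBatch
    # ([data] or [data, labels], plus any augmented data channels).
    def _example_fetch_all_tiled(self, aug=0):
        return [self.getBatch(self.FIRST_TILED_BATCH + i, tiledAug=aug)
                for i in range(self.batches_per_rand_cube)]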
def generateRandBatch(self,data,aug_data,labels,seglabels):
assert( not self.no_labels )
if self.use_chunk_list: self.randChunkList() # load a new cube in chunklist or chunkrange modes
# pick labels that will be used to select images to present
#lbls = nr.choice(self.nlabels, (self.num_cases_per_batch,), p=self.label_priors)
augs = np.bitwise_and(nr.choice(self.NAUGS, self.num_cases_per_batch), self.augs_mask)
if self.no_label_lookup:
assert(False) # never implemented balanced randomized batch creation without label lookup
#inds, chunks = self.generateRandNoLookupInds(factor=10)
## generate an inds label lookup on the fly
#inds_label_lookup = [None]*self.nlabels
#for i in range(self.nlabels):
# inds_label_lookup[i] = np.transpose(np.nonzero(self.labels_cube[0][:,:,0:self.nrand_zslice] == i))
else:
            # randomize the chunks that are presented also if all chunks are loaded
if self.chunk_list_all:
cl = self.chunk_rand_list; ncl = self.chunk_range_rand
chunks = nr.choice(ncl, (self.num_cases_per_batch,))
                # need special label creation here in case priors needed changing for a chunk because
# one of the label types was missing (for example ECS in a low ECS dataset).
lbls = np.zeros((self.num_cases_per_batch,), dtype=np.int64)
for c, chunk in zip(list(range(ncl)), cl):
sel = (chunks == c); n = sel.sum(dtype=np.int64)
lbls[chunks==c] = nr.choice(self.nlabels, (n,), p=self.label_priors[chunk])
else:
cl = [0]; ncl = 1
chunks = np.zeros((self.num_cases_per_batch,), dtype=np.int64)
lbls = nr.choice(self.nlabels, (self.num_cases_per_batch,), p=self.label_priors[0])
# generate any possible random choices for each label type and chunk, will not use all, for efficiency
inds_lbls = np.zeros((self.nlabels, ncl, self.num_cases_per_batch), dtype=np.uint64)
for c, chunk in zip(list(range(ncl)), cl):
for i in range(self.nlabels):
if self.label_lookup_lens[chunk][i]==0: continue # do not attempt to select labels if there are none
inds_lbls[i,c,:] = nr.choice(self.label_lookup_lens[chunk][i], self.num_cases_per_batch)
# generate a random offset from the selected location.
# this prevents the center pixel from being the pixel that is used to select the image every time and
# reduces correlations in label selection priors between the center and surrounding
# image out patch labels. total possible range of offset comes from image_out_offset parameter.
        # an offset (rand range) larger than image_out_size can help reduce correlations further.
# an offset parameter of 1 causes offset to always be zero and so selection is only based on center pixel.
offset = np.zeros((self.num_cases_per_batch,3), dtype=self.cubeSubType)
offset[:,0:2] = np.concatenate([x.reshape(self.num_cases_per_batch,1) for x in np.unravel_index(\
nr.choice(self.pixels_per_out_offset, (self.num_cases_per_batch,)),
(self.image_out_offset, self.image_out_offset))], axis=1) - self.image_out_offset//2
for imgi in range(self.num_cases_per_batch):
chunk = cl[chunks[imgi]]
inds = self.inds_label_lookup[chunk][lbls[imgi]][inds_lbls[lbls[imgi],chunks[imgi],imgi],:] + offset[imgi,:]
self.getAllDataAtPoint(inds,data,aug_data,imgi,augs[imgi],chunk=chunk)
self.getLblDataAtPoint(inds,labels[:,imgi],seglabels[:,imgi],augs[imgi],chunk=chunk)
self.tallyTrainingPrior(labels)
return augs
def generateRandNoLookupBatch(self,data,aug_data,labels,seglabels):
# load a new cube in chunklist or chunkrange modes
if self.use_chunk_list and not self.chunk_list_all: self.randChunkList()
inds, chunks = self.generateRandNoLookupInds()
augs = np.bitwise_and(nr.choice(self.NAUGS, self.num_cases_per_batch), self.augs_mask)
for imgi in range(self.num_cases_per_batch):
self.getAllDataAtPoint(inds[imgi,:],data,aug_data,imgi,augs[imgi],chunk=chunks[imgi])
if not self.no_labels:
self.getLblDataAtPoint(inds[imgi,:],labels[:,imgi],seglabels[:,imgi],augs[imgi],chunk=chunks[imgi])
if not self.no_labels and not self.zero_labels: self.tallyTrainingPrior(labels)
return augs
def generateRandNoLookupInds(self, factor=2):
# generate random indices from anywhere in the rand cube
if self.no_labels:
size = self.size_rand; offset = self.labels_offset
else:
# xxx - bug was here originally up until K0057, 29 Mar 2017, was missing +1
# plus one since difference of zero actually means no choice of placement after border removed
            # (so dimension sized 1), difference of 1 means 2 possible choices, etc
size = self.size_rand - 2*self.read_border + 1
offset = self.labels_offset + self.read_border
nrand_inds = factor*self.num_cases_per_batch
#print(size, self.labels_offset, self.read_border)
inds = np.concatenate([x.reshape((nrand_inds,1)) for x in np.unravel_index(nr.choice(size.prod(),
nrand_inds), size)], axis=1) + offset
# don't select from end slots for multiple zslices per case
inds = inds[np.logical_and(inds[:,2] >= self.nzslices//2, inds[:,2] < self.nrand_zslice-self.nzslices//2),:]
        # randomize the chunks that are presented also if all chunks are loaded
if self.chunk_list_all:
chunks = nr.choice(self.chunk_rand_list, (self.num_cases_per_batch,))
else:
chunks = np.zeros((self.num_cases_per_batch,), dtype=np.int64)
return inds, chunks
def tallyTrainingPrior(self, labels):
if 'prior_train_count' not in self.batch_meta: return
# training label counts for calculating prior are allocated in convnet layers.py harness
# so that they can be stored in the convnet checkpoints.
self.batch_meta['prior_total_count'] += self.num_cases_per_batch
if self.independent_labels:
self.batch_meta['prior_train_count'] += labels.astype(np.bool).sum(axis=1)
else:
cnts,edges = np.histogram(labels.astype(np.int32), bins=list(range(0,self.nlabels+1)), range=(0,self.nlabels))
self.batch_meta['prior_train_count'] += cnts
def getTiledBatchOffset(self, batchnum, setChunkList=False):
assert( batchnum >= self.FIRST_TILED_BATCH ) # this is only for tiled batches
# these conversions used to be in data.cu for GPU data provider
batchOffset = batchnum - self.FIRST_TILED_BATCH
# for chunklist mode, the batch also determines which chunk we are in. need to reload if moving to new chunk
if self.use_chunk_list:
chunk = batchOffset // self.batches_per_rand_cube; batchOffset %= self.batches_per_rand_cube
# xxx - moved this here so don't need this requirement for rand, doesn't matter b/c randomly selected.
# it is possible to fix this for tiled, but doesn't seem necessary.
# if it's fixed need to decide what are the chunks... still easier to have them as actual Knossos-sized
# chunks and not as defined by size_rand, then have to change how chunk_range_index is incremented
# xxx - dealt with this in a hacky way below in setChunkList for chunk_range mode
#assert( not self.use_chunk_range or (self.size_rand == self.chunksize).all() )
# draw from the list of tiled chunks only, set in init depending on parameters.
if setChunkList: self.setChunkList(chunk, self.chunk_tiled_list)
else:
chunk = None
return batchOffset, chunk
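    # Hedged illustration, not part of the original parser: in chunklist / chunkrange mode,
    # consecutive tiled batch numbers walk through the tiled chunks in blocks of
    # batches_per_rand_cube, as computed in getTiledBatchOffset above.
    def _example_tiled_batch_to_chunk(self, batchnum):
        offset, chunk = self.getTiledBatchOffset(batchnum, setChunkList=False)
        return chunk, offset  # chunk indexes chunk_tiled_list (None if not in chunk mode)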
def getTiledBatch(self, data,aug_data,labels,seglabels, batchnum, aug=0):
batchOffset, chunk = self.getTiledBatchOffset(batchnum, setChunkList=True)
# get index and zslice. same method for regular or use_chunk_list modes.
ind0 = (batchOffset % self.batches_per_zslice)*self.num_cases_per_batch
zslc = batchOffset // self.batches_per_zslice * self.zslices_per_batch + self.nzslices//2;
        assert( zslc < self.ntotal_zslice ) # usually fails when the requested tiled batch is out of range of the cube
inds = np.zeros((3,),dtype=self.cubeSubType)
chunk = self.cur_chunk if self.chunk_list_all else 0
for imgi in range(self.num_cases_per_batch):
inds[:] = self.inds_tiled[:,ind0 + imgi]; inds[2] += zslc
self.getAllDataAtPoint(inds,data,aug_data,imgi,aug,chunk=chunk)
self.getLblDataAtPoint(inds,labels[:,imgi],seglabels[:,imgi],aug,chunk=chunk)
# set to a specific chunk, re-initBatches if the new chunk is different from the current one
def setChunkList(self, chunk, chunk_list):
# should only be called from chunklist or chunkrange modes
assert(chunk >= 0 and chunk < len(chunk_list)) # usually fails when tiled batch is out of range of chunks
chunk = chunk_list[chunk] # chunk looks up in appropriate list (for rand or tiled chunks), set by ini
if self.use_chunk_range:
self.chunk_list_index = np.nonzero(chunk >= self.chunk_range_cumsize)[0][-1]
self.chunk_range_index = chunk - self.chunk_range_cumsize[self.chunk_list_index]
if (self.size_rand == self.chunksize).all():
# original mode
chunk_rand = np.unravel_index(self.chunk_range_index, self.chunk_range_rng[self.chunk_list_index,:]) \
+ self.chunk_range_beg[self.chunk_list_index,:]
else:
assert(False) # xxx - this hack is dangerous for debugging normal use, so comment this if needed
# this is something of a hack to allow for batches larger than chunksize
scale = self.size_rand // self.chunksize
chunk_rand = np.unravel_index(self.chunk_range_index, self.chunk_range_rng[self.chunk_list_index,:]) \
* scale + self.chunk_range_beg[self.chunk_list_index,:]
offset_rand = self.offset_list[self.chunk_list_index,:]
else:
self.chunk_list_index = chunk
chunk_rand = self.chunk_range_beg[chunk,:]; offset_rand = self.offset_list[chunk,:]
self.cur_chunk = chunk # started needing this for chunk_list_all mode
# compare with actual chunks and offsets here instead of index to avoid loading the first chunk twice
if (chunk_rand != self.chunk_rand).any() or (offset_rand != self.offset_rand).any():
if self.last_chunk_rand is None:
                # special case in case there is no next chunk ever loaded (only one chunk being written)
self.last_chunk_rand = chunk_rand; self.last_offset_rand = offset_rand;
else:
self.last_chunk_rand = self.chunk_rand; self.last_offset_rand = self.offset_rand;
self.chunk_rand = chunk_rand; self.offset_rand = offset_rand;
if not self.chunk_list_all: self.initBatches(silent=not self.verbose)
elif self.last_chunk_rand is None:
            # special case in case there is no next chunk ever loaded (only one chunk being written)
self.last_chunk_rand = chunk_rand; self.last_offset_rand = offset_rand;
def randChunkList(self):
assert( self.chunk_range_rand > 0 ) # do not request rand chunk with zero range
# should only be called from chunklist or chunkrange modes
# xxx - randomizing chunks performed very poorly, so removed in favor of chunk_list_all mode
#if self.chunk_list_rand:
# nextchunk = random.randrange(self.chunk_range_rand)
#else:
if self.use_chunk_range: nextchunk = (self.chunk_range_index+1) % self.chunk_range_rand
else: nextchunk = (self.chunk_list_index+1) % self.chunk_range_rand
# draw from the list of random chunks only, set in init depending on parameters.
self.setChunkList(nextchunk, self.chunk_rand_list)
def getAllDataAtPoint(self,inds,data,aug_data,imgi,aug=0,chunk=0):
self.getImgDataAtPoint(self.data_cube[chunk],inds,data[:,imgi],aug)
for i in range(self.naug_data):
self.getImgDataAtPoint(self.aug_data[chunk][i],inds,aug_data[i][:,imgi],aug)
def getImgDataAtPoint(self,data_cube,inds,data,aug):
# don't simplify this... it's integer math
selx = slice(inds[0]-self.image_size//2,inds[0]-self.image_size//2+self.image_size)
sely = slice(inds[1]-self.image_size//2,inds[1]-self.image_size//2+self.image_size)
selz = slice(inds[2]-self.nzslices//2,inds[2]-self.nzslices//2+self.nzslices)
#print data_cube[selx,sely,selz].shape, inds
data[:] = EMDataParser.augmentData(data_cube[selx,sely,selz].astype(np.single),
aug).transpose(2,0,1).flatten('C') # z last because channel data must be contiguous for convnet
def getLblDataAtPoint(self,inds,labels,seglabels,aug=0,chunk=0):
if labels.size == 0: return
# some conditions can not happen here, and this should have been asserted in getLabelMap
if not self.independent_labels:
assert( self.noutputs == 1 ) # just make sure
# image out size has to be one in this case, just take the center label
# the image in and out size must both be even or odd for this to work (asserted in init)
indsl = inds - self.labels_offset; labels[:] = self.labels_cube[chunk][indsl[0],indsl[1],indsl[2]]
seglabels[:] = self.segmented_labels_cube[chunk][indsl[0],indsl[1],indsl[2]]
else:
# NOTE from getLabelMap for border: make 3d (for convenience) in ortho reslice code, but always need same
# in xy dir and zero in z for lbl slicing. xxx - maybe too confusing, fix to just use scalar?
b = self.segmented_labels_border; indsl = inds - self.labels_offset + b
# don't simplify this... it's integer math
selx = slice(indsl[0]-self.seg_out_size//2,indsl[0]-self.seg_out_size//2+self.seg_out_size)
sely = slice(indsl[1]-self.seg_out_size//2,indsl[1]-self.seg_out_size//2+self.seg_out_size)
lbls = EMDataParser.augmentData(self.segmented_labels_cube[chunk][selx,sely,indsl[2]].reshape((\
self.seg_out_size,self.seg_out_size,1)),aug,order=0).reshape((self.seg_out_size,self.seg_out_size))
seglabels[:] = lbls.flatten('C')
# xxx - currently affinity labels assume a single pixel border around the segmented labels only.
# put other views here if decide to expand label border for selection (currently does not seem necessary)
# also see note above, might be better to make segmented_labels_border a scalar
lblscntr = lbls[b[0]:-b[0],b[1]:-b[1]] if b[0] > 0 else lbls
# change the view on the output for easy assignment
lblsout = labels.reshape(self.oshape)
# this code needs to be consistent with (independent) label meanings defined in getLabelMap
if self.label_type == 'ICSorOUT':
if self.image_out_size==1:
# need at least two outputs for convnet (even if independent)
lblsout[lblscntr > 0,0] = 1; # ICS
lblsout[lblscntr == 0,1] = 1; # OUT
else:
lblsout[lblscntr > 0,0] = 1; # ICS
elif self.label_type == 'ICSorECSorMEM':
lblsout[np.logical_and(lblscntr > 0,lblscntr != self.ECS_label_value),0] = 1; # ICS
lblsout[lblscntr == self.ECS_label_value,1] = 1; # ECS
lblsout[lblscntr == 0,2] = 1; # MEM
elif self.label_type == 'ICSorECS':
lblsout[np.logical_and(lblscntr > 0,lblscntr != self.ECS_label_value),0] = 1; # ICS
lblsout[lblscntr == self.ECS_label_value,1] = 1; # ECS
elif self.label_type == 'ICSorMEM':
lblsout[np.logical_and(lblscntr > 0,lblscntr != self.ECS_label_value),0] = 1; # ICS
lblsout[lblscntr == 0,1] = 1; # MEM
elif self.label_type == 'affin2':
isICS = (lblscntr > 0)
lblsout[np.logical_and(isICS,np.diff(lbls[1:,1:-1],1,0) == 0),0] = 1;
lblsout[np.logical_and(isICS,np.diff(lbls[1:-1,1:],1,1) == 0),1] = 1;
elif self.label_type == 'affin4':
diff0 = np.diff(lbls[1:,1:-1],1,0)==0; diff1 = np.diff(lbls[1:-1,1:],1,1)==0
# affinities for ICS voxels
isICS = np.logical_and(lblscntr > 0, lblscntr != self.ECS_label_value)
lblsout[np.logical_and(isICS,diff0),0] = 1; lblsout[np.logical_and(isICS,diff1),1] = 1;
# affinities for ECS voxels
isECS = (lblscntr == self.ECS_label_value)
lblsout[np.logical_and(isECS,diff0),2] = 1; lblsout[np.logical_and(isECS,diff1),3] = 1;
elif self.label_type == 'affin6':
diff0 = np.diff(lbls[1:,1:-1],1,0)==0; diff1 = np.diff(lbls[1:-1,1:],1,1)==0
# affinities for ICS voxels
isICS = np.logical_and(lblscntr > 0, lblscntr != self.ECS_label_value)
lblsout[np.logical_and(isICS,diff0),0] = 1; lblsout[np.logical_and(isICS,diff1),1] = 1;
# affinities for ECS voxels
isECS = (lblscntr == self.ECS_label_value)
lblsout[np.logical_and(isECS,diff0),2] = 1; lblsout[np.logical_and(isECS,diff1),3] = 1;
# affinities for MEM voxels
isMEM = (lblscntr == 0)
lblsout[np.logical_and(isMEM,diff0),4] = 1; lblsout[np.logical_and(isMEM,diff1),5] = 1;
# originally this was single function for loading em data and labels.
# split into reading of labels and reading of data so that extra data can be read, i.e., augmented data
#
# Comments from original function regarding how data is loading to support reslices and C/F order:
# xxx - might think of a better way to "reslice" the dimensions later, for now, here's the method:
# read_direct requires the same size for the numpy array as in the hdf5 file. so if we're re-ordering the dims:
# (1) re-order the sizes to allocate here as if in original xyz order.
# (2) re-order the dims and sizes used in the *slices_from_indices functions into original xyz order.
# chunk indices are not changed.
# (3) at the end of this function re-order the data and labels into the specified dim ordering
# (4) the rest of the packager is then blind to the reslice dimension ordering
# NOTE ORIGINAL: chunk indices should be given in original hdf5 ordering.
# all other command line arguments should be given in the re-ordered ordering.
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
# NEW NOTE: had the re-ordering of command line inputs for reslice done automatically, meaning all inputs on
# command line should be given in original ordering, but they are re-ordered in re-slice order in init, so
# un-re-order here to go back to original ordering again (minimal overhead, done to reduce debug time).
#
# ultimately everything is accessed as C-order, but support loading from F-order hdf5 inputs.
# h5py requires that for read_direct data must be C order and contiguous. this means F-order must be dealt with
# "manually". for F-order the cube will be in C-order, but shaped like F-order, and then the view
# transposed back to C-order so that it's transparent in the rest of the code.
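# Hedged summary sketch (comments only, not executed): the loading code below follows this transpose
# round-trip pattern, where 'order' stands for self.zreslice_dim_ordering:
#   cube = cube.transpose(order)                        # zreslice un-re-ordering -> original xyz view
#   if not hdf5_Corder: cube = cube.transpose(2,1,0)    # nested C/F re-ordering -> F-order shaped view
#   # ... hdf5 read_direct fills the cube here ...
#   if not hdf5_Corder: cube = cube.transpose(2,1,0)    # undo the C/F view change
#   cube = cube.transpose(order)                        # re-apply the reslice ordering for the rest of the code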
def readCubeToBuffers(self, chunkind=0):
if not self.silent: print('EMDataParser: Buffering data and labels chunk %d,%d,%d offset %d,%d,%d' % \
(self.chunk_rand[0], self.chunk_rand[1], self.chunk_rand[2],
self.offset_rand[0], self.offset_rand[1], self.offset_rand[2]))
c = chunkind
assert( c==0 or self.chunk_list_all ) # sanity check
self.data_cube[c], self.data_attrs, self.chunksize, self.datasize = \
self.loadData( self.data_cube[c], self.imagesrc, self.dataset )
self.segmented_labels_cube[c], self.label_attrs = self.loadSegmentedLabels(self.segmented_labels_cube[c])
# load augmented data cubes
for i in range(self.naug_data):
self.aug_data[c][i], data_attrs, chunksize, datasize = \
self.loadData( self.aug_data[c][i], self.augsrc, self.aug_datasets[i] )
if not self.silent: print('\tbuffered aug data ' + self.aug_datasets[i])
def loadData(self, data_cube, fname, dataset):
data_size = list(self.data_slice_size[i] for i in self.zreslice_dim_ordering)
size_rand = self.size_rand[self.zreslice_dim_ordering]; size_tiled = self.size_tiled[self.zreslice_dim_ordering]
if self.verbose and not self.silent: print('data slice size ' + str(self.data_slice_size) + \
' data size ' + str(data_size) + ' size rand ' + str(size_rand) + ' size tiled ' + str(size_tiled))
hdf = h5py.File(fname,'r')
if data_cube is None:
# for chunkrange / chunklist mode, this function is recalled, don't reallocate in this case
if self.hdf5_Corder:
data_cube = np.zeros(data_size, dtype=hdf[dataset].dtype, order='C')
else:
data_cube = np.zeros(data_size[::-1], dtype=hdf[dataset].dtype, order='C')
else:
# change back to the original view (same view changes as below, opposite order)
# zreslice un-re-ordering, so data is in original view in this function
data_cube = data_cube.transpose(self.zreslice_dim_ordering)
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
if not self.hdf5_Corder:
data_cube = data_cube.transpose(2,1,0)
# slice out the data hdf
ind = self.get_hdf_index_from_chunk_index(hdf[dataset], self.chunk_rand, self.offset_rand)
slc,slcd = self.get_data_slices_from_indices(ind, size_rand, data_size, False)
hdf[dataset].read_direct(data_cube, slc, slcd)
if self.nz_tiled > 0:
ind = self.get_hdf_index_from_chunk_index(hdf[dataset], self.chunk_tiled, self.offset_tiled)
slc,slcd = self.get_data_slices_from_indices(ind, size_tiled, data_size, True)
hdf[dataset].read_direct(data_cube, slc, slcd)
data_attrs = {}
for name,value in list(hdf[dataset].attrs.items()): data_attrs[name] = value
# xxx - this is only used for chunkrange mode currently, likely item to rethink...
chunksize = np.array(hdf[dataset].chunks, dtype=np.int64)
datasize = np.array(hdf[dataset].shape, dtype=np.int64) # not currently used
hdf.close()
# calculate mean and std over all of the data cube
#mean = float(data_cube.mean(dtype=np.float64)); std = float(data_cube.std(dtype=np.float64))
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
if not self.hdf5_Corder:
data_cube = data_cube.transpose(2,1,0)
chunksize = chunksize[::-1]; datasize = datasize[::-1]
# zreslice re-ordering, so data is in re-sliced order view outside of this function
data_cube = data_cube.transpose(self.zreslice_dim_ordering)
chunksize = chunksize[self.zreslice_dim_ordering]; datasize = datasize[self.zreslice_dim_ordering]
if self.verbose and not self.silent:
print('After re-ordering ' + fname + ' ' + dataset + ' data cube shape ' + str(data_cube.shape))
return data_cube, data_attrs, chunksize, datasize
def loadSegmentedLabels(self, segmented_labels_cube):
if self.no_labels: seglabels_size = [0, 0, 0]
else: seglabels_size = list(self.segmented_labels_slice_size[i] for i in self.zreslice_dim_ordering)
size_rand = self.size_rand[self.zreslice_dim_ordering]; size_tiled = self.size_tiled[self.zreslice_dim_ordering]
if self.verbose and not self.silent: print('seglabels size ' + str(seglabels_size) + \
' size rand ' + str(size_rand) + ' size tiled ' + str(size_tiled))
if segmented_labels_cube is None:
# for chunkrange / chunklist mode, this function is recalled, don't reallocate in this case
if self.hdf5_Corder:
segmented_labels_cube = np.zeros(seglabels_size, dtype=self.cubeLblType, order='C')
else:
segmented_labels_cube = np.zeros(seglabels_size[::-1], dtype=self.cubeLblType, order='C')
else:
# change back to the original view (same view changes as below, opposite order)
# zreslice un-re-ordering, so data is in original view in this function
segmented_labels_cube = segmented_labels_cube.transpose(self.zreslice_dim_ordering)
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
if not self.hdf5_Corder:
segmented_labels_cube = segmented_labels_cube.transpose(2,1,0)
# slice out the labels hdf except for no_labels mode (save memory)
hdf = h5py.File(self.labelsrc,'r');
if not self.no_labels:
ind = self.get_hdf_index_from_chunk_index(hdf[self.username], self.chunk_rand, self.offset_rand)
slc,slcd = self.get_label_slices_from_indices(ind, size_rand, seglabels_size, False)
hdf[self.username].read_direct(segmented_labels_cube, slc, slcd)
if self.nz_tiled > 0:
ind = self.get_hdf_index_from_chunk_index(hdf[self.username], self.chunk_tiled, self.offset_tiled)
slc,slcd = self.get_label_slices_from_indices(ind, size_tiled, seglabels_size, True)
hdf[self.username].read_direct(segmented_labels_cube, slc, slcd)
labels_attrs = {}
for name,value in list(hdf[self.username].attrs.items()): labels_attrs[name] = value
# these two only for validation that they are same as data cube
chunksize = np.array(hdf[self.username].chunks, dtype=np.int64)
datasize = np.array(hdf[self.username].shape, dtype=np.int64)
hdf.close()
# the C/F order re-ordering needs to be done nested inside the reslice re-ordering
if not self.hdf5_Corder:
segmented_labels_cube = segmented_labels_cube.transpose(2,1,0)
chunksize = chunksize[::-1]; datasize = datasize[::-1]
# zreslice re-ordering, so data is in re-sliced order view outside of this function
segmented_labels_cube = segmented_labels_cube.transpose(self.zreslice_dim_ordering)
chunksize = chunksize[self.zreslice_dim_ordering]; datasize = datasize[self.zreslice_dim_ordering]
if self.verbose and not self.silent: print('After re-ordering segmented labels cube shape ' + \
str(segmented_labels_cube.shape))
assert( (self.chunksize == chunksize).all() )
assert( (self.datasize == datasize).all() )
return segmented_labels_cube, labels_attrs
def get_hdf_index_from_chunk_index(self, hdf_dataset, chunk_index, offset):
datasize = np.array(hdf_dataset.shape, dtype=np.int64)
chunksize = np.array(hdf_dataset.chunks, dtype=np.int64)
nchunks = datasize//chunksize
if self.hdf5_Corder: ci = chunk_index
else: ci = chunk_index[::-1]
# chunk index is either given as origin-centered, or zero-based relative to corner
if self.origin_chunk_inds: ci = (ci + nchunks//2 + nchunks%2 - 1) # origin-centered chunk index
# always return the indices into the hdf5 in C-order
if self.hdf5_Corder: return ci*chunksize + offset
else: return (ci*chunksize)[::-1] + offset
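# Hedged worked example for get_hdf_index_from_chunk_index (illustrative numbers, not from any real dataset):
# with datasize=1024 and chunksize=128 along one dim, nchunks=8, so an origin-centered chunk index of 0 maps to
# ci = 0 + 8//2 + 8%2 - 1 = 3, i.e. hdf5 index 3*128 + offset; negative origin-centered indices select chunks
# on the other side of the center in the same way.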
# xxx - add asserts to check that data select is inbounds in hdf5, currently not a graceful error
def get_data_slices_from_indices(self, ind, size, dsize, isTiled):
xysel = self.zreslice_dim_ordering[0:2]; zsel = self.zreslice_dim_ordering[2]
beg = ind; end = ind + size
beg[xysel] = beg[xysel] - self.image_size//2; beg[zsel] = beg[zsel] - self.nzslices//2
end[xysel] = end[xysel] + self.image_size//2; end[zsel] = end[zsel] + self.nzslices//2
return self.get_slices_from_limits(beg,end,dsize,isTiled)
# xxx - add asserts to check that labels select is inbounds in hdf5, currently not a graceful error
def get_label_slices_from_indices(self, ind, size, dsize, isTiled):
#xysel = self.zreslice_dim_ordering[0:2];
zsel = self.zreslice_dim_ordering[2]
beg = ind - self.segmented_labels_border[self.zreslice_dim_ordering]
end = ind + size + self.segmented_labels_border[self.zreslice_dim_ordering]
beg[zsel] = beg[zsel] - self.nzslices//2; end[zsel] = end[zsel] + self.nzslices//2
return self.get_slices_from_limits(beg,end,dsize,isTiled)
def get_slices_from_limits(self, beg, end, size, isTiled):
zsel = self.zreslice_dim_ordering[2]
begd = np.zeros_like(size); endd = size;
if isTiled:
begd[zsel], endd[zsel] = self.nrand_zslice, self.ntotal_zslice
else:
begd[zsel], endd[zsel] = 0, self.nrand_zslice
if self.hdf5_Corder:
slc = np.s_[beg[0]:end[0],beg[1]:end[1],beg[2]:end[2]]
slcd = np.s_[begd[0]:endd[0],begd[1]:endd[1],begd[2]:endd[2]]
else:
slc = np.s_[beg[2]:end[2],beg[1]:end[1],beg[0]:end[0]]
slcd = np.s_[begd[2]:endd[2],begd[1]:endd[1],begd[0]:endd[0]]
return slc,slcd
# Don't need to pickle meta anymore as parser is instantiated and runs on-demand from EMDataProvider.
def makeBatchMeta(self):
if self.no_labels:
noutputs = 0; label_names = []
else:
noutputs = self.noutputs
label_names = self.indep_label_names_out if self.independent_labels else self.label_names
# do not re-assign meta dict so this works with chunklist mode (which reloads each time)
if not hasattr(self, 'batch_meta'): self.batch_meta = {}
b = self.batch_meta; b['num_cases_per_batch']=self.num_cases_per_batch; b['label_names']=label_names;
b['nlabels']=len(label_names); b['pixels_per_image']=self.pixels_per_image;
#b['scalar_data_mean']=data_mean; b['scalar_data_std']=data_std;
b['noutputs']=noutputs; b['num_pixels_per_case']=self.pixels_per_image;
if self.verbose and not self.silent: print(self.batch_meta)
# for debug only (standalone harness), DO NOT uncomment these when running the network, otherwise the counts won't be saved
#self.batch_meta['prior_train_count'] = np.zeros((self.noutputs if self.independent_labels else self.nlabels,),
# dtype=np.int64)
#self.batch_meta['prior_total_count'] = np.zeros((1,),dtype=np.int64)
def setupAllLabels(self, chunkind=0):
c = chunkind # for chunk_list_all mode
assert( c==0 or self.chunk_list_all ) # sanity check
self.labels_cube[c] = self.setupLabels(self.labels_cube[c], self.segmented_labels_cube[c])
# labels_cube is used for label priors for selecting pixels for presentation to convnet.
# The actual labels are calculated on-demand using the segmented labels (not using the labels created here),
# unless NOT independent_labels in which case the labels cube is also the labels sent to the network.
def setupLabels(self, labels_cube, segmented_labels_cube):
# init labels to empty and return if no label mode
if self.no_labels:
return np.zeros((0,0,0), dtype=self.cubeLblType, order='C')
num_empty = (segmented_labels_cube == self.EMPTY_LABEL).sum()
assert( self.no_label_lookup or num_empty != segmented_labels_cube.size ) # a completely unlabeled chunk
if num_empty > 0:
if not self.silent: print('EMDataParser: WARNING: %d empty label voxels selected' % float(num_empty))
# ECS as a single label is used for some of the label types. figure out which label it is based on ini param
# need a separate variable for chunklist mode where the ECS label in different regions is likely different.
# xxx - this does not work in the cases where the ECS label has been set differently in adjacent regions.
# would likely have to go back to a fixed label for this, but not clear why this situation would come up.
if self.ECS_label == -2:
self.ECS_label_value = self.ECS_LABEL # specifies ECS label is single defined value (like EMPTY_LABEL)
elif self.ECS_label == -1:
# specifies that ECS is labeled with whatever the last label is, ignore the empty label
self.ECS_label_value = (segmented_labels_cube[segmented_labels_cube != self.EMPTY_LABEL]).max()
else:
self.ECS_label_value = self.ECS_label # use supplied value for ECS
if labels_cube is None:
# do not re-allocate if just setting up labels for a new cube for cubelist / cuberange mode
labels_cube = np.zeros(self.labels_slice_size, dtype=self.cubeLblType, order='C')
if self.select_label_type == 'ICS_OUT':
labels_cube[segmented_labels_cube == 0] = self.labels['OUT']
labels_cube[segmented_labels_cube > 0] = self.labels['ICS']
elif self.select_label_type == 'ICS_ECS_MEM':
labels_cube[segmented_labels_cube == 0] = self.labels['MEM']
labels_cube[np.logical_and(segmented_labels_cube > 0,
segmented_labels_cube != self.ECS_label_value)] = self.labels['ICS']
labels_cube[segmented_labels_cube == self.ECS_label_value] = self.labels['ECS']
elif self.select_label_type == 'ICS_OUT_BRD':
labels_cube[:] = self.labels['ICS']
labels_cube[np.diff(segmented_labels_cube[1:,1:-1],1,0) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[0:-1,1:-1],1,0) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[1:-1,1:],1,1) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[1:-1,0:-1],1,1) != 0] = self.labels['BORDER']
# xxx - this would highlight membrane areas that are near border also, better method of balancing priors?
labels_cube[np.logical_and(segmented_labels_cube[1:-1,1:-1] == 0,
labels_cube != self.labels['BORDER'])] = self.labels['OUT']
#labels_cube[segmented_labels_cube[1:-1,1:-1] == 0] = self.labels['OUT']
elif self.select_label_type == 'ICS_ECS_MEM_BRD':
labels_cube[:] = self.labels['ICS']
labels_cube[np.diff(segmented_labels_cube[1:,1:-1],1,0) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[0:-1,1:-1],1,0) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[1:-1,1:],1,1) != 0] = self.labels['BORDER']
labels_cube[np.diff(segmented_labels_cube[1:-1,0:-1],1,1) != 0] = self.labels['BORDER']
# xxx - this would highlight membrane areas that are near border also, better method of balancing priors?
labels_cube[np.logical_and(segmented_labels_cube[1:-1,1:-1] == 0,
labels_cube != self.labels['BORDER'])] = self.labels['MEM']
#labels_cube[segmented_labels_cube[1:-1,1:-1] == 0] = self.labels['MEM']
labels_cube[segmented_labels_cube[1:-1,1:-1] == self.ECS_label_value] = self.labels['ECS']
else:
raise Exception('Unknown select_label_type ' + self.select_label_type)
return labels_cube
def getLabelMap(self):
# NEW: change how labels work so that labels that use priors for selection are separate from how the labels are
# sent / interpreted by the network. First setup select_label_type maps.
# NOTE: the select labels are also the labels sent to the network if NOT independent_labels.
# in this case there also must only be one output voxel (multiple outputs not supported for
# mutually exclusive labels).
border = 0 # this is for label selects or label types that need bordering pixels around cube to fetch labels
# label names need to be in the same order as the indices in the labels map.
if self.select_label_type == 'ICS_OUT':
self.labels = {'OUT':0, 'ICS':1} # labels are binary, intracellular or outside (not intracellular)
self.label_names = ['OUT', 'ICS']
elif self.select_label_type == 'ICS_ECS_MEM':
self.labels = {'MEM':0, 'ICS':1, 'ECS':2} # label values for ICS, MEM and ECS fixed values
self.label_names = ['MEM', 'ICS', 'ECS']
elif self.select_label_type == 'ICS_OUT_BRD':
# for the priors for affinity just use three classes, OUT, ICS or border voxels
self.labels = {'OUT':0, 'BORDER':1, 'ICS':2}
self.label_names = ['OUT', 'BORDER', 'ICS']
border = 1
elif self.select_label_type == 'ICS_ECS_MEM_BRD':
# for the priors for affinity use four classes: MEM, ICS, ECS or border voxels
self.labels = {'MEM':0, 'BORDER':1, 'ICS':2, 'ECS':3}
self.label_names = ['MEM', 'BORDER', 'ICS', 'ECS']
border = 1
# then setup "independent label names" which will be used to setup how labels are sent to network.
# these are the actual labels, the ones above are used for selecting voxels randomly based on label lookup
# using the label prior specified in the ini file (balancing).
if self.label_type == 'ICSorOUT':
if self.image_out_size==1:
# for single pixel output, use two independent outputs (convnet doesn't like single output)
self.indep_label_names = ['ICS', 'OUT']
else:
# one output per pixel
self.indep_label_names = ['ICS']
elif self.label_type == 'ICSorECSorMEM':
self.indep_label_names = ['ICS', 'ECS', 'MEM']
elif self.label_type == 'ICSorECS':
assert( self.independent_labels ) # this setup is intended for MEM to be encoded by no winner
# the labels used for selection are still the same, just learn outputs as 0, 0 for membrane
self.indep_label_names = ['ICS', 'ECS']
elif self.label_type == 'ICSorMEM':
assert( self.independent_labels ) # this setup is intended for ECS to be encoded by no winner
# the labels used for selection are still the same, just learn outputs as 0, 0 for ecs
self.indep_label_names = ['ICS', 'MEM']
elif self.label_type == 'affin2':
assert( self.independent_labels ) # xxx - can construct mutex labels, but decided no point to support this
# two output per pixel, the affinities in two directions
self.indep_label_names = ['DIM0POS', 'DIM1POS']
border = 1
elif self.label_type == 'affin4':
assert( self.independent_labels ) # xxx - can construct mutex labels, but decided no point to support this
# four outputs per pixel, the affinities in two directions for ICS and for ECS
self.indep_label_names = ['ICS_DIM0POS', 'ICS_DIM1POS', 'ECS_DIM0POS', 'ECS_DIM1POS']
border = 1
elif self.label_type == 'affin6':
assert( self.independent_labels ) # xxx - can construct mutex labels, but decided no point to support this
# six outputs per pixel, the affinities in two directions for ICS, ECS and MEM
self.indep_label_names = ['ICS_DIM0POS', 'ICS_DIM1POS', 'ECS_DIM0POS', 'ECS_DIM1POS',
'MEM_DIM0POS', 'MEM_DIM1POS']
border = 1
else:
raise Exception('Unknown label_type ' + self.label_type)
# this is for affinity graphs so there is no boundary problem at edges of segmented labels, default no boundary
# make 3d (for convenience) in ortho reslice code, but always need same in xy dir and zero in z for lbl slicing
self.segmented_labels_border = np.zeros((3,),dtype=np.int32); self.segmented_labels_border[0:2] = border
assert( self.independent_labels or border==0 ) # did not see the point in supporting this
self.nlabels = len(self.label_names)
self.nIndepLabels = len(self.indep_label_names)
self.indep_label_names_out = []
for i in range(self.pixels_per_out_image):
for j in range(self.nIndepLabels):
self.indep_label_names_out.append('%s_%d' % (self.indep_label_names[j], i))
self.noutputs = len(self.indep_label_names_out) if self.independent_labels else 1
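# Hedged example of the naming above (hypothetical settings, assuming pixels_per_out_image is the squared
# output size): with label_type 'ICSorECSorMEM' and image_out_size=2 (so pixels_per_out_image=4),
# indep_label_names_out becomes
#   ['ICS_0','ECS_0','MEM_0','ICS_1',...,'MEM_3']
# and noutputs == 12 when independent_labels is set; otherwise noutputs stays 1.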
# plotting code to validate that data / labels are being created / selected correctly
# matplotlib imshow does not swap the axes so need transpose to put first dim on x-axis (like in imagej, itk-snap)
def plotDataLbls(self,data,labels,seglabels,augs,pRand=True,doffset=0.0):
from matplotlib import pylab as pl
import matplotlib as plt
imgno = -1; interp_string = 'nearest' # 'none' not supported by slightly older version of matplotlib (meh)
# just keep bringing up plots with EM data samples from the batch range
while True:
pl.figure(1);
if ((not self.independent_labels or self.image_out_size==1) and self.label_type[0:6] != 'affin') or \
labels.size == 0:
# original mode, softmax labels for single output pixel
for i in range(4):
imgno = random.randrange(self.num_cases_per_batch) if pRand else imgno+1
pl.subplot(2,2,i+1)
# this is ugly, but data was previously validated using this plotting, so kept it, also below
slc = data[:,imgno].reshape(self.pixels_per_image,1).\
reshape(self.nzslices, self.image_size, self.image_size)[self.nzslices//2,:,:].\
reshape(self.image_size, self.image_size, 1) + doffset
# Repeat for the three color channels so plotting can occur normally (written for color images).
img = np.require(np.concatenate((slc,slc,slc), axis=2) / (255.0 if slc.max() > 1 else 1),
dtype=np.single)
if labels.size > 0:
# Put a red dot at the center pixel
img[self.image_size//2,self.image_size//2,0] = 1;
img[self.image_size//2,self.image_size//2,1] = 0;
img[self.image_size//2,self.image_size//2,2] = 0;
pl.imshow(img.transpose((1,0,2)),interpolation=interp_string);
if labels.size == 0:
pl.title('imgno %d' % imgno)
elif not self.independent_labels:
pl.title('label %s (%d), imgno %d' % (self.label_names[np.asscalar(labels[0,
imgno].astype(int))], np.asscalar(seglabels[0,imgno].astype(int)), imgno))
else:
lblstr = ' '.join(self.indep_label_names[s] for s in np.nonzero(labels[:,imgno])[0].tolist())
pl.title('label %s (%d), imgno %d' % (lblstr,np.asscalar(seglabels[0,imgno].astype(int)),imgno))
else:
# new mode, multiple output pixels, make two plots
for i in range(2):
imgno = random.randrange(self.num_cases_per_batch) if pRand else imgno+1
slc = data[:,imgno].reshape(self.pixels_per_image,1).\
reshape(self.nzslices, self.image_size, self.image_size)[self.nzslices//2,:,:].\
reshape(self.image_size, self.image_size, 1) + doffset
# Repeat for the three color channels so plotting can occur normally (written for color images).
img = np.require(np.concatenate((slc,slc,slc), axis=2) / 255.0, dtype=np.single)
imgA = np.require(np.concatenate((slc,slc,slc,
np.ones((self.image_size,self.image_size,1))*255), axis=2) / 255.0, dtype=np.single)
aug = augs[imgno] if len(augs) > 1 else augs[0]
alpha = 0.5 # overlay (independent) labels with data
pl.subplot(2,2,2*i+1)
lbls = labels[:,imgno].reshape(self.oshape)
print(lbls[:,:,0].reshape((self.image_out_size,self.image_out_size)))
if self.nIndepLabels > 1:
print(lbls[:,:,1].reshape((self.image_out_size,self.image_out_size)))
assert(self.nIndepLabels < 4) # need more colors for this
osz = self.image_out_size
rch = lbls[:,:,0].reshape(osz,osz,1)
gch = lbls[:,:,1].reshape(osz,osz,1) if self.nIndepLabels > 1 else np.zeros((osz,osz,1))
bch = lbls[:,:,2].reshape(osz,osz,1) if self.nIndepLabels > 2 else np.zeros((osz,osz,1))
if alpha < 1:
pl.imshow(imgA.transpose((1,0,2)),interpolation=interp_string)
imglbls = np.concatenate((rch,gch,bch,np.ones((osz,osz,1))*alpha), axis=2).astype(np.single);
imglbls[(lbls==0).all(2),3] = 0 # make background clear
img3 = np.zeros((self.image_size, self.image_size, 4), dtype=np.single, order='C')
b = self.image_size//2-osz//2; slc = slice(b,b+osz); img3[slc,slc,:] = imglbls;
pl.imshow(img3.transpose((1,0,2)),interpolation=interp_string)
else:
imglbls = np.concatenate((rch,gch,bch), axis=2).astype(np.single);
imgB = img; b = self.image_size//2-osz//2; slc = slice(b,b+osz); imgB[slc,slc,:] = imglbls;
pl.imshow(imgB.transpose((1,0,2)),interpolation=interp_string);
pl.title('label, imgno %d' % imgno)
#alpha = 0.6 # overlay segmented labels with data
pl.subplot(2,2,2*i+2)
seglbls = seglabels[:,imgno].reshape((self.seg_out_size,self.seg_out_size))
print(seglbls)
pl.imshow(imgA.transpose((1,0,2)),interpolation=interp_string)
m = pl.cm.ScalarMappable(norm=plt.colors.Normalize(), cmap=pl.cm.jet)
imgseg = m.to_rgba(seglbls % 256); imgseg[:,:,3] = alpha; imgseg[seglbls==0,3] = 0
img2 = np.zeros((self.image_size, self.image_size, 4), dtype=np.single, order='C')
b = self.image_size//2-self.seg_out_size//2; slc = slice(b,b+self.seg_out_size)
img2[slc,slc,:] = imgseg;
pl.imshow(img2.transpose((1,0,2)),interpolation=interp_string)
pl.title('seglabel, aug %d' % aug)
pl.show()
# simpler plotting for just data, useful for debugging preprocessing for autoencoders
def plotData(self,data,dataProc,pRand=True,image_size=0):
from matplotlib import pylab as pl
#import matplotlib as plt
if image_size < 1: image_size = self.image_size
imgno = -1; interp_string = 'nearest' # 'none' not supported by slightly older version of matplotlib (meh)
numpix = self.image_size*self.image_size
# just keep bringing up plots with EM data samples from the batch range
while True:
pl.figure(1);
# original mode, softmax labels for single output pixel
for i in range(2):
imgno = random.randrange(self.num_cases_per_batch) if pRand else imgno+1
pl.subplot(2,2,2*i+1)
slc = data[:,imgno].reshape(self.nzslices, image_size, image_size)[self.nzslices//2,:,:].\
reshape(image_size, image_size)
mx = slc.max(); mn = slc.min(); fc = np.isfinite(slc).sum()
h = pl.imshow(slc.transpose((1,0)),interpolation=interp_string); h.set_cmap('gray')
pl.title('orig imgno %d, min %.2f, max %.2f, naninf %d' % (imgno, mn, mx, numpix - fc))
pl.subplot(2,2,2*i+2)
slc = dataProc[:,imgno].reshape(self.nzslices, self.image_out_size,
self.image_out_size)[self.nzslices//2,:,:].reshape(self.image_out_size, self.image_out_size)
mx = slc.max(); mn = slc.min(); fc = np.isfinite(slc).sum()
h = pl.imshow(slc.transpose((1,0)),interpolation=interp_string); h.set_cmap('gray')
pl.title('preproc imgno %d, min %.2f, max %.2f, naninf %d' % (imgno, mn, mx, numpix - fc))
pl.show()
@staticmethod
def augmentData(d,augment,order=1):
if augment == 0: return d # no augmentation
if np.bitwise_and(augment,4): d = d.transpose(1,0,2) # transpose x/y
if np.bitwise_and(augment,1): d = d[::-1,:,:] # reflect x
if np.bitwise_and(augment,2): d = d[:,::-1,:] # reflect y
if np.bitwise_and(augment,8): d = d[:,:,::-1] # reflect z
# elastic transform
if np.bitwise_and(augment,16):
assert(d.shape[2]==1)
d = EMDataParser.elastic_transform(d.reshape(d.shape[:2]), order=order)[:,:,None]
return d
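# Hedged usage note for augmentData above (example values only): 'augment' is a bitmask, so e.g.
# augment=0 returns the data unchanged, augment=3 reflects x then y, augment=5 transposes x/y and then
# reflects x, and augment=16 applies the (optional) elastic transform, which assumes a single z-slice.
# The packager is assumed to choose these codes; this comment just documents the encoding.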
# xxx - did not find this to be useful, either way too noisy / jittery for high alpha and low sigma
# or way too blurry for high alpha and high sigma; low alpha does almost nothing, as expected
# modified from https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
@staticmethod
def elastic_transform(image, alpha=8, sigma=2, order=3, random_state=None):
"""Elastic deformation of images as described in [Simard2003]_.
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
assert len(image.shape)==2
if random_state is None:
#random_state = np.random.RandomState(None)
random_state = nr
shape = image.shape
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x+dx, (-1, 1)), np.reshape(y+dy, (-1, 1))
return map_coordinates(image, indices, order=order, mode='reflect').reshape(shape)
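# Hedged usage sketch for elastic_transform (illustrative only, mirrors how augmentData calls it):
#   img2d = np.zeros((64,64), dtype=np.single)   # hypothetical 2D slice
#   warped = EMDataParser.elastic_transform(img2d, alpha=8, sigma=2, order=1)
# order=1 (or 0 for label data) controls the map_coordinates interpolation order.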
@staticmethod
def get_options(cfg_file):
config = ConfigObj(cfg_file,
configspec=os.path.join(os.path.dirname(os.path.realpath(__file__)),'parseEMdata.ini'))
# Validator handles missing / type / range checking
validator = Validator()
results = config.validate(validator, preserve_errors=True)
if results != True:
for (section_list, key, err) in flatten_errors(config, results):
if key is not None:
if not err:
print('EMDataParser: The "%s" key is missing in the following section(s):%s ' \
% (key, ', '.join(section_list)))
raise ValidateError
else:
print('EMDataParser: The "%s" key in the section(s) "%s" failed validation' \
% (key, ', '.join(section_list)))
raise err
elif section_list:
print('EMDataParser: The following section(s) was missing:%s ' % ', '.join(section_list))
raise ValidateError
return config
# xxx - moved logic out of convEMdata.py for better modularity, maybe can clean up more?
def checkOutputCubes(self, feature_path, batchnum, isLastbatch, outputs=None):
# for neon, allow outputs to be passed in without pickling
self.batch_outputs[self.batch_outputs_ind] = outputs
self.batch_outputs_ind = (self.batch_outputs_ind+1) % self.batches_per_rand_cube
if self.use_chunk_list:
# decide if it's appropriate to make output cubes (if at the end of the current chunk)
self.chunklistOutputCubes(feature_path, batchnum, isLastbatch)
elif isLastbatch:
# not chunklist mode, only write after all batches completed
self.makeOutputCubes(feature_path)
# special makeOutputCubes call for chunklist mode, only write if this is the last batch (overall or chunk)
def chunklistOutputCubes(self, feature_path, batchnum, isLastbatch):
assert( self.use_chunk_list ) # do not call me unless chunklist mode
batchOffset, chunk = self.getTiledBatchOffset(batchnum, setChunkList=False)
# write the output cubes if this is the last batch in current chunk or if this is the last overall batch
if isLastbatch or (batchOffset == (self.batches_per_rand_cube - 1)):
self.makeOutputCubes(feature_path, chunk*self.batches_per_rand_cube + self.FIRST_TILED_BATCH)
# prevents last chunk from being written twice (for isLastbatch, next chunk might not have loaded)
self.last_chunk_rand = self.chunk_rand; self.last_offset_rand = self.offset_rand;
if isLastbatch:
self.start_queue.put(None)
self.probs_output_proc.join()
# the EM data "unpackager", recreate probability cubes using exported output features from convnet
def makeOutputCubes(self, feature_path='', batchnum=-1):
print('EMDataParser: Loading exported features')
cpb = self.num_cases_per_batch; size = self.image_out_size;
npix = self.pixels_per_out_image; nout = self.noutputs;
# labels in this context are the labels per output pixel
if self.independent_labels: nlabels = self.nIndepLabels; label_names = self.indep_label_names
else: nlabels = self.nlabels; label_names = self.label_names
# allow the starting batch to be passed in (for chunklist mode)
if batchnum < 0: batchnum = self.FIRST_TILED_BATCH
if self.verbose:
print('ntiles_per_zslice %d zslices_per_batch %d tiled shape %d %d cpb %d' % (self.batches_per_zslice,
self.zslices_per_batch, self.inds_tiled_out.shape[0],self.inds_tiled_out.shape[1], cpb))
# initial shape of probs out depends on single or multiple output pixels
if size > 1: probs_out_shape = self.output_size + [nout]
else: probs_out_shape = self.labels_slice_size + (nlabels,)
# allocate the outputs to be written to hdf5, any pixels from missing batches are filled with EMPTY_PROB
if hasattr(self, 'probs_out'):
# do not reallocate in chunklist mode, but reshape (shape changes below for multiple output pixels)
self.probs_out[:] = self.EMPTY_PROB
self.probs_out = self.probs_out.reshape(probs_out_shape)
else:
self.probs_out = self.EMPTY_PROB * np.ones(probs_out_shape, dtype=np.float32, order='C')
# get training prior if present in the meta
if 'prior_train_count' in self.batch_meta:
# calculate the training prior based on the actual labels that have been presented to the network
prior_train = self.batch_meta['prior_train_count'] / self.batch_meta['prior_total_count'].astype(np.double)
# if the training prior counts have been saved and the test prior is specified in the ini,
# then enable prior rebalancing on the output probabilities.
prior_export = self.prior_test.all() and 'prior_train_count' in self.batch_meta
if prior_export:
# make sure the test (export) prior is legit
assert( (self.prior_test > 0).all() and (self.prior_test < 1).all() ) # test priors must be probs
# only for independent labels with independent prior test can test prior not sum to 1
if not self.independent_labels or not self.prior_test_indep: assert( self.prior_test.sum() == 1 )
if not self.independent_labels or self.prior_test_indep or (self.prior_test.size == nlabels):
# normal case, test_priors are for labels or independent labels
assert( self.prior_test.size == nlabels ) # test priors must be for labels or independent label types
if self.independent_labels:
# repeat prior_test for all output pixels
prior_test = self.prior_test.reshape((1,-1)).repeat(npix, axis=0).reshape((nout,))
if self.prior_test_indep:
# in this case each output is independently bayesian reweighted against the not output
prior_nottest_to_nottrain = (1 - prior_test) / (1 - prior_train)
else:
prior_test = self.prior_test
else:
# allow the last class to be encoded as all zeros, so prob is 1-sum others
assert( self.prior_test.size == nlabels+1 )
noutp = npix*(nlabels+1)
prior_test = self.prior_test.reshape((1,-1)).repeat(npix, axis=0).reshape((noutp,))
prior_test_labels = self.prior_test[0:-1].reshape((1,-1)).repeat(npix, axis=0).reshape((nout,))
prior_train_labels = prior_train
prior_test_to_train_labels = (prior_test_labels / prior_train_labels).reshape((1,size,size,nlabels))
ptall = prior_train.reshape((size,size,nlabels))
prior_train = np.concatenate((ptall, 1-ptall.sum(axis=2,keepdims=True)), axis=2).reshape((noutp,))
# calculate ratio once here to avoid doing it every loop iteration below
prior_test_to_train = prior_test / prior_train
if self.independent_labels and not self.prior_test_indep:
if self.prior_test.size == nlabels:
prior_test_to_train = prior_test_to_train.reshape((1,size,size,nlabels))
else:
prior_test_to_train = prior_test_to_train.reshape((1,size,size,nlabels+1))
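# Hedged numeric illustration of the reweighting below (made-up numbers): if a label was trained with
# prior_train=0.2 but should be exported at prior_test=0.5, its network output p is scaled by
# prior_test/prior_train = 2.5 and then renormalized, e.g. for mutually exclusive labels
#   adjusted = d*prior_test_to_train; d = adjusted / adjusted.sum(axis=1,keepdims=True)
# which is exactly the non-independent branch further down.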
# load the pickled output batches and assign based on tiled indices created in packager (makeTiledIndices)
cnt = 0
for z in range(0,self.ntotal_zslice,self.zslices_per_batch):
for t in range(self.batches_per_zslice):
# allows for data to either be unpickled, or saved in memory for each "chunk" (neon mode)
d = None
if feature_path:
batchfn = os.path.join(feature_path,'data_batch_%d' % batchnum)
if os.path.isfile(batchfn):
infile = open(batchfn, 'rb'); d = myPickle.load(infile); infile.close(); d = d['data']
# batches take up way too much space for "large dumps" so remove them in append_features mode
if self.append_features: os.remove(batchfn)
else:
d = self.batch_outputs[cnt]; self.batch_outputs[cnt] = None
if d is not None:
if prior_export:
# apply Bayesian reweighting, either independently or over the labels set
if self.independent_labels and self.prior_test_indep:
# sum is with the not target for independent outputs
adjusted = d*prior_test_to_train; d = adjusted / (adjusted+(1-d)*prior_nottest_to_nottrain)
elif self.independent_labels:
if self.prior_test.size != nlabels:
# need 1 - sum for last label type (encoded as all zeros)
dshp = d.reshape((cpb,size,size,nlabels))
# rectify in case existing probs sum to over one
other_dshp = 1-dshp.sum(axis=3,keepdims=True); other_dshp[other_dshp < 0] = 0
d = (dshp*prior_test_to_train_labels / (np.concatenate((dshp, other_dshp),
axis=3)*prior_test_to_train).sum(axis=3, keepdims=True)).reshape((cpb,nout))
else:
dshp = d.reshape((cpb,size,size,nlabels)); adjusted = dshp*prior_test_to_train;
d = (adjusted / adjusted.sum(axis=3,keepdims=True)).reshape((cpb,nout))
else:
# sum is over the labels
adjusted = d*prior_test_to_train; d = adjusted / adjusted.sum(axis=1,keepdims=True)
begr = t*cpb; endr = begr + cpb
self.probs_out[self.inds_tiled_out[begr:endr,0],self.inds_tiled_out[begr:endr,1],
self.inds_tiled_out[begr:endr,2] + z,:] = d
batchnum += 1; cnt += 1
if size > 1:
# xxx - oh yah, this makes sense, see comments in makeTiledIndices
self.probs_out = self.probs_out.reshape(self.output_size + [size, size,
nlabels]).transpose((0,3,1,4,2,5)).reshape(self.labels_slice_size + (nlabels,))
# which prior counts will be written out
if 'prior_train_count' in self.batch_meta:
if not prior_export or self.prior_test.size == nlabels:
prior_write = prior_train.reshape((size,size,nlabels))
else:
prior_write = prior_train_labels.reshape((size,size,nlabels))
if self.write_outputs:
print('EMDataParser: Creating hdf5 output containing label probabilities')
if not os.path.exists(self.outpath): os.makedirs(self.outpath)
# write probs in F-order, use separate variable names in hdf file
outfile = h5py.File(os.path.join(self.outpath, self.OUTPUT_H5_CVOUT), 'w');
# output probability for each output if requested
for n in range(nlabels):
outfile.create_dataset(label_names[n], data=self.probs_out[:,:,:,n].transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
# copy any attributes over
for name,value in list(self.data_attrs.items()):
outfile[label_names[n]].attrs.create(name,value)
self.write_prior_hdf5(prior_export, prior_write, outfile)
outfile.close()
if self.append_features_knossos:
print('EMDataParser: Appending to knossos-style output containing label probabilities "%s" at %d %d %d' % \
(self.outpath, self.last_chunk_rand[0], self.last_chunk_rand[1], self.last_chunk_rand[2]))
ind = self.last_chunk_rand
elif self.append_features:
# write outputs probabilities to a big hdf5 that spans entire dataset, used for "large feature dumps".
# always writes in F-order (inputs can be either order tho)
assert( self.nz_tiled == 0 ) # use the rand cube only for "large feature dumps"
hdf = h5py.File(self.imagesrc,'r')
if not os.path.isfile(self.outpath):
print('EMDataParser: Creating global hdf5 output containing label probabilities "%s"' % self.outpath)
# create an output prob hdf5 file (likely for a larger dataset, this is how outputs are "chunked")
outfile = h5py.File(self.outpath, 'w');
for n in range(nlabels):
# get the shape and chunk size from the data hdf5. if this file is in F-order, re-order to C-order
shape = list(hdf[self.dataset].shape); chunks = list(hdf[self.dataset].chunks)
if not self.hdf5_Corder:
shape = shape[::-1]; chunks = chunks[::-1]
# now re-order the dims based on the specified re-ordering and then re-order back to F-order
shape = list(shape[i] for i in self.zreslice_dim_ordering)
chunks = list(chunks[i] for i in self.zreslice_dim_ordering)
shape = shape[::-1]; chunks = tuple(chunks[::-1])
outfile.create_dataset(label_names[n], shape=shape, dtype=np.float32, compression='gzip',
#compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True, fillvalue=-1.0, chunks=chunks)
compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True, fillvalue=0.0, chunks=chunks)
# copy the attributes over
for name,value in list(self.data_attrs.items()):
outfile[label_names[n]].attrs.create(name,value)
self.write_prior_hdf5(prior_export, prior_write, outfile)
outfile.close()
print('EMDataParser: Appending to global hdf5 output containing label probabilities "%s" at %d %d %d' % \
(self.outpath, self.last_chunk_rand[0], self.last_chunk_rand[1], self.last_chunk_rand[2]))
# always write outputs in F-order
ind = self.get_hdf_index_from_chunk_index(hdf[self.dataset], self.last_chunk_rand,
self.last_offset_rand)
ind = ind[self.zreslice_dim_ordering][::-1] # re-order for specified ordering, then to F-order
hdf.close()
if self.append_features:
# parallel using multiprocessing, threading does not work
if not hasattr(self, 'done_queue'):
# initialize
self.start_queue = mp.Queue()
self.done_queue = mp.Queue()
self.shared_probs_out = sharedmem.empty_like(self.probs_out)
self.shared_ind = sharedmem.empty_like(ind)
if self.append_features_knossos:
self.probs_output_proc = mp.Process(target=handle_knossos_prob_output,
args=(self.start_queue, self.done_queue, self.shared_probs_out,
self.shared_ind, label_names, self.outpath,self.strnetid))
else:
self.probs_output_proc = mp.Process(target=handle_hdf5_prob_output,
args=(self.start_queue, self.done_queue, self.shared_probs_out,
self.shared_ind, label_names, self.outpath))
self.probs_output_proc.start()
else:
self.done_queue.get()
self.shared_probs_out[:] = self.probs_out; self.shared_ind[:] = ind
self.start_queue.put(1)
## non-parallel version
#outfile = h5py.File(self.outpath, 'r+');
#for n in range(nlabels):
# d = self.probs_out[:,:,:,n].transpose((2,1,0)); dset = outfile[label_names[n]]
# #print ind, d.shape, dset.shape
# dset[ind[0]:ind[0]+d.shape[0],ind[1]:ind[1]+d.shape[1],ind[2]:ind[2]+d.shape[2]] = d
#outfile.close()
def write_prior_hdf5(self, prior_export, d, outfile):
# for both modes, write out the priors, if prior reweighting enabled
# write a new dataset with the on-the-fly calculated training prior for each label type
if 'prior_train_count' in self.batch_meta:
#outfile = h5py.File(self.outpath, 'r+');
outfile.create_dataset(self.PRIOR_DATASET, data=d.transpose((2,1,0)),
compression='gzip', compression_opts=self.HDF5_CLVL, shuffle=True, fletcher32=True)
if prior_export:
print('EMDataParser: Exported with Bayesian prior reweighting')
outfile[self.PRIOR_DATASET].attrs.create('prior_test',self.prior_test)
else:
print('EMDataParser: Exported training prior but output not reweighted')
#outfile.close()
# for test
if __name__ == '__main__':
dp = EMDataParser(sys.argv[1:][0], write_outputs=False)
#dp = EMDataParser(sys.argv[1:][0], False, '', 'meh1')
#dp.no_label_lookup = True
dp.initBatches()
#dp.makeOutputCubes(sys.argv[1:][1])
nBatches = 10;
# test rand batches
#for i in range(nBatches): dp.getBatch(i+1, True)
#for i in range(dp.FIRST_RAND_NOLOOKUP_BATCH,dp.FIRST_RAND_NOLOOKUP_BATCH+nBatches): dp.getBatch(i+1, True)
# test tiled batches
batchOffset = 0;
for i in range(dp.FIRST_TILED_BATCH+batchOffset,dp.FIRST_TILED_BATCH+batchOffset+nBatches): dp.getBatch(i,True,16)
| mit |
stargaser/astropy | astropy/visualization/tests/test_histogram.py | 5 | 2349 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
try:
import matplotlib.pyplot as plt
HAS_PLT = True
except ImportError:
HAS_PLT = False
try:
import scipy # noqa
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
import pytest
import numpy as np
from astropy.visualization import hist
from astropy.stats import histogram
@pytest.mark.skipif('not HAS_PLT')
def test_hist_basic(rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(100)
for range in [None, (-2, 2)]:
n1, bins1, patches1 = plt.hist(x, 10, range=range)
n2, bins2, patches2 = hist(x, 10, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
@pytest.mark.skipif('not HAS_PLT')
def test_hist_specify_ax(rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(100)
fig, ax = plt.subplots(2)
n1, bins1, patches1 = hist(x, 10, ax=ax[0])
assert patches1[0].axes is ax[0]
n2, bins2, patches2 = hist(x, 10, ax=ax[1])
assert patches2[0].axes is ax[1]
@pytest.mark.skipif('not HAS_PLT')
def test_hist_autobin(rseed=0):
rng = np.random.RandomState(rseed)
x = rng.randn(100)
# 'knuth' bintype depends on scipy that is optional dependency
if HAS_SCIPY:
bintypes = [10, np.arange(-3, 3, 10), 'knuth', 'scott',
'freedman', 'blocks']
else:
bintypes = [10, np.arange(-3, 3, 10), 'scott',
'freedman', 'blocks']
for bintype in bintypes:
for range in [None, (-3, 3)]:
n1, bins1 = histogram(x, bintype, range=range)
n2, bins2, patches = hist(x, bintype, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
def test_histogram_pathological_input():
# Regression test for https://github.com/astropy/astropy/issues/7758
# The key feature of the data below is that one of the points is very,
# very different than the rest. That leads to a large number of bins.
data = [9.99999914e+05, -8.31312483e-03, 6.52755852e-02, 1.43104653e-03,
-2.26311017e-02, 2.82660007e-03, 1.80307521e-02, 9.26294279e-03,
5.06606026e-02, 2.05418011e-03]
with pytest.raises(ValueError):
hist(data, bins='freedman', max_bins=10000)
| bsd-3-clause |
georgid/sms-tools | lectures/9-Sound-description/plots-code/mfcc.py | 25 | 1103 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
mfcc = ess.MFCC(numberCoefficients = 12)
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
mfccs = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
mfcc_bands, mfcc_coeffs = mfcc(mX)
mfccs.append(mfcc_coeffs)
mfccs = np.array(mfccs)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
numFrames = int(mfccs[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, 1+np.arange(12), np.transpose(mfccs[:,1:]))
plt.ylabel('coefficients')
plt.title('MFCCs')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('mfcc.png')
plt.show()
| agpl-3.0 |
qifeigit/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
sperfu/DeepInteract-for-CircRNA-Disease-Inference- | DeepInteract_new.py | 1 | 23513 | # -*- coding: utf-8 -*-
###THEANO_FLAGS=mode=FAST_RUN,device=gpu0,floatX=float32 python
import numpy as np
import os
import matplotlib.pyplot as plt
from sklearn import svm, grid_search
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn import metrics
#import tensorflow as tf
#tf.python.control_flow_ops = tf
from sklearn.cross_validation import train_test_split
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
from sklearn.cluster import KMeans,Birch,MiniBatchKMeans
import gzip
import pandas as pd
import pdb
import random
from random import randint
import scipy.io
import xlwt
from keras.layers import Input, Dense
from keras.engine.training import Model
from keras.models import Sequential, model_from_config
from keras.layers.core import Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from keras.optimizers import SGD, RMSprop, Adadelta, Adagrad, Adam
from keras.layers import normalization
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import LSTM
from keras.layers.embeddings import Embedding
from keras import regularizers
from keras.constraints import maxnorm
from DeepFunction import multiple_layer_autoencoder,autoencoder_two_subnetwork_fine_tuning,last_layer_autoencoder
def prepare_data(seperate=False):
print "loading data"
#miRNA_fea = np.loadtxt("circRNA_functional_sim.txt",dtype=float,delimiter=",")
miRNA_fea = np.loadtxt("circRNA_sim.txt",dtype=float,delimiter=",")
#disease_fea = np.loadtxt("disease_functional_sim2.txt",dtype=float,delimiter=",")
disease_fea = np.loadtxt("disease_sim2.txt",dtype=float,delimiter=",")
#interaction = np.loadtxt("circRNA_disease_matrix_array_inter2.txt",dtype=int,delimiter=",")
interaction2 = np.loadtxt("p_value_list.txt",dtype=float,delimiter=",")
interaction = np.loadtxt("circRNA_disease_matrix_array5.txt",dtype=int,delimiter=",")
link_number = 0
train = []
label = []
label2 = []
link_position = []
nonLinksPosition = [] # all non-link positions
for i in range(0, interaction.shape[0]):
for j in range(0, interaction.shape[1]):
label.append(interaction[i,j])
label2.append(interaction2[i,j])
if interaction[i, j] == 1:
link_number = link_number + 1
link_position.append([i, j])
miRNA_fea_tmp = list(miRNA_fea[i])
disease_fea_tmp = list(disease_fea[j])
elif interaction[i,j] == 0:
nonLinksPosition.append([i, j])
miRNA_fea_tmp = list(miRNA_fea[i])
disease_fea_tmp = list(disease_fea[j])
if seperate:
tmp_fea = (miRNA_fea_tmp,disease_fea_tmp)
else:
tmp_fea = miRNA_fea_tmp + disease_fea_tmp
train.append(tmp_fea)
return np.array(train), label,label2
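# Hedged note (shapes are assumptions based on the concatenation above, not verified here): with
# seperate=False each row of 'train' is a circRNA similarity row concatenated with a disease
# similarity row, so e.g.
#   X, labels, labels2 = prepare_data(seperate=False)
#   # X.shape == (n_circRNA * n_disease, n_circRNA_features + n_disease_features)
# while seperate=True keeps the two feature blocks as a tuple per circRNA-disease pair.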
def calculate_performace(test_num, pred_y, labels):
tp =0
fp = 0
tn = 0
fn = 0
for index in range(test_num):
if labels[index] ==1:
if labels[index] == pred_y[index]:
tp = tp +1
else:
fn = fn + 1
else:
if labels[index] == pred_y[index]:
tn = tn +1
else:
fp = fp + 1
acc = float(tp + tn)/test_num
precision = float(tp)/(tp+ fp)
sensitivity = float(tp)/ (tp+fn)
specificity = float(tn)/(tn + fp)
MCC = float(tp*tn-fp*fn)/(np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
return acc, precision, sensitivity, specificity, MCC
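# Hedged note on calculate_performace above: the precision and MCC expressions assume at least one
# positive prediction and a nonzero count in each confusion-matrix margin, otherwise the divisions
# raise ZeroDivisionError; callers here are assumed to pass folds containing both classes.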
def transfer_array_format(data):
formated_matrix1 = []
formated_matrix2 = []
#pdb.set_trace()
#pdb.set_trace()
for val in data:
#formated_matrix1.append(np.array([val[0]]))
formated_matrix1.append(val[0])
formated_matrix2.append(val[1])
#formated_matrix1[0] = np.array([val[0]])
#formated_matrix2.append(np.array([val[1]]))
#formated_matrix2[0] = val[1]
return np.array(formated_matrix1), np.array(formated_matrix2)
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
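# Illustrative call (not part of the original script): preprocess_labels([0, 1, 1])
# returns (array([[1., 0.], [0., 1.], [0., 1.]]), encoder), i.e. one-hot encoded
# labels plus the fitted LabelEncoder for any later inverse transform.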
def DNN():
model = Sequential()
model.add(Dense(input_dim=1027, output_dim=500,init='glorot_normal')) ## 1027 128
model.add(Activation('relu'))
model.add(Dropout(0.3))
#model.add(Dense(input_dim=300, output_dim=300,init='glorot_normal')) ##500
#model.add(Activation('relu'))
#model.add(Dropout(0.3))
model.add(Dense(input_dim=500, output_dim=300,init='glorot_normal')) ##500
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(input_dim=300, output_dim=2,init='glorot_normal')) ##500
model.add(Activation('sigmoid'))
#sgd = SGD(l2=0.0,lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
adadelta = Adadelta(lr=1.0, rho=0.95, epsilon=1e-08)
model.compile(loss='binary_crossentropy', optimizer=adadelta, class_mode="binary")##rmsprop sgd
return model
def DNN2():
model = Sequential()
model.add(Dense(input_dim=64, output_dim=500,init='glorot_normal')) ## 1027 128 32
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(input_dim=500, output_dim=500,init='glorot_normal')) ##500
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(input_dim=500, output_dim=300,init='glorot_normal')) ##500
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(input_dim=300, output_dim=2,init='glorot_normal')) ##500
model.add(Activation('sigmoid'))
#sgd = SGD(l2=0.0,lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
adadelta = Adadelta(lr=1.0, rho=0.95, epsilon=1e-08)
model.compile(loss='binary_crossentropy', optimizer=adadelta, class_mode="binary")##rmsprop sgd
return model
def DeepInteract():
X, labels, labels2 = prepare_data(seperate = True)
'''
neg_tmp = [index for index,value in enumerate(labels) if value == 0]
np.random.shuffle(neg_tmp)
pos_tmp = [index for index,value in enumerate(labels) if value == 1]
pos_X = X[pos_tmp]
neg_X = X[neg_tmp[:len(pos_tmp)]]
pos_labels = [labels[item] for item in pos_tmp]
neg_labels = [labels[item] for item in neg_tmp[:len(pos_tmp)]]
X_new = np.vstack((pos_X,neg_X))
labels_new = pos_labels+neg_labels
'''
import pdb
X_data1, X_data2 = transfer_array_format(X) # X X_new
print X_data1.shape,X_data2.shape
y, encoder = preprocess_labels(labels)# labels labels_new
y2 = np.array(labels2)# labels labels_new
num = np.arange(len(y))
np.random.shuffle(num)
'''
X_data1 = X_data1[num]
X_data2 = X_data2[num]
y = y[num]
y2 = y2[num]
'''
num_cross_val = 5
all_performance = []
all_performance_rf = []
all_performance_bef = []
all_performance_DNN = []
all_performance_SDADNN = []
all_performance_blend = []
all_labels = []
all_prob = {}
num_classifier = 3
all_prob[0] = []
all_prob[1] = []
all_prob[2] = []
all_prob[3] = []
all_averrage = []
for fold in range(num_cross_val):
train1 = np.array([x for i, x in enumerate(X_data1) if i % num_cross_val != fold])
test1 = np.array([x for i, x in enumerate(X_data1) if i % num_cross_val == fold])
train2 = np.array([x for i, x in enumerate(X_data2) if i % num_cross_val != fold])
test2 = np.array([x for i, x in enumerate(X_data2) if i % num_cross_val == fold])
train_label = np.array([x for i, x in enumerate(y) if i % num_cross_val != fold])
test_label = np.array([x for i, x in enumerate(y) if i % num_cross_val == fold])
#pdb.set_trace()
#train_label2 = np.array([x for i, x in enumerate(y2) if i % num_cross_val != fold])
train_label2 = np.array([x for i, x in enumerate(y2) if i % num_cross_val != fold])
test_label2 = np.array([x for i, x in enumerate(y2) if i % num_cross_val == fold])
real_labels = []
for val in test_label:
if val[0] == 1:
real_labels.append(0)
else:
real_labels.append(1)
'''
real_labels2 = []
for val in test_label2:
if val[0] == 1:
real_labels2.append(0)
else:
real_labels2.append(1)
'''
train_label_new = []
for val in train_label:
if val[0] == 1:
train_label_new.append(0)
else:
train_label_new.append(1)
blend_train = np.zeros((train1.shape[0], num_classifier)) # Number of training data x Number of classifiers
blend_test = np.zeros((test1.shape[0], num_classifier)) # Number of testing data x Number of classifiers
skf = list(StratifiedKFold(train_label_new, num_classifier))
class_index = 0
#prefilter_train, prefilter_test, prefilter_train_bef, prefilter_test_bef = autoencoder_two_subnetwork_fine_tuning(train1, train2, train_label, test1, test2, test_label)
#prefilter_train_bef, prefilter_test_bef = autoencoder_two_subnetwork_fine_tuning(train1, train2, train_label, test1, test2, test_label)
prefilter_train_bef, prefilter_test_bef = autoencoder_two_subnetwork_fine_tuning(X_data1, X_data2, train_label, test1, test2, test_label)
#X_train1_tmp, X_test1_tmp, X_train2_tmp, X_test2_tmp, model = autoencoder_two_subnetwork_fine_tuning(train1, train2, train_label, test1, test2, test_label)
#model = autoencoder_two_subnetwork_fine_tuning(train1, train2, train_label, test1, test2, test_label)
#model = merge_seperate_network(train1, train2, train_label)
#proba = model.predict_proba([test1, test2])[:1]
real_labels = []
for val in test_label:
if val[0] == 1:
real_labels.append(0)
else:
real_labels.append(1)
all_labels = all_labels + real_labels
all_data_labels = real_labels + train_label_new
all_prefilter_data = np.vstack((prefilter_test_bef,prefilter_train_bef))
all_label2_data = np.vstack((test_label2.reshape(test_label2.shape[0],1),train_label2.reshape(train_label2.shape[0],1)))
#prefilter_train, new_scaler = preprocess_data(prefilter_train, stand =False)
#prefilter_test, new_scaler = preprocess_data(prefilter_test, scaler = new_scaler, stand = False)
true_data = np.hstack((train1[46529,:],train2[46529,:])) # 61713
#true_data = np.vstack((prefilter_train_bef[46529,:],prefilter_train_bef[64833,:])) # 61713
#false_data = np.vstack((prefilter_train_bef[46528,:],prefilter_train_bef[64834,:]))
false_data = np.hstack((train1[46528,:],train2[46529,:]))
#pdb.set_trace()
'''
prefilter_train1 = xgb.DMatrix( prefilter_train, label=train_label_new)
evallist = [(prefilter_train1, 'train')]
num_round = 10
clf = xgb.train( plst, prefilter_train1, num_round, evallist )
prefilter_test1 = xgb.DMatrix( prefilter_test)
ae_y_pred_prob = clf.predict(prefilter_test1)
'''
'''
tmp_aver = [0] * len(real_labels)
print 'deep autoencoder'
clf = RandomForestClassifier(n_estimators=50)
clf.fit(prefilter_train_bef, train_label_new)
ae_y_pred_prob = clf.predict_proba(prefilter_test_bef)[:,1]
all_prob[class_index] = all_prob[class_index] + [val for val in ae_y_pred_prob]
tmp_aver = [val1 + val2/3 for val1, val2 in zip(ae_y_pred_prob, tmp_aver)]
proba = transfer_label_from_prob(ae_y_pred_prob)
#pdb.set_trace()
acc, precision, sensitivity, specificity, MCC = calculate_performace(len(real_labels), proba, real_labels)
fpr, tpr, auc_thresholds = roc_curve(real_labels, ae_y_pred_prob)
auc_score = auc(fpr, tpr)
#scipy.io.savemat('deep',{'fpr':fpr,'tpr':tpr,'auc_score':auc_score})
## AUPR score add
precision1, recall, pr_threshods = precision_recall_curve(real_labels, ae_y_pred_prob)
aupr_score = auc(recall, precision1)
#scipy.io.savemat('deep_aupr',{'recall':recall,'precision':precision1,'aupr_score':aupr_score})
print acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score
all_performance.append([acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score])
'''
        print 'deep autoencoder without fine tuning'
class_index = class_index + 1
#clf = RandomForestClassifier(n_estimators=50)
#pdb.set_trace()
#clf = KMeans(n_clusters=2, random_state=0).fit(prefilter_train_bef)
#clf = MiniBatchKMeans(n_clusters=2, init=np.vstack((false_data,true_data)),max_iter=1).fit(np.vstack((false_data,true_data)))
#clf = KMeans(n_clusters=2, init=np.vstack((false_data,true_data)),max_iter=1).fit(np.vstack((false_data,true_data)))
#clf.fit(prefilter_train_bef, train_label_new)
#ae_y_pred_prob = clf.predict(prefilter_test_bef)#[:,1]
pdb.set_trace()
#prefilter_train_bef2 = np.hstack((all_prefilter_data,all_label2_data))
prefilter_train_bef2 = np.hstack((prefilter_train_bef,y2.reshape(y2.shape[0],1)))
prefilter_test_bef2 = np.hstack((prefilter_test_bef,test_label2.reshape((test_label2.shape[0],1))))
#ae_y_pred_prob = last_layer_autoencoder(prefilter_train_bef2,all_data_labels, activation = 'sigmoid', batch_size = 100, nb_epoch = 100, last_dim = 2)
ae_y_pred_prob = last_layer_autoencoder(prefilter_train_bef2,all_data_labels, activation = 'sigmoid', batch_size = 100, nb_epoch = 100, last_dim = 2)
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('My')
i_tmp =0
for line_i in range(ae_y_pred_prob.shape[0]):
if round(ae_y_pred_prob[line_i,1],4) > 0.5:
worksheet.write(i_tmp,0,line_i)
worksheet.write(i_tmp,1,line_i/104)
worksheet.write(i_tmp,2,round(ae_y_pred_prob[line_i,1],4))
worksheet.write(i_tmp,3,line_i%104+1000)
worksheet.write(i_tmp,4,"Undirected")
i_tmp = i_tmp + 1
workbook.save('cluster_Workbook1.xls')
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('My')
i_tmp =0
for line_i in range(ae_y_pred_prob.shape[0]):
if round(ae_y_pred_prob[line_i,0],4) > 0.5:
worksheet.write(i_tmp,0,line_i)
worksheet.write(i_tmp,1,line_i/104)
worksheet.write(i_tmp,2,round(ae_y_pred_prob[line_i,0],4))
worksheet.write(i_tmp,3,line_i%104+1000)
worksheet.write(i_tmp,4,"Undirected")
i_tmp = i_tmp + 1
workbook.save('cluster_Workbook2.xls')
pdb.set_trace()
clf = KMeans(n_clusters=2, random_state=0).fit(prefilter_train_bef2)
#clf = KMeans(n_clusters=2, random_state=0).fit(all_prefilter_data)
#ae_y_pred_prob = clf.predict(prefilter_train_bef2)#(prefilter_train_bef2)
ae_y_pred_prob = clf.predict(prefilter_train_bef2)
'''
if ae_y_pred_prob[0][0] > ae_y_pred_prob[0][1]:
aha = 1
else:
aha = 0
'''
#pdb.set_trace()
proba = transfer_label_from_prob(ae_y_pred_prob)
#pdb.set_trace()
acc, precision, sensitivity, specificity, MCC = calculate_performace(len(all_data_labels), proba, all_data_labels)
fpr, tpr, auc_thresholds = roc_curve(all_data_labels, ae_y_pred_prob)
auc_score = auc(fpr, tpr)
#scipy.io.savemat('deep_without',{'fpr':fpr,'tpr':tpr,'auc_score':auc_score})
## AUPR score add
precision1, recall, pr_threshods = precision_recall_curve(all_data_labels, ae_y_pred_prob)
aupr_score = auc(recall, precision1)
#scipy.io.savemat('deep_without_aupr',{'recall':recall,'precision':precision1,'aupr_score':aupr_score})
print acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score
all_performance_bef.append([acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score])
print 'random forest using raw feature'
class_index = class_index + 1
prefilter_train = np.concatenate((train1, train2), axis = 1)
prefilter_test = np.concatenate((test1, test2), axis = 1)
#clf = RandomForestClassifier(n_estimators=50)
clf = AdaBoostClassifier(n_estimators=50)
#clf = DecisionTreeClassifier()
clf.fit(prefilter_train_bef, train_label_new)
ae_y_pred_prob = clf.predict_proba(prefilter_test_bef)[:,1]
all_prob[class_index] = all_prob[class_index] + [val for val in ae_y_pred_prob]
        #tmp_aver = [val1 + val2/3 for val1, val2 in zip(ae_y_pred_prob, tmp_aver)]  # disabled: tmp_aver is only defined inside the commented-out block above and is never used afterwards
proba = transfer_label_from_prob(ae_y_pred_prob)
acc, precision, sensitivity, specificity, MCC = calculate_performace(len(real_labels), proba, real_labels)
fpr, tpr, auc_thresholds = roc_curve(real_labels, ae_y_pred_prob)
auc_score = auc(fpr, tpr)
scipy.io.savemat('raw',{'fpr':fpr,'tpr':tpr,'auc_score':auc_score})
## AUPR score add
precision1, recall, pr_threshods = precision_recall_curve(real_labels, ae_y_pred_prob)
aupr_score = auc(recall, precision1)
#scipy.io.savemat('raw_aupr',{'recall':recall,'precision':precision1,'aupr_score':aupr_score})
print acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score
all_performance_rf.append([acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score])
### Only RF
clf = RandomForestClassifier(n_estimators=50)
#clf = AdaBoostClassifier(n_estimators=50)
#clf = DecisionTreeClassifier()
clf.fit(prefilter_train_bef, train_label_new)
ae_y_pred_prob = clf.predict_proba(prefilter_test_bef)[:,1]
#all_prob[class_index] = all_prob[class_index] + [val for val in ae_y_pred_prob]
#tmp_aver = [val1 + val2/3 for val1, val2 in zip(ae_y_pred_prob, tmp_aver)]
proba = transfer_label_from_prob(ae_y_pred_prob)
acc, precision, sensitivity, specificity, MCC = calculate_performace(len(real_labels), proba, real_labels)
fpr, tpr, auc_thresholds = roc_curve(real_labels, ae_y_pred_prob)
auc_score = auc(fpr, tpr)
#scipy.io.savemat('raw',{'fpr':fpr,'tpr':tpr,'auc_score':auc_score})
## AUPR score add
precision1, recall, pr_threshods = precision_recall_curve(real_labels, ae_y_pred_prob)
aupr_score = auc(recall, precision1)
#scipy.io.savemat('raw_aupr',{'recall':recall,'precision':precision1,'aupr_score':aupr_score})
print "RF :", acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score
## DNN
class_index = class_index + 1
prefilter_train = np.concatenate((train1, train2), axis = 1)
prefilter_test = np.concatenate((test1, test2), axis = 1)
model_DNN = DNN()
train_label_new_forDNN = np.array([[0,1] if i == 1 else [1,0] for i in train_label_new])
model_DNN.fit(prefilter_train,train_label_new_forDNN,batch_size=200,nb_epoch=20,shuffle=True,validation_split=0)
proba = model_DNN.predict_classes(prefilter_test,batch_size=200,verbose=True)
ae_y_pred_prob = model_DNN.predict_proba(prefilter_test,batch_size=200,verbose=True)
acc, precision, sensitivity, specificity, MCC = calculate_performace(len(real_labels), proba, real_labels)
fpr, tpr, auc_thresholds = roc_curve(real_labels, ae_y_pred_prob[:,1])
auc_score = auc(fpr, tpr)
scipy.io.savemat('raw_DNN',{'fpr':fpr,'tpr':tpr,'auc_score':auc_score})
## AUPR score add
precision1, recall, pr_threshods = precision_recall_curve(real_labels, ae_y_pred_prob[:,1])
aupr_score = auc(recall, precision1)
print "RAW DNN:",acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score
all_performance_DNN.append([acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score])
## SDA + DNN
class_index = class_index + 1
model_DNN = DNN2()
train_label_new_forDNN = np.array([[0,1] if i == 1 else [1,0] for i in train_label_new])
model_DNN.fit(prefilter_train_bef,train_label_new_forDNN,batch_size=200,nb_epoch=20,shuffle=True,validation_split=0)
proba = model_DNN.predict_classes(prefilter_test_bef,batch_size=200,verbose=True)
ae_y_pred_prob = model_DNN.predict_proba(prefilter_test_bef,batch_size=200,verbose=True)
acc, precision, sensitivity, specificity, MCC = calculate_performace(len(real_labels), proba, real_labels)
fpr, tpr, auc_thresholds = roc_curve(real_labels, ae_y_pred_prob[:,1])
auc_score = auc(fpr, tpr)
scipy.io.savemat('SDA_DNN',{'fpr':fpr,'tpr':tpr,'auc_score':auc_score})
## AUPR score add
precision1, recall, pr_threshods = precision_recall_curve(real_labels, ae_y_pred_prob[:,1])
aupr_score = auc(recall, precision1)
print "SDADNN :",acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score
all_performance_SDADNN.append([acc, precision, sensitivity, specificity, MCC, auc_score, aupr_score])
pdb.set_trace()
print 'mean performance of deep autoencoder'
print np.mean(np.array(all_performance), axis=0)
print '---' * 50
    print 'mean performance of deep autoencoder without fine tuning'
print np.mean(np.array(all_performance_bef), axis=0)
print '---' * 50
print 'mean performance of ADA using raw feature'
print np.mean(np.array(all_performance_rf), axis=0)
print '---' * 50
print 'mean performance of DNN using raw feature'
print np.mean(np.array(all_performance_DNN), axis=0)
print '---' * 50
print 'mean performance of SDA DNN'
print np.mean(np.array(all_performance_SDADNN), axis=0)
#print 'mean performance of stacked ensembling'
#print np.mean(np.array(all_performance_blend), axis=0)
#print '---' * 50
fileObject = open('resultListAUC_aupr_ADA5_inter2.txt', 'w')
for i in all_performance:
k=' '.join([str(j) for j in i])
fileObject.write(k+"\n")
fileObject.write('\n')
for i in all_performance_bef:
k=' '.join([str(j) for j in i])
fileObject.write(k+"\n")
fileObject.write('\n')
for i in all_performance_rf:
k=' '.join([str(j) for j in i])
fileObject.write(k+"\n")
fileObject.write('\n')
for i in all_performance_DNN:
k=' '.join([str(j) for j in i])
fileObject.write(k+"\n")
fileObject.write('\n')
for i in all_performance_SDADNN:
k=' '.join([str(j) for j in i])
fileObject.write(k+"\n")
#for i in all_performance_blend:
# k=' '.join([str(j) for j in i])
# fileObject.write(k+"\n")
fileObject.close()
def transfer_label_from_prob(proba):
label = [1 if val>=0.9 else 0 for val in proba]
return label
if __name__=="__main__":
DeepInteract()
| gpl-3.0 |
gfyoung/pandas | pandas/tests/generic/test_generic.py | 2 | 16480 | from copy import copy, deepcopy
import numpy as np
import pytest
from pandas.core.dtypes.common import is_scalar
from pandas import DataFrame, Series
import pandas._testing as tm
# ----------------------------------------------------------------------
# Generic types test cases
class Generic:
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
"""
construct an object for the given shape
        if value is specified use that if it's a scalar
if value is an array, repeat it as needed
"""
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if is_scalar(value):
if value == "empty":
arr = None
dtype = np.float64
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
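    # Illustrative (not part of the original tests): in the Series-based
    # subclass (_ndim == 1), self._construct(4, value=1) builds a length-4
    # Series filled with 1.0; in the DataFrame-based subclass it builds a
    # 4x4 frame filled with 1.0.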
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
idx = list("ABCD")
# relabeling values passed into self.rename
args = [
str.lower,
{x: x.lower() for x in idx},
Series({x: x.lower() for x in idx}),
]
for axis in self._axes():
kwargs = {axis: idx}
obj = self._construct(4, **kwargs)
for arg in args:
# rename a single axis
result = obj.rename(**{axis: arg})
expected = obj.copy()
setattr(expected, axis, list("abcd"))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {
self._typ._get_axis_name(i): list(range(n)) for i in range(self._ndim)
}
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value="empty", **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data is includes _get_bool_data, so can't test for
# non-inclusion
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
msg = f"The truth value of a {self._typ.__name__} is ambiguous"
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
obj = self._construct(shape=4, value=1)
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
obj = self._construct(shape=4, value=np.nan)
with pytest.raises(ValueError, match=msg):
bool(obj == 0)
with pytest.raises(ValueError, match=msg):
bool(obj == 1)
with pytest.raises(ValueError, match=msg):
bool(obj)
# empty
obj = self._construct(shape=0)
with pytest.raises(ValueError, match=msg):
bool(obj)
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
with pytest.raises(ValueError, match=msg):
if obj1:
pass
with pytest.raises(ValueError, match=msg):
obj1 and obj2
with pytest.raises(ValueError, match=msg):
obj1 or obj2
with pytest.raises(ValueError, match=msg):
not obj1
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._mgr = o._mgr.downcast()
self._compare(result, o)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._mgr = o._mgr.downcast()
self._compare(result, o)
def test_constructor_compound_dtypes(self):
# see gh-5191
# Compound dtypes should raise NotImplementedError.
def f(dtype):
return self._construct(shape=3, value=1, dtype=dtype)
msg = (
"compound dtypes are not implemented "
f"in the {self._typ.__name__} constructor"
)
with pytest.raises(NotImplementedError, match=msg):
f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])
# these work (though results may be unexpected)
f("int64")
f("float64")
f("M8[ns]")
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
assert v is None
else:
assert v == getattr(y, m, None)
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = "foo"
o2 = self._construct(shape=3)
o2.name = "bar"
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
result = getattr(o, op)(o)
self.check_metadata(o, result)
# simple boolean
for op in ["__eq__", "__le__", "__ge__"]:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
self.check_metadata(o, v1 & v1)
self.check_metadata(o, v1 | v1)
# combine_first
result = o.combine_first(o2)
self.check_metadata(o, result)
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
result = o + o2
self.check_metadata(result)
# simple boolean
for op in ["__eq__", "__le__", "__ge__"]:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
self.check_metadata(v1 & v2)
self.check_metadata(v1 | v2)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
assert o.size == np.prod(o.shape)
assert o.size == 10 ** len(o.axes)
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
assert len(np.array_split(o, 5)) == 5
assert len(np.array_split(o, 2)) == 2
# See gh-12301
def test_stat_unexpected_keyword(self):
obj = self._construct(5)
starwars = "Star Wars"
errmsg = "unexpected keyword"
with pytest.raises(TypeError, match=errmsg):
obj.max(epic=starwars) # stat_function
with pytest.raises(TypeError, match=errmsg):
obj.var(epic=starwars) # stat_function_ddof
with pytest.raises(TypeError, match=errmsg):
obj.sum(epic=starwars) # cum_function
with pytest.raises(TypeError, match=errmsg):
obj.any(epic=starwars) # logical_function
@pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"])
def test_api_compat(self, func):
# GH 12021
# compat for __name__, __qualname__
obj = self._construct(5)
f = getattr(obj, func)
assert f.__name__ == func
assert f.__qualname__.endswith(func)
def test_stat_non_defaults_args(self):
obj = self._construct(5)
out = np.array([0])
errmsg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=errmsg):
obj.max(out=out) # stat_function
with pytest.raises(ValueError, match=errmsg):
obj.var(out=out) # stat_function_ddof
with pytest.raises(ValueError, match=errmsg):
obj.sum(out=out) # cum_function
with pytest.raises(ValueError, match=errmsg):
obj.any(out=out) # logical_function
def test_truncate_out_of_bounds(self):
# GH11382
# small
shape = [2000] + ([1] * (self._ndim - 1))
small = self._construct(shape, dtype="int8", value=1)
self._compare(small.truncate(), small)
self._compare(small.truncate(before=0, after=3e3), small)
self._compare(small.truncate(before=-1, after=2e3), small)
# big
shape = [2_000_000] + ([1] * (self._ndim - 1))
big = self._construct(shape, dtype="int8", value=1)
self._compare(big.truncate(), big)
self._compare(big.truncate(before=0, after=3e6), big)
self._compare(big.truncate(before=-1, after=2e6), big)
@pytest.mark.parametrize(
"func",
[copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
@pytest.mark.parametrize("shape", [0, 1, 2])
def test_copy_and_deepcopy(self, shape, func):
# GH 15444
obj = self._construct(shape)
obj_copy = func(obj)
assert obj_copy is not obj
self._compare(obj_copy, obj)
class TestNDFrame:
# tests that don't fit elsewhere
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=["A"])
tm.assert_series_equal(df.squeeze(), df["A"])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = Series([], name="five", dtype=np.float64)
empty_frame = DataFrame([empty_series])
tm.assert_series_equal(empty_series, empty_series.squeeze())
tm.assert_series_equal(empty_series, empty_frame.squeeze())
# axis argument
df = tm.makeTimeDataFrame(nper=1).iloc[:, :1]
assert df.shape == (1, 1)
tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0])
assert df.squeeze() == df.iloc[0, 0]
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.squeeze(axis=2)
msg = "No axis named x for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.squeeze(axis="x")
df = tm.makeTimeDataFrame(3)
tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
s = tm.makeFloatSeries()
tm.assert_series_equal(np.squeeze(s), s)
df = tm.makeTimeDataFrame().reindex(columns=["A"])
tm.assert_series_equal(np.squeeze(df), df["A"])
def test_transpose(self):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
# calls implementation in pandas/core/base.py
tm.assert_series_equal(s.transpose(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.transpose().transpose(), df)
def test_numpy_transpose(self, frame_or_series):
obj = tm.makeTimeDataFrame()
if frame_or_series is Series:
obj = obj["A"]
if frame_or_series is Series:
# 1D -> np.transpose is no-op
tm.assert_series_equal(np.transpose(obj), obj)
# round-trip preserved
tm.assert_equal(np.transpose(np.transpose(obj)), obj)
msg = "the 'axes' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.transpose(obj, axes=1)
def test_take(self):
indices = [1, 5, -2, 6, 3, -1]
for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]:
out = s.take(indices)
expected = Series(
data=s.values.take(indices), index=s.index.take(indices), dtype=s.dtype
)
tm.assert_series_equal(out, expected)
for df in [tm.makeTimeDataFrame()]:
out = df.take(indices)
expected = DataFrame(
data=df.values.take(indices, axis=0),
index=df.index.take(indices),
columns=df.columns,
)
tm.assert_frame_equal(out, expected)
def test_take_invalid_kwargs(self, frame_or_series):
indices = [-3, 2, 0, 1]
obj = tm.makeTimeDataFrame()
if frame_or_series is Series:
obj = obj["A"]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
obj.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
obj.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
obj.take(indices, mode="clip")
@pytest.mark.parametrize("is_copy", [True, False])
def test_depr_take_kwarg_is_copy(self, is_copy, frame_or_series):
# GH 27357
obj = DataFrame({"A": [1, 2, 3]})
if frame_or_series is Series:
obj = obj["A"]
msg = (
"is_copy is deprecated and will be removed in a future version. "
"'take' always returns a copy, so there is no need to specify this."
)
with tm.assert_produces_warning(FutureWarning) as w:
obj.take([0, 1], is_copy=is_copy)
assert w[0].message.args[0] == msg
def test_axis_classmethods(self, frame_or_series):
box = frame_or_series
obj = box(dtype=object)
values = box._AXIS_TO_AXIS_NUMBER.keys()
for v in values:
assert obj._get_axis_number(v) == box._get_axis_number(v)
assert obj._get_axis_name(v) == box._get_axis_name(v)
assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
def test_axis_names_deprecated(self, frame_or_series):
# GH33637
box = frame_or_series
obj = box(dtype=object)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
obj._AXIS_NAMES
def test_axis_numbers_deprecated(self, frame_or_series):
# GH33637
box = frame_or_series
obj = box(dtype=object)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
obj._AXIS_NUMBERS
def test_flags_identity(self, frame_or_series):
obj = Series([1, 2])
if frame_or_series is DataFrame:
obj = obj.to_frame()
assert obj.flags is obj.flags
obj2 = obj.copy()
assert obj2.flags is not obj.flags
def test_slice_shift_deprecated(self, frame_or_series):
# GH 37601
obj = DataFrame({"A": [1, 2, 3, 4]})
        if frame_or_series is Series:
obj = obj["A"]
with tm.assert_produces_warning(FutureWarning):
obj.slice_shift()
| bsd-3-clause |
zaxliu/deepnap | experiments/kdd-exps/experiment_message_2016-6-13_G5_BUF2_FR20_1_legacy.py | 1 | 4370 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_file_name = 'message_2016-6-13_G5_BUF2_FR20_1.log'
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.5, 0.9
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 5
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size = 1, 400
reward_scaling, reward_scaling_update = 20, 'fixed'
batch_size, update_period, freeze_period, rs_period = 100, 4, 16, 32
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, 0.0
beta = 0.7
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime('2014-09-25 09:20:00')
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavy lifting
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause |
devanshdalal/scikit-learn | sklearn/svm/tests/test_bounds.py | 49 | 2386 | import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.utils.testing import assert_true, raises
from sklearn.utils.testing import assert_raise_message
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
# loss='l2' should raise ValueError
assert_raise_message(ValueError, "loss type not in",
l1_min_c, dense_X, Y1, "l2")
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
SEMCOG/synthpop | synthpop/ipf/ipf.py | 3 | 2483 | import numpy as np
import pandas as pd
def calculate_constraints(
marginals, joint_dist, tolerance=1e-3, max_iterations=1000):
"""
Calculate constraints on household or person classes using
single category marginals and the observed class proportions
in a population sample.
Constraints are calculated via an iterative proportional fitting
procedure.
Parameters
----------
marginals : pandas.Series
The total count of each observed subcategory tracked.
This should have a pandas.MultiIndex with the outer level containing
high-level category descriptions and the inner level containing
the individual subcategory breakdowns.
joint_dist : pandas.Series
The observed counts of each household or person class in some sample.
The index will be a pandas.MultiIndex with a level for each observed
class in the sample. The levels should be named for ease of
introspection.
tolerance : float, optional
The condition for stopping the IPF procedure. If the change in
constraints is less than or equal to this value after an iteration
the calculations are stopped.
max_iterations : int, optional
Maximum number of iterations to do before stopping and raising
an exception.
Returns
-------
constraints : pandas.Series
Will have the index of `joint_dist` and contain the desired
totals for each class.
iterations : int
Number of iterations performed.
"""
flat_joint_dist = joint_dist.reset_index()
constraints = joint_dist.values.copy().astype('float')
prev_constraints = constraints.copy()
prev_constraints += tolerance # ensure we run at least one iteration
def calc_diff(x, y):
return np.abs(x - y).sum()
iterations = 0
list_of_loc = [
((flat_joint_dist[idx[0]] == idx[1]).values, marginals[idx])
for idx in marginals.index
]
while calc_diff(constraints, prev_constraints) > tolerance:
prev_constraints[:] = constraints
for loc, target in list_of_loc:
constraints[loc] *= target / constraints[loc].sum()
iterations += 1
if iterations > max_iterations:
raise RuntimeError(
'Maximum number of iterations reached during IPF: {}'.format(
max_iterations))
return pd.Series(constraints, index=joint_dist.index), iterations
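# Minimal usage sketch (illustrative, not part of the original module). The
# category names and counts below are made up; both marginals sum to the same
# population total so the IPF iterations can converge.
if __name__ == '__main__':
    marginals = pd.Series(
        [60, 40, 30, 70],
        index=pd.MultiIndex.from_tuples(
            [('cat_owner', 'yes'), ('cat_owner', 'no'),
             ('car_owner', 'yes'), ('car_owner', 'no')]))
    joint_dist = pd.Series(
        [10, 20, 15, 5],
        index=pd.MultiIndex.from_tuples(
            [('yes', 'yes'), ('yes', 'no'), ('no', 'yes'), ('no', 'no')],
            names=['cat_owner', 'car_owner']))
    constraints, iterations = calculate_constraints(marginals, joint_dist)
    print(constraints)
    print('converged after {} iterations'.format(iterations))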
| bsd-3-clause |
zihua/scikit-learn | sklearn/mixture/gmm.py | 11 | 32100 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
# Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
@deprecated("The function log_multivariate_normal_density is deprecated in 0.18"
" and will be removed in 0.20.")
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
@deprecated("The function sample_gaussian is deprecated in 0.18"
" and will be removed in 0.20."
" Use numpy.random.multivariate_normal instead.")
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
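# Illustrative replacement sketch (not part of the original module), following
# the deprecation note above: for 'full' or 'tied' covariances,
#
#   rng = np.random.RandomState(0)
#   samples = rng.multivariate_normal(mean, covar, n_samples).T
#
# matches sample_gaussian's (n_features, n_samples) layout; for 'diag' pass
# np.diag(covar), and for a scalar 'spherical' covar pass
# covar * np.eye(len(mean)).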
class _GMMBase(BaseEstimator):
"""Gaussian Mixture Model.
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best results is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) # doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance."""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""Perform the Mstep of the EM algorithm and return the cluster weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
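    # Worked example (illustrative, not part of the original source): with
    # covariance_type='full', n_components=2 and 3 features, this counts
    # 2 * 3 * 4 / 2 = 12 covariance terms, 2 * 3 = 6 means and 2 - 1 = 1
    # free mixing weight, i.e. 19 free parameters.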
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data.
Parameters
----------
        X : array of shape (n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data.
Parameters
----------
        X : array of shape (n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
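# Illustrative sketch (not part of the original module): the information
# criteria above are typically used to pick the number of mixture components,
# keeping the candidate with the lowest BIC. The helper below is hypothetical
# and only demonstrates that pattern with the deprecated GMM wrapper defined
# just after it.
def _example_select_n_components_by_bic(X, candidates=(1, 2, 3, 4, 5)):
    """Fit a GMM for each candidate component count and return the model
    with the lowest BIC (illustrative helper, unused elsewhere)."""
    best_gmm, best_bic = None, np.inf
    for n in candidates:
        gmm = GMM(n_components=n, covariance_type='full')
        gmm.fit(X)
        bic = gmm.bic(X)
        if bic < best_bic:
            best_gmm, best_bic = gmm, bic
    return best_gmm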
@deprecated("The class GMM is deprecated in 0.18 and will be "
" removed in 0.20. Use class GaussianMixture instead.")
class GMM(_GMMBase):
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
super(GMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
random_state=random_state, tol=tol, min_covar=min_covar,
n_iter=n_iter, n_init=n_init, params=params,
init_params=init_params, verbose=verbose)
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model."""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if cv.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model."""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
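# For reference, the loop above evaluates, for each component with mean mu and
# covariance Sigma factored as Sigma = L L^T (Cholesky):
#     log N(x | mu, Sigma) = -0.5 * (n_dim * log(2*pi) + log|Sigma|
#                                    + (x - mu)^T Sigma^{-1} (x - mu))
# where log|Sigma| = 2 * sum(log(diag(L))) and the quadratic form comes from the
# triangular solve L z = (x - mu)^T as sum(z ** 2, axis=1).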
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
@deprecated("The functon distribute_covar_matrix_to_match_covariance_type"
"is deprecated in 0.18 and will be removed in 0.20.")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for diagonal cases."""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Perform the covariance M step for spherical cases."""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for full cases."""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
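# For reference, the update above implements the responsibility-weighted sample
# covariance for each component c:
#     Sigma_c = sum_i r_ic (x_i - mu_c)(x_i - mu_c)^T / sum_i r_ic + min_covar * I
# (the 10 * EPS term only guards against division by zero for empty components).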
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for tied cases."""
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
chaxor/Spectro | Spectro.py | 1 | 42248 | ## Spectro
## =======
## This program analyzes UV-VIS absorption spectra from aqueous surfactant suspended dispersions of carbon nanotubes.
## It does so by fitting absorption profile models from the literature, solving a linear (non-negative) least-squares problem for the nanotube amplitudes at each step of a non-linear fit of the background model (amorphous carbon and pi plasmon resonances).
##
## This program is written using python 2.7 with a Qt4 Gui and has the following dependencies:
##
## pythonxy:
## http://www.mirrorservice.org/sites/pythonxy.com/Python(x,y)-2.7.6.1.exe
##
## lmfit:
## https://pypi.python.org/packages/2.7/l/lmfit/lmfit-0.7.4.win32-py2.7.exe
##
## This program was written by Chase Brown and is under the GNU General Public License.
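##
## Fitting strategy (added note): the deconvolution below nests a linear solve
## inside a non-linear one -- at every trial background the nanotube amplitudes
## are recovered with non-negative least squares, and only the background
## parameters are iterated by the outer optimizer. The helper below is an
## illustrative, self-contained sketch of that pattern with a made-up
## exponential background; its name and arguments are hypothetical and it is
## not used by the program.
def _example_nested_background_fit(E, absorbance, profile_matrix):
    import numpy as _np
    from scipy.optimize import nnls as _nnls, fmin_l_bfgs_b as _fmin
    A = _np.asarray(profile_matrix)
    def _residual(bkg_params):
        amp, decay = bkg_params
        background = amp * _np.exp(-decay * E)  # made-up background shape
        swcnt_amps, _ = _nnls(A, absorbance - background)
        model = A.dot(swcnt_amps) + background
        return _np.sum((model - absorbance) ** 2)
    x0 = _np.array([0.1, 0.001])
    best_bkg, _, _ = _fmin(_residual, x0, bounds=[(0.0, None), (0.0, None)],
                           approx_grad=True)
    return best_bkg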
from PyQt4 import QtCore, QtGui
import sys
from math import factorial
import matplotlib
matplotlib.use('Qt4Agg')
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import numpy as np
from scipy import *
import scipy
import lmfit
import multiprocessing as mp
import time
import datetime
import xlsxwriter
import operator
##
## CONSTANTS
##
# A value of 1 for APP_SCREEN_RATIO will open the program in a 'maximized' type of position.
APP_SCREEN_RATIO = 0.7
# Due to the Global Interpreter Lock (GIL) in Python, we have to switch the main thread from updating the GUI front end to doing calculations in the back end
UPDATE_TIME_IN_MILLISECONDS = 300
# E=hc/lambda , and hc=1240 eV*nm
WAVELENGTH_TO_ENERGY_CONVERSION = 1240.0 # eV*nm
# Using Dresselhaus' (n,m) convention for carbon nanotubes (CNTs), we can create all CNTs between two bounding (n,m) indices
lowestNM = 5
highestNM = 12
## Pi Plasmon ##
# Starting pi plasmon amplitude as a ratio of the absorbance at the peak center
AMP_RATIO_PI = 0.6
# Starting peak center for the pi plasmon
PI_PLASMON_CENTER = 5.6
# Allowable variance in pi plasmon center
PI_PLASMON_CENTER_VAR = 0.6
PI_PLASMON_CENTER_MAX = (PI_PLASMON_CENTER + PI_PLASMON_CENTER_VAR)
PI_PLASMON_CENTER_MIN = (PI_PLASMON_CENTER - PI_PLASMON_CENTER_VAR)
# Full width at half maximum for pi plasmon
PI_PLASMON_FWHM = 0.6
PI_PLASMON_FWHM_MAX = 5.0
PI_PLASMON_FWHM_MIN = 0.1
## Graphite Lorentzian ##
# Starting graphite amplitude as a ratio of the absorbance at the peak center
AMP_RATIO_GRAPHITE = 0.5
# Starting peak center for the graphite
GRAPHITE_CENTER = 4.1
# Allowable variance in graphite
GRAPHITE_CENTER_VAR = 0.4
GRAPHITE_CENTER_MAX = (GRAPHITE_CENTER + GRAPHITE_CENTER_VAR)
GRAPHITE_CENTER_MIN = (GRAPHITE_CENTER - GRAPHITE_CENTER_VAR)
# Full width at half maximum for graphite
GRAPHITE_FWHM = 0.6
GRAPHITE_FWHM_MAX = 5.0
GRAPHITE_FWHM_MIN = 0.5
# Extinction coefficients for types of amorphous carbon
# Source: "Analyzing Absorption Backgrounds in Single-Walled Carbon Nanotube Spectra" (Anton V. Naumov, Saunab Ghosh, Dmitri A. Tsyboulski, Sergei M. Bachilo, and R. Bruce Weisman)
alpha_N134 = 0.155 # L/mg
alpha_aCB = 0.082 # L/mg
b_N134 = 0.0030 # nm^-1
b_aCB = 0.00155 # nm^-1
# Metallic Background coefficients
alpha_METAL = 0.048 # L/mg
b_METAL = 0.00155 # nm^-1
# Energy Transitions given by Reference 2: "An atlas of carbon nanotube optical transitions"
# Equation used for energy transitions can be found in the supplementary information
# Below is the list of anisotropy prefactors and fermi velocity renormalization constants used in the equation
# Supporting Info:
beta = -0.620 # eV * nm
alpha = [1.532, 1.474, 1.504, 1.556, 1.560, 1.576, 1.588, 1.593, 1.596, 1.608] # eV * nm
eta = [0.148, 0.097, 0.068, 0.058, 0.058, 0.061, 0.050, 0.052, 0.058, 0.058] # eV * nm^2
gamma = [-0.056,-0.009,-0.002,0.014, 0.016, 0.009, 0.000, 0.000, 0.011, 0.004] # eV * nm^2
# Main Paper:
beta_mainpaper = -0.173 # eV * nm^2
eta_mainpaper = [0.142, 0.097, 0.068, 0.058, 0.058, 0.058, 0.047, 0.052, 0.047, 0.054] # eV * nm^2
vF_mainpaper = [1.229, 1.152, 1.176, 1.221, 1.226, 1.236, 1.241, 1.244, 1.248, 1.256] # 10^6 m s^-1
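# For reference, Transition.Eii_vacc() further below evaluates the
# supporting-information form of the transition-energy equation (p is the
# transition index, k_p and theta_p the polar coordinates about the K point):
#     E_ii = alpha[p-1]*k_p + beta*k_p*log10(1.5*k_p)
#            + k_p**2 * (eta[p-1] + gamma[p-1]*cos(3*theta_p)) * cos(3*theta_p)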
## Useful functions for getting data from arbitrary data files
## Since most data files contain contiguous sets of data in columns, these functions extract the data and disregard the other information
def retrieve_XY(file_path):
# XY data is read in from a file in text format
file_data = open(file_path).readlines()
# The list of strings (lines in the file) is made into a list of lists while splitting by whitespace and removing commas
file_data = map(lambda line: line.rstrip('\n').replace(',',' ').split(), file_data)
# Remove empty lists, make into numpy array
xy_array = np.array(filter(None, file_data))
# Each line is searched to make sure that all items in the line are a number
where_num = np.array(map(is_number, xy_array))
# The data is searched for the longest contiguous chain of numbers
contig = contiguous_regions(where_num)
try:
# Data lengths
data_lengths = contig[:,1] - contig[:,0]
# All maximums in contiguous data
maxs = np.amax(data_lengths)
longest_contig_idx = np.where(data_lengths == maxs)
except ValueError:
print 'Problem finding contiguous data'
return np.array([])
# Starting and stopping indices of the contiguous data are stored
ss = contig[longest_contig_idx]
# The file data with this longest contiguous chain of numbers
    # Each value in the contiguous data is cast to float and the result is collected into a numpy array
longest_data_chains = np.array([[map(float, n) for n in xy_array[i[0]:i[1]]] for i in ss])
# If there are multiple sets of data of the same length, they are added in columns
column_stacked_data_chain = np.hstack(longest_data_chains)
return column_stacked_data_chain
#http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-fulfilling-condition-in-a-numpy-array
def contiguous_regions(condition):
"""Finds contiguous True regions of the boolean array "condition". Returns
a 2D array where the first column is the start index of the region and the
second column is the end index."""
    # Find the indices of changes in "condition"
d = np.diff(condition)
idx, = d.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, condition.size] # Edit
# Reshape the result into two columns
idx.shape = (-1,2)
return idx
def is_number(s):
try:
np.float64(s)
return True
except ValueError:
return False
## Useful functions for spectral processing
heaviside = lambda x: 0.5 * (np.sign(x) + 1)
# https://gist.github.com/RyanHope/2321077
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-squares fit with a
    high-order polynomial over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
# Optical transition from i (valence) to j (conduction)
# Metallics may have a splitting between high and low energies (trigonal warping effect)
class Transition:
def __init__(self, swcnt, i, j=None, k = 0):
self.swcnt = swcnt
self.i = i
self.k = k
if j is not None:
self.j = j
else:
self.j = i
# Solvents affect optical transitions (redshifting or blueshifting) due to differences in dielectric constants and refractive indexes, etc.
# Silvera-Batista, Carlos A., Randy K. Wang, Philip Weinberg, and Kirk J. Ziegler.
# "Solvatochromic shifts of single-walled carbon nanotubes in nonpolar microenvironments."
# Physical Chemistry Chemical Physics 12, no. 26 (2010): 6990-6998.
self.epsilon = self.swcnt.spectra.epsilon
self.eta = self.swcnt.spectra.eta
self.Dswcntsolvent = self.swcnt.spectra.Dswcntsolvent
# Calculate the properties of the transition and store them
self.p = self.p()
self.k_p = self.k_p()
self.theta_p = self.theta_p()
self.Eii = self.Eii()
self.fs = self.fs()
self.FWHM = self.FWHM()
self.a = self.a()
self.b = self.b()
self.delta = self.delta()
self.shape = 1.0
self.amp = self.fs
if self.swcnt.spectra.X is not None:
E = self.swcnt.spectra.X
else:
E = np.array([0])
# Absorption profile from:
# Liu, Kaihui, Xiaoping Hong, Sangkook Choi, Chenhao Jin, Rodrigo B. Capaz, Jihoon Kim, Shaul Aloni et al.
# "Systematic Determination of Absolute Absorption Cross-section of Individual Carbon Nanotubes."
# arXiv preprint arXiv:1311.3328 (2013).
self.line = self.fs/np.pi * \
self.FWHM/((E-self.Eii)**2+self.FWHM**2) + \
self.fs/(self.a*np.pi) * \
np.convolve( (self.b*self.FWHM)/(E**2+(self.b*self.FWHM)**2), \
(heaviside(E-(self.Eii + self.delta))/np.sqrt(abs(E-(self.Eii + self.delta))))
, mode='same')
# Energy Transition Functions from Reference 2:
# Optical transition index 'p'
def p(self):
if(self.swcnt.mod_type==0.): p = 3.*self.i
elif(self.swcnt.mod_type==1. or self.swcnt.mod_type==2.):
if(self.i%2.==0.): p = self.i+(self.i/2.)-1. # If i is even
elif(self.i%2.==1.): p = self.i+int(self.i/2.) # If i is odd
else: print("Error in electronic type")
p = int(p)
return p
# Length of polar coordinates vector from the K point (Reference 2):
def k_p(self):
k_p = 2.*self.p/(3.*self.swcnt.dt)
return k_p
# Angle of wave vector around K point in radians
def theta_p(self):
theta_p = []
if(self.swcnt.mod_type==0.):
if(self.k==0):
# lower energy sub band
theta_p = self.swcnt.theta + np.pi
if(self.k==1):
# higher energy sub band
theta_p = self.swcnt.theta
elif(self.swcnt.mod_type==1.):
theta_p = self.swcnt.theta + self.i*np.pi
elif(self.swcnt.mod_type==2.):
theta_p = self.swcnt.theta + (self.i+1.)*np.pi
return theta_p
# Energy Optical Transitions from Valence Band (i) to Conduction Band (i) given by Reference 2: "An atlas of carbon nanotube optical transitions"
# Equation used for energy transitions can be found in the supplementary information of Reference 2
def Eii_vacc(self):
theta_p = self.theta_p
k_p = self.k_p
p = self.p
# Supporting info algorithm
Eii = alpha[p-1]*k_p + beta*k_p*np.log10(1.5*k_p) + (k_p**2.)*(eta[p-1]+gamma[p-1]*np.cos(theta_p*3.))*np.cos(theta_p * 3.)
return Eii
def Eii(self):
Eii_vacc = self.Eii_vacc()
# Silvera-Batista, Carlos A., Randy K. Wang, Philip Weinberg, and Kirk J. Ziegler.
# "Solvatochromic shifts of single-walled carbon nanotubes in nonpolar microenvironments."
# Physical Chemistry Chemical Physics 12, no. 26 (2010): 6990-6998.
Eii_shift = -self.Dswcntsolvent*self.Onsager()/(Eii_vacc**3.*self.swcnt.dt**5.)
Eii = Eii_vacc + Eii_shift
return Eii
def Onsager(self):
fe = 2.*(self.epsilon-1.)/(2*self.epsilon+1)
fn = 2.*(self.eta**2-1.)/(2*self.eta**2+1)
return fe - fn
    # Optical oscillator strength per atom for semiconducting tubes
# Liu, Kaihui, Xiaoping Hong, Sangkook Choi, Chenhao Jin, Rodrigo B. Capaz, Jihoon Kim, Shaul Aloni et al.
# "Systematic Determination of Absolute Absorption Cross-section of Individual Carbon Nanotubes."
# arXiv preprint arXiv:1311.3328 (2013).
def fs(self):
return 45.9/((self.p + 7.5)*self.swcnt.dt)
# Full width at half maximum
def FWHM(self):
if(self.swcnt.electronic_type!=0):
FWHM = 0.0194 * self.Eii
else:
FWHM = 0.0214 * self.Eii
return FWHM
def a(self):
if(self.swcnt.electronic_type!=0):
a = 4.673 - 0.747 * self.swcnt.dt
else:
a = 0.976 + 0.186 * self.swcnt.dt
return a
def b(self):
if(self.swcnt.electronic_type!=0):
b = 0.97 + 0.256 * self.swcnt.dt
else:
b = 3.065 - 0.257 * self.swcnt.dt
return b
def delta(self):
if(self.swcnt.electronic_type!=0):
delta = 0.273 - 0.041 * self.swcnt.dt
else:
delta = 0.175 - 0.0147 * self.swcnt.dt
return delta
# Returns a string which gives information about the transition
def transition_string(self):
NM_Eii_string = "SWCNT" + self.swcnt.NM() + '_E' + str(self.i) + str(self.j)
if(self.swcnt.electronic_type!=0):
transition_string = NM_Eii_string
if(self.swcnt.electronic_type==0):
if(self.k==0):
transition_string = NM_Eii_string + "_low"
if(self.k==1):
transition_string = NM_Eii_string + "_high"
else:
transition_string = NM_Eii_string
return transition_string
# Single-walled carbon nanotube object
# each SWCNT is defined by its characteristic (n,m) chirality
class SWCNT:
def __init__(self, n, m, spectra):
n = float(n)
m = float(m)
self.n = n
self.m = m
if spectra is not None:
self.spectra = spectra
else:
self.spectra = Spectra('default', np.zeros(0), np.zeros(0))
# Electronic type test:
# 0 - metallic
# 1 - semiconducting type 1 (S1)
# 2 - semiconducting type 2 (S2)
self.electronic_type = np.mod(2.*n + m,3.)
# Alternative nomenclature:
# 0 - metallic
# 1 - mod type 1 <=> semiconducting type 2 (MOD1 <=> S2)
# 2 - mod type 2 <=> semiconducting type 1 (MOD2 <=> S1)
self.mod_type = np.mod(n - m,3.)
# Basic Nanotube Properties from Reference 1:
# Chiral vector length in angstroms (1.421 is the C-C distance in angstroms)
self.Ch = np.sqrt(3.0)*1.421*np.sqrt(n**2. + m**2. + n*m)
# CNT diameter in angstroms (/10. --> nanometers)
self.dt = self.Ch/np.pi/10.
# Chiral angle in radians
self.theta = np.arctan(np.sqrt(3.)*m/(m + 2.*n))
        # Consider S11, S22, S33, S44, S55, S66, S77 and M11, M22, M33, with high and low transitions for metals
upper_ij_metal = 3
upper_ij_sc = 7
if(self.electronic_type==0):
if(self.n==self.m):
self.allowed_transitions = [Transition(self, i) for i in range(1,upper_ij_metal+1)]
else:
self.allowed_transitions = [Transition(self, i, i, k) for i in range(1,upper_ij_metal+1) for k in range(0,2)]
else:
self.allowed_transitions = [Transition(self, i) for i in range(1,upper_ij_sc+1)]
# A model line for each nanotube is the sum of each of the transitions that can occur for the nanotube
@property
def line(self):
arrays = np.array([transition.line for transition in self.allowed_transitions])
return np.sum(arrays, axis=0)
# Other useful functions for debugging and documentation
def print_electronic_type(self):
if(self.mod_type==0): return "Metallic"
elif(self.mod_type==1 or self.mod_type==2): return "Semiconducting"
else: return "Error in n,m indices"
# Returns the Dresselhaus nomenclature "(n,m)" for each nanotube
def strNM(self):
string_tube = "(" + str(np.int(self.n)) + "," + str(np.int(self.m)) + ")"
return string_tube
    # For parameters, we cannot store "(,)" symbols
# So this is just a string of "NM" - such as "66" for "(6,6)"
def NM(self):
NM = str(np.int(self.n)).rstrip(".") + str(np.int(self.m)).rstrip(".")
return NM
# Pseudo-Voigt profiles approximate a Voigt profile as a weighted sum of Lorentzian and Gaussian components
# It is useful for background creation
def pseudoVoigt(x, amp, center, width, shapeFactor):
    # Guard against a zero width, which would otherwise cause a division by zero
    if width == 0.0:
        width = 0.001
    LorentzPortion = (width**2./((x-center)**2.+width**2.))
    GaussianPortion = np.e**(-((x-center)**2./(2.*width**2.)))
    # Pseudo-Voigt: a shapeFactor-weighted sum of the Lorentzian and Gaussian parts
    Voigt = amp*(shapeFactor*LorentzPortion+(1.-shapeFactor)*GaussianPortion)
    return Voigt
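# Illustrative usage sketch (not called anywhere in this program): shapeFactor=1
# gives a pure Lorentzian and shapeFactor=0 a pure Gaussian; the amplitude and
# energy grid below are made up for demonstration.
def _example_pseudoVoigt_usage():
    energies = np.linspace(1.0, 6.0, 500)
    # A Lorentzian graphite band built from the module-level constants above
    return pseudoVoigt(energies, 0.2, GRAPHITE_CENTER, GRAPHITE_FWHM, 1.0)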
# Function to create all of the tubes and store them in a list
def initialize_SWCNTs(lowestNM, highestNM, spectra):
SWCNTs=[]
# Create a list of all tube species we are interested in
for n in range(0,highestNM+1):
for m in range(0,highestNM+1):
if(n<lowestNM and m<lowestNM): break
elif(n<m): break
else: SWCNTs.append(SWCNT(n, m, spectra))
return SWCNTs
# Spectra object which holds the X and Y data and the sample name
class Spectra(QtCore.QObject):
# Signal/slot setup uses signals to tell the program when to update the GUI
update_signal = QtCore.pyqtSignal(QtCore.QObject)
done_signal = QtCore.pyqtSignal()
def __init__(self, spectra_name, X, Y):
QtCore.QObject.__init__(self)
self.spectra_name = spectra_name
self.X = X
        # The given spectrum is smoothed using the Savitzky-Golay smoothing algorithm
self.Y = savitzky_golay(y=Y,window_size=5, order=2)
self.model = Y*0
self.background_model = Y*0
self.model_without_background = Y*0
self.step = 0
# Solvents affect optical transitions (redshifting or blueshifting) due to differences in dielectric constants and refractive indexes, etc.
# Silvera-Batista, Carlos A., Randy K. Wang, Philip Weinberg, and Kirk J. Ziegler.
# "Solvatochromic shifts of single-walled carbon nanotubes in nonpolar microenvironments."
# Physical Chemistry Chemical Physics 12, no. 26 (2010): 6990-6998.
self.epsilon = 2.3
self.eta = 1.33
self.Dswcntsolvent = 0.09 # eV^4*nm^5
# All the single-walled carbon nanotubes to be used in the deconvolution process
# The list will be an array of SWCNTs from (n,m)=(lowestNM, 0) to (n,m)=(highestNM,highestNM)
self.SWCNT_list = initialize_SWCNTs(lowestNM, highestNM, self)
self.transition_list = [transition for swcnt in self.SWCNT_list for transition in swcnt.allowed_transitions] #if(self.in_spectrum(transition)==True)]
# First, create our SWCNT profile matrix
self.SWCNT_matrix = np.matrix(np.column_stack([swcnt.line for swcnt in self.SWCNT_list]))
self.swcnts_soln = np.ones(self.SWCNT_matrix.shape[1], dtype=np.float64)
self.params = lmfit.Parameters()
self.species_percentage_dictionary = {}
self.species_percentage_error_dictionary = {}
self.metallic_percentage = 0.0
self.mean_diameter = 0.0
graphite_amp = AMP_RATIO_GRAPHITE * np.interp(GRAPHITE_CENTER, self.X, self.Y)
PP_amp = AMP_RATIO_PI * np.interp(PI_PLASMON_CENTER, self.X, self.Y)
self.bkg_soln = np.array([0.0, 0.0, 0.0, 0.0, \
graphite_amp, GRAPHITE_CENTER, GRAPHITE_FWHM, \
PP_amp, PI_PLASMON_CENTER, PI_PLASMON_FWHM])
self.bkg_soln_bounds = np.array([(0.0,None), (0.0, None), (0.0, None), (0.0, None), \
(0.0, None), (GRAPHITE_CENTER_MIN, GRAPHITE_CENTER_MAX), (GRAPHITE_FWHM_MIN, GRAPHITE_FWHM_MAX), \
(0.0, None), (PI_PLASMON_CENTER_MIN, PI_PLASMON_CENTER_MAX), (PI_PLASMON_FWHM_MIN, PI_PLASMON_FWHM_MAX)])
self.sample_params = np.array([self.epsilon, self.eta, self.Dswcntsolvent])
self.sample_params_bounds = np.array([(1.,5.),(1.,5.),(0.0,0.1)])
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.send_update_GUI)
self.timer.start(UPDATE_TIME_IN_MILLISECONDS)
def calc_species_norm_amps_dictionary(self):
species_norm_amp_dict = {}
for i, swcnt in enumerate(self.SWCNT_list):
species_norm_amp_dict[swcnt] = self.swcnts_soln[i]/swcnt.allowed_transitions[0].fs
return species_norm_amp_dict
def calc_species_norm_amps_error_dictionary(self):
species_norm_amp_error_dict = {}
for swcnt in self.SWCNT_list:
for transition in swcnt.allowed_transitions:
try:
Eii_amp_error_value = self.params[transition.transition_string() + '_amp'].stderr
except KeyError:
Eii_amp_error_value = -1.0
try:
species_norm_amp_error_dict[swcnt] = Eii_amp_error_value/transition.fs
except TypeError:
species_norm_amp_error_dict[swcnt] = -1.0
break
return species_norm_amp_error_dict
def calc_species_percentage_dictionary(self):
species_percentage_dict = {}
species_norm_dict = self.calc_species_norm_amps_dictionary()
# First get the sum of all of the amplitudes, while normalizing them using the optical oscillator strength
sum_Eiis_norm_by_fs = sum(species_norm_dict.values())
for swcnt in self.SWCNT_list:
try:
species_percentage_dict[swcnt] = 100.*species_norm_dict[swcnt] / sum_Eiis_norm_by_fs
except (ZeroDivisionError, KeyError):
species_percentage_dict[swcnt] = 0.0
return species_percentage_dict
def calc_species_percentage_error_dictionary(self):
species_percentage_error_dict = {}
species_norm_error_dict = self.calc_species_norm_amps_error_dictionary()
# First get the sum of all of the amplitudes, while normalizing them using the optical oscillator strength
sum_Eiis_norm_by_fs = sum(species_norm_error_dict.values())
for swcnt in self.SWCNT_list:
try:
species_percentage_error_dict[swcnt] = 100.*species_norm_error_dict[swcnt] / sum_Eiis_norm_by_fs
except (ZeroDivisionError, KeyError, TypeError):
species_percentage_error_dict[swcnt] = -1.0
return species_percentage_error_dict
def calc_metallic_percentage(self):
metallic_percentage = 0.0
for swcnt in self.SWCNT_list:
if(swcnt.electronic_type==0.0):
metallic_percentage += self.species_percentage_dictionary[swcnt]
return metallic_percentage
def calc_mean_diameter(self):
mean_diameter = 0.0
for swcnt in self.SWCNT_list:
mean_diameter += self.species_percentage_dictionary[swcnt]/100. * swcnt.dt
return mean_diameter
@QtCore.pyqtSlot()
def deconvolute(self):
self.state = 0
x, f, d = scipy.optimize.fmin_l_bfgs_b(func = self.residual, x0=self.bkg_soln, bounds=self.bkg_soln_bounds, approx_grad = True, factr = 10)
self.done_signal.emit()
def residual(self, bkg_params):
self.step += 1
# Initialize
residual_array = np.zeros(len(self.X))
temp_background_model = np.zeros(len(self.X))
temp_model = np.zeros(len(self.X))
temp_model_without_background = np.zeros(len(self.X))
# Calculate the background first and add SWCNT voigt profiles on later
self.bkg_soln = bkg_params
aCBConc = bkg_params[0]
N134Conc = bkg_params[1]
aCBy0 = bkg_params[2]
N134y0 = bkg_params[3]
GLamp = bkg_params[4]
GLcenter = bkg_params[5]
GLFWHM = bkg_params[6]
PPamp = bkg_params[7]
PPcenter = bkg_params[8]
PPFWHM = bkg_params[9]
aCB = aCBConc * alpha_aCB * (aCBy0 + np.exp(-b_aCB * (WAVELENGTH_TO_ENERGY_CONVERSION/self.X)))
N134 = N134Conc * alpha_N134 * (N134y0 + np.exp(-b_N134 * (WAVELENGTH_TO_ENERGY_CONVERSION/self.X)))
GL = pseudoVoigt(self.X, GLamp, GLcenter, GLFWHM, 1)
PP = pseudoVoigt(self.X, PPamp, PPcenter, PPFWHM, 1)
temp_background_model = aCB + N134 + GL + PP
# In the first state, solve the fitting for just the background parameters and nanotube amplitudes simultaneously
if(self.state==0):
# bkg_sub, if the background is fit properly, will contain just the absorption profile from van hove singularity transitions from nanotubes
bkg_sub = self.Y - temp_background_model
# Solve the system with swcnts:
self.swcnts_soln, residual = scipy.optimize.nnls(self.SWCNT_matrix, bkg_sub)
# Change the amplitudes for each SWCNT in the SWCNT object
for i, swcnt in enumerate(self.SWCNT_list):
swcnt.line = self.swcnts_soln[i] * np.array(self.SWCNT_matrix[:,i])
temp_model_without_background = np.inner(self.SWCNT_matrix, self.swcnts_soln)
temp_model = temp_model_without_background + temp_background_model
# The background model should not exceed the given spectrum at any point
# Therefore, if it does, apply a large residual to that point
for x_index in range(0, len(self.X)):
if(temp_background_model[x_index] > self.Y[x_index]):
residual_array[x_index] = -999.*(temp_model[x_index] - self.Y[x_index])
else:
residual_array[x_index] = temp_model[x_index] - self.Y[x_index]
# Update and store all of the values in the Spectra object
self.model_without_background = temp_model_without_background
self.background_model = temp_background_model
self.model = temp_model
self.species_percentage_dictionary = self.calc_species_percentage_dictionary()
self.species_percentage_error_dictionary = self.calc_species_percentage_error_dictionary()
self.metallic_percentage = self.calc_metallic_percentage()
self.mean_diameter = self.calc_mean_diameter()
return np.sum(residual_array**2)
def send_update_GUI(self):
self.update_signal.emit(self)
return
# Each tab which holds a spectra gets its own object
class Spectra_Tab(QtGui.QTabWidget):
start_comp = QtCore.pyqtSignal()
kill_thread = QtCore.pyqtSignal()
def __init__(self, parent, temp_spectra):
self.parent = parent
QtGui.QTabWidget.__init__(self, parent)
self.temp_spectra = temp_spectra
self.top_layer_grid = QtGui.QGridLayout(self)
self.canvas_frame = QtGui.QFrame(self)
self.canvas_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.results_frame = QtGui.QFrame(self)
self.results_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.top_layer_grid.addWidget(self.canvas_frame)
self.top_layer_grid.addWidget(self.results_frame)
self.canvas_grid = QtGui.QGridLayout(self.canvas_frame)
self.top_left_frame = QtGui.QFrame(self.canvas_frame)
self.top_left_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.canvas_grid.addWidget(self.top_left_frame)
self.bottom_canvas_frame = QtGui.QFrame(self.canvas_frame)
self.bottom_canvas_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.canvas_grid.addWidget(self.bottom_canvas_frame)
vertical_splitter = QtGui.QSplitter(QtCore.Qt.Vertical)
vertical_splitter.addWidget(self.top_left_frame)
vertical_splitter.addWidget(self.bottom_canvas_frame)
self.canvas_grid.addWidget(vertical_splitter)
self.results_grid = QtGui.QGridLayout(self.results_frame)
self.treeWidget = QtGui.QTreeWidget(self.results_frame)
self.treeWidget.setFocusPolicy(QtCore.Qt.WheelFocus)
self.treeWidget.setAutoFillBackground(True)
self.treeWidget.setAlternatingRowColors(True)
self.treeWidget.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.treeWidget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.treeWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerItem)
self.treeWidget.setAutoExpandDelay(-1)
self.treeWidget.setHeaderLabels(["(n,m)/Property","%, [value]"])
self.other_properties = QtGui.QTreeWidgetItem(self.treeWidget, ["Properties"])
self.nm_species = QtGui.QTreeWidgetItem(self.treeWidget, ["(n,m)"])
self.semiconducting = QtGui.QTreeWidgetItem(self.other_properties, ["Semiconducting %"])
self.metallic = QtGui.QTreeWidgetItem(self.other_properties, ["Metallic %"])
self.avg_diameter = QtGui.QTreeWidgetItem(self.other_properties, ["Average Diameter"])
self.step_in_tree = QtGui.QTreeWidgetItem(self.other_properties, ["Iteration #"])
self.dict_of_nm_tree = {}
for swcnt in temp_spectra.SWCNT_list:
self.dict_of_nm_tree[swcnt] = QtGui.QTreeWidgetItem(self.nm_species, [swcnt.strNM()])
self.results_grid.addWidget(self.treeWidget)
graph_results_splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
graph_results_splitter.addWidget(self.canvas_frame)
graph_results_splitter.addWidget(self.results_frame)
self.top_layer_grid.addWidget(graph_results_splitter)
policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred,QtGui.QSizePolicy.Preferred)
policy.setHorizontalStretch(8)
self.canvas_frame.setSizePolicy(policy)
# Make figure for original line, background line, and total fit line
self.top_left_fig = matplotlib.figure.Figure()
self.top_left_plot = self.top_left_fig.add_subplot(111)
self.top_left_plot.set_ylabel('Absorbance [a.u.]')
self.top_left_plot.set_xlabel('Photon Energy [eV]')
self.top_left_plot.set_title('Total Absorbance Fit')
init_values = np.zeros(len(self.temp_spectra.X))
self.top_left_line, = self.top_left_plot.plot(self.temp_spectra.X, self.temp_spectra.Y, 'r-')
self.top_left_background_line, self.top_left_total_fit_line, = self.top_left_plot.plot(self.temp_spectra.X, init_values, 'k-', self.temp_spectra.X, init_values, 'b-', animated=True)
self.top_left_canvas = FigureCanvas(self.top_left_fig)
plotLayout = QtGui.QVBoxLayout()
plotLayout.addWidget(self.top_left_canvas)
self.top_left_frame.setLayout(plotLayout)
self.top_left_canvas.show()
self.top_left_canvas.draw()
self.top_left_canvas_BBox = self.top_left_plot.figure.canvas.copy_from_bbox(self.top_left_plot.bbox)
self.ax1 = self.top_left_plot.figure.axes[0]
self.ax1.set_xlim(self.temp_spectra.X.min(), self.temp_spectra.X.max())
self.ax1.set_ylim(0, self.temp_spectra.Y.max() + .05*self.temp_spectra.Y.max())
self.top_left_plot_old_size = self.top_left_plot.bbox.width, self.top_left_plot.bbox.height
# Make bottom figure
self.bottom_fig = matplotlib.figure.Figure()
self.bottom_plot = self.bottom_fig.add_subplot(111)
self.bottom_plot.set_ylabel('Absorbance [a.u.]')
self.bottom_plot.set_xlabel('Photon Energy [eV]')
self.bottom_plot.set_title('Background Subtracted Fit')
self.bottom_line_original_without_background, = self.bottom_plot.plot(self.temp_spectra.X, self.temp_spectra.Y, 'r-', linewidth=3, animated=True)
self.bottom_line, = self.bottom_plot.plot(self.temp_spectra.X, init_values, 'b-', linewidth=3, animated=True)
self.swcnt_line_dict = {}
for swcnt in temp_spectra.SWCNT_list:
self.swcnt_line_dict[swcnt], = self.bottom_plot.plot(self.temp_spectra.X, swcnt.line, animated=True)
self.bottom_canvas = FigureCanvas(self.bottom_fig)
bottomplotLayout = QtGui.QVBoxLayout()
bottomplotLayout.addWidget(self.bottom_canvas)
self.bottom_canvas_frame.setLayout(bottomplotLayout)
self.bottom_canvas.show()
self.bottom_canvas.draw()
self.bottom_canvas_BBox = self.bottom_plot.figure.canvas.copy_from_bbox(self.bottom_plot.bbox)
self.bottom_ax1 = self.bottom_plot.figure.axes[0]
self.bottom_ax1.set_xlim(self.temp_spectra.X.min(), self.temp_spectra.X.max())
self.bottom_ax1.set_ylim(0, self.temp_spectra.Y.max() + .05*self.temp_spectra.Y.max())
self.bottom_plot_old_size = self.bottom_plot.bbox.width, self.bottom_plot.bbox.height
# Make Thread associated with the tab
thread = QtCore.QThread(parent=self)
self.worker = self.temp_spectra
self.worker.moveToThread(thread)
self.worker.update_signal.connect(self.update_GUI)
self.worker.done_signal.connect(self.closeEvent)
self.start_comp.connect(self.worker.deconvolute)
self.kill_thread.connect(thread.quit)
thread.start()
@QtCore.pyqtSlot(Spectra)
def update_GUI(self, tmp_spectra):
# change the GUI to reflect changes made to Spectra
        # Get the first background of the plots to blit lines onto
if(tmp_spectra.step==1):
self.top_left_canvas_BBox = self.top_left_plot.figure.canvas.copy_from_bbox(self.top_left_plot.bbox)
self.bottom_canvas_BBox = self.bottom_plot.figure.canvas.copy_from_bbox(self.bottom_plot.bbox)
# If the size of the box changes, get that background instead
top_left_plot_current_size = self.top_left_plot.bbox.width, self.top_left_plot.bbox.height
bottom_plot_current_size = self.bottom_plot.bbox.width, self.bottom_plot.bbox.height
if( self.top_left_plot_old_size != top_left_plot_current_size or self.bottom_plot_old_size != bottom_plot_current_size):
self.top_left_plot_old_size = top_left_plot_current_size
self.top_left_plot.clear()
self.top_left_canvas.draw()
self.top_left_canvas_BBox = self.top_left_plot.figure.canvas.copy_from_bbox(self.top_left_plot.bbox)
self.top_left_plot.set_ylabel('Absorbance [a.u.]')
self.top_left_plot.set_xlabel('Photon Energy [eV]')
self.top_left_plot.set_title('Total Absorbance Fit')
self.bottom_plot_old_size = bottom_plot_current_size
self.bottom_plot.clear()
self.bottom_canvas.draw()
self.bottom_canvas_BBox = self.bottom_plot.figure.canvas.copy_from_bbox(self.bottom_plot.bbox)
self.bottom_plot.set_ylabel('Absorbance [a.u.]')
self.bottom_plot.set_xlabel('Photon Energy [eV]')
self.bottom_plot.set_title('Background Subtracted Fit')
# Write to the Top Left Plot with original data, background data, and total fit
self.top_left_background_line.set_ydata(tmp_spectra.background_model)
self.top_left_total_fit_line.set_ydata(tmp_spectra.model)
self.top_left_plot.figure.canvas.restore_region(self.top_left_canvas_BBox)
if( tmp_spectra.background_model.max() > tmp_spectra.Y.max()):
self.ax1.set_ylim(0, 1.05*tmp_spectra.background_model.max())
elif(tmp_spectra.model.max() > tmp_spectra.Y.max()):
self.ax1.set_ylim(0, 1.05*tmp_spectra.model.max())
else:
self.ax1.set_ylim(0, 1.05*tmp_spectra.Y.max())
self.top_left_plot.draw_artist(self.top_left_line)
self.top_left_plot.draw_artist(self.top_left_background_line)
self.top_left_plot.draw_artist(self.top_left_total_fit_line)
self.top_left_plot.figure.canvas.blit(self.top_left_plot.bbox)
# Write to the Bottom Plot with each nanotube peak
self.bottom_line_original_without_background.set_ydata(tmp_spectra.Y-tmp_spectra.background_model)
self.bottom_line.set_ydata(tmp_spectra.model_without_background)
try:
for swcnt in self.dict_of_nm_tree:
self.swcnt_line_dict[swcnt].set_ydata(swcnt.line)
self.swcnt_line_dict[swcnt].set_linewidth(1)
current_swcnt = None
for swcnt in self.dict_of_nm_tree:
if(self.dict_of_nm_tree[swcnt] == self.treeWidget.currentItem()):
current_swcnt = swcnt
break
self.swcnt_line_dict[current_swcnt].set_linewidth(4)
except KeyError:
pass
self.bottom_plot.figure.canvas.restore_region(self.bottom_canvas_BBox)
if( np.amax(tmp_spectra.Y-tmp_spectra.background_model) > np.amax(tmp_spectra.model_without_background) ):
self.bottom_ax1.set_ylim(0, 1.05*np.amax(tmp_spectra.Y-tmp_spectra.background_model))
if( np.amax(tmp_spectra.model_without_background) < 0.05):
self.bottom_ax1.set_ylim(0, 0.05)
else:
self.bottom_ax1.set_ylim(0, 1.05*np.amax(tmp_spectra.model_without_background))
self.bottom_plot.draw_artist(self.bottom_line_original_without_background)
self.bottom_plot.draw_artist(self.bottom_line)
for swcnt in tmp_spectra.SWCNT_list:
self.bottom_plot.draw_artist(self.swcnt_line_dict[swcnt])
self.bottom_plot.figure.canvas.blit(self.bottom_plot.bbox)
# Show percentage of species on the side bar
try:
percent_dict = tmp_spectra.species_percentage_dictionary
percent_error_dict = tmp_spectra.species_percentage_error_dictionary
for swcnt in tmp_spectra.SWCNT_list:
self.dict_of_nm_tree[swcnt].setText(1, str(round(percent_dict[swcnt], 0)).rstrip('0') + ' % +-' + str(round(percent_error_dict[swcnt], 1)))
self.semiconducting.setText(1, str(round(100.-tmp_spectra.metallic_percentage, 0)).rstrip('0') + ' %')
self.metallic.setText(1, str(round(tmp_spectra.metallic_percentage, 0)).rstrip('0') + ' %')
self.avg_diameter.setText(1, str(round(tmp_spectra.mean_diameter,2)) + ' nm')
self.step_in_tree.setText(1, str(tmp_spectra.step))
except KeyError:
pass
def output_results(self):
print "Making Excel Workbook..."
date_time = datetime.datetime.now().strftime("%Y-%m-%d(%H-%M-%S)")
name = str(self.temp_spectra.spectra_name)
book = xlsxwriter.Workbook(name + ' -- ' + date_time +'_OA_Results.xlsx')
OA_sheet_name = "Optical Absorption Data"
Results_sheet_name = "Results"
Other_params_name = "Other Parameters"
OA_sheet = book.add_worksheet(OA_sheet_name)
Results_sheet = book.add_worksheet(Results_sheet_name)
Other_params_sheet = book.add_worksheet(Other_params_name)
# Write x, y data for main and all species
OA_sheet.write('A1', "Energy (eV)")
OA_sheet.write_column('A2', self.temp_spectra.X)
OA_sheet.write('B1', name )
OA_sheet.write_column('B2', self.temp_spectra.Y)
OA_sheet.write('C1', "Model")
OA_sheet.write_column('C2', self.temp_spectra.model)
OA_sheet.write('D1', "Background")
OA_sheet.write_column('D2', self.temp_spectra.background_model)
for i, swcnt in enumerate(self.temp_spectra.SWCNT_list):
OA_sheet.write(0, 3+i, swcnt.strNM())
OA_sheet.write_column(1, 3+i, swcnt.line)
Results_sheet.write('A1', "(n,m)")
Results_sheet.write('B1', "%")
quant_dict = self.temp_spectra.species_percentage_dictionary.iteritems()
for i, (swcnt, amount) in enumerate(sorted(quant_dict)):
Results_sheet.write(i+1, 0, swcnt.strNM())
Results_sheet.write(i+1, 1, round(amount,1))
Results_sheet.write('D1', "Semiconducting %")
Results_sheet.write('D2', "Metallic %")
Results_sheet.write('E1', round(100-self.temp_spectra.calc_metallic_percentage(),1))
Results_sheet.write('E2', round(self.temp_spectra.calc_metallic_percentage(),1))
Other_params_sheet.write("A1", "(n,m)")
Other_params_sheet.write_column("A2", [swcnt.strNM() for swcnt in self.temp_spectra.SWCNT_list])
Other_params_sheet.write("B1", "SWCNT Solution Vector")
Other_params_sheet.write_column("B2", self.temp_spectra.swcnts_soln)
book.close()
print "Excel Workbook Made."
def start_computation(self):
self.start_comp.emit()
return
def closeEvent(self):
print 'done with processing'
self.kill_thread.emit()
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent = None):
self.spectra_list = []
self.tab_list = []
QtGui.QMainWindow.__init__(self)
self.setWindowTitle("Spectro")
screen_height = app.desktop().screenGeometry().height()
screen_width = app.desktop().screenGeometry().width()
self.resize(int(screen_width*APP_SCREEN_RATIO), int(screen_height*APP_SCREEN_RATIO))
self.setTabShape(QtGui.QTabWidget.Rounded)
self.centralwidget = QtGui.QWidget(self)
self.top_level_layout = QtGui.QGridLayout(self.centralwidget)
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
self.top_level_layout.addWidget(self.tabWidget, 1, 0, 25, 25)
open_spectra_button = QtGui.QPushButton("Open Spectra")
self.top_level_layout.addWidget(open_spectra_button, 0, 0)
QtCore.QObject.connect(open_spectra_button, QtCore.SIGNAL("clicked()"), self.open_spectra)
process_spectra_button = QtGui.QPushButton("Process Spectra")
self.top_level_layout.addWidget(process_spectra_button, 0, 1)
QtCore.QObject.connect(process_spectra_button, QtCore.SIGNAL("clicked()"), self.process_spectra)
save_results_button = QtGui.QPushButton("Save Results")
self.top_level_layout.addWidget(save_results_button, 0, 2)
QtCore.QObject.connect(save_results_button, QtCore.SIGNAL("clicked()"), self.output_results)
self.setCentralWidget(self.centralwidget)
self.centralwidget.setLayout(self.top_level_layout)
def open_spectra(self):
fileNameList = QtGui.QFileDialog.getOpenFileNames(caption="Select Files for Processing")
for file_name in fileNameList:
            # file_name is of the form ~ "C:/Users/you/someData.asc", so split it after the last "/" and before the "." in ".asc" or ".xls"
spectra_name = file_name.split('\\')[-1].split('.')[0]
xy = retrieve_XY(file_name)
X = WAVELENGTH_TO_ENERGY_CONVERSION/xy[:,0]
Y = xy[:,1]
self.spectra_list.append(Spectra(spectra_name, X, Y))
self.tab_list.append(Spectra_Tab(self.tabWidget, self.spectra_list[-1]))
self.tabWidget.addTab(self.tab_list[-1], spectra_name)
return
def process_spectra(self):
for tab in self.tab_list:
tab.start_computation()
return
def output_results(self):
for tab in self.tab_list:
tab.output_results()
return
if __name__ == "__main__":
app = QtGui.QApplication([])
win = MainWindow()
win.show()
sys.exit(app.exec_())
| gpl-3.0 |
IshankGulati/scikit-learn | examples/neighbors/plot_lof.py | 30 | 1939 | """
=================================================
Anomaly detection with Local Outlier Factor (LOF)
=================================================
This example presents the Local Outlier Factor (LOF) estimator. The LOF
algorithm is an unsupervised outlier detection method which computes the local
density deviation of a given data point with respect to its neighbors.
It considers as outliers the samples that have a substantially lower density than
their neighbors.
The number of neighbors considered (parameter n_neighbors) is typically
chosen 1) greater than the minimum number of objects a cluster has to contain,
so that other objects can be local outliers relative to this cluster, and 2)
smaller than the maximum number of close by objects that can potentially be
local outliers.
In practice, such information is generally not available, and taking
n_neighbors=20 appears to work well in general.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
np.random.seed(42)
# Generate train data
X = 0.3 * np.random.randn(100, 2)
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
X = np.r_[X + 2, X - 2, X_outliers]
# fit the model
clf = LocalOutlierFactor(n_neighbors=20)
y_pred = clf.fit_predict(X)
y_pred_outliers = y_pred[200:]
# plot the level sets of the decision function
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Local Outlier Factor (LOF)")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
a = plt.scatter(X[:200, 0], X[:200, 1], c='white')
b = plt.scatter(X[200:, 0], X[200:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a, b],
["normal observations",
"abnormal observations"],
loc="upper left")
plt.show()
| bsd-3-clause |
jm-begon/scikit-learn | sklearn/metrics/tests/test_ranking.py | 75 | 40883 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
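# This helper relies on the probabilistic interpretation
#     AUC = P(score_pos > score_neg),
# estimated as the fraction of all (positive, negative) pairs whose scores are
# ordered correctly; it matches the trapezoidal ROC area when no scores are tied
# across the two classes.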
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
            # i.e., percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
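# This helper computes average precision as the mean of the precision values
# evaluated at each relevant (positive) document when samples are ranked by
# decreasing score, i.e. AP = (1 / n_pos) * sum over positives of P@rank.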
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
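# A small, hand-worked sketch (not one of the original tests) showing that
# ``auc`` reduces to the trapezoidal rule; the sample points are illustrative
# assumptions chosen for easy arithmetic.
def _auc_trapezoid_example():
    x = np.array([0.0, 0.5, 1.0])
    y = np.array([0.0, 0.75, 1.0])
    # trapezoidal rule: sum 0.5 * (y[i] + y[i + 1]) * (x[i + 1] - x[i])
    manual = np.sum(0.5 * (y[1:] + y[:-1]) * np.diff(x))  # 0.1875 + 0.4375
    assert_almost_equal(auc(x, y), manual)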
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works without ties.
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account:
        # e.g., two labels tied at rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels with a better
            # (smaller) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
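# A hand-worked illustration (not one of the original tests) of the LRAP
# definition implemented by ``_my_lrap`` above; the toy labels and scores are
# illustrative assumptions.
def _lrap_worked_example():
    # y_true = [1, 0, 1] with scores [0.75, 0.5, 0.25]:
    # label 0 has rank 1 with 1 relevant label at or above it -> 1 / 1
    # label 2 has rank 3 with 2 relevant labels at or above it -> 2 / 3
    expected = (1.0 + 2.0 / 3) / 2
    assert_almost_equal(_my_lrap([[1, 0, 1]], [[0.75, 0.5, 0.25]]), expected)
    assert_almost_equal(
        label_ranking_average_precision_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
        expected)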
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial cases
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
pygeo/pycmbs | docs/source/figures/fig_portraet.py | 1 | 1116 | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from pycmbs.plots import GlecklerPlot
import matplotlib.pyplot as plt
# this is just an artificial example illustrating the basic usage of the
# Portraet diagram
fig = plt.figure(figsize=(8,6))
G = GlecklerPlot(fig=fig)
# register models
G.add_model('MPI-ESM-MR')
G.add_model('MPI-ESM-LR')
G.add_model('MPI-ESM-P')
# then register variables
G.add_variable('Ta')
G.add_variable('P')
# after that you can add values to be plotted
# pos=1: upper triangle, pos=2: lower triangle
# different positions are for different observations
G.add_data('Ta','MPI-ESM-MR',0.5,pos=1)
G.add_data('Ta','MPI-ESM-MR',-0.2,pos=2)
G.add_data('Ta','MPI-ESM-LR',0.3,pos=1)
G.add_data('Ta','MPI-ESM-LR',0.2,pos=2)
G.add_data('Ta','MPI-ESM-P',0.05,pos=1)
G.add_data('Ta','MPI-ESM-P',-0.1,pos=2)
G.add_data('P','MPI-ESM-P',0.5,pos=1)
G.add_data('P','MPI-ESM-P',-0.2,pos=2)
G.add_data('P','MPI-ESM-LR',0.3,pos=1)
G.add_data('P','MPI-ESM-MR',-0.2,pos=2)
G.plot()  # draw the plot
plt.show()
| mit |
taknevski/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 62 | 3960 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with ops.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(
dataframe, batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with ops.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [
j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mirnylab/cooler | cooler/create/_create.py | 1 | 40902 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from datetime import datetime
from six.moves import map
from pandas.api.types import is_categorical, is_integer
import os.path as op
import pandas as pd
import numpy as np
import posixpath
import tempfile
import warnings
import h5py
import simplejson as json
import six
from .._version import __version__, __format_version__, __format_version_scool__
from .._logging import get_logger
from ..core import put, get
from ..util import (
parse_cooler_uri,
get_chromsizes,
get_binsize,
infer_meta,
get_meta,
rlencode,
)
from ._ingest import validate_pixels
from . import (
MAGIC,
MAGIC_SCOOL,
URL,
CHROM_DTYPE,
CHROMID_DTYPE,
CHROMSIZE_DTYPE,
COORD_DTYPE,
BIN_DTYPE,
COUNT_DTYPE,
CHROMOFFSET_DTYPE,
BIN1OFFSET_DTYPE,
PIXEL_FIELDS,
PIXEL_DTYPES,
)
logger = get_logger("cooler.create")
def write_chroms(grp, chroms, h5opts):
"""
Write the chromosome table.
Parameters
----------
grp : h5py.Group
Group handle of an open HDF5 file with write permissions.
chroms : DataFrame
Chromosome table containing at least 'chrom' and 'length' columns
h5opts : dict
HDF5 dataset filter options.
"""
n_chroms = len(chroms)
names = np.array(chroms["name"], dtype=CHROM_DTYPE) # auto-adjusts char length
grp.create_dataset(
"name", shape=(n_chroms,), dtype=names.dtype, data=names, **h5opts
)
grp.create_dataset(
"length",
shape=(n_chroms,),
dtype=CHROMSIZE_DTYPE,
data=chroms["length"],
**h5opts
)
# Extra columns
columns = list(chroms.keys())
for col in ["name", "length"]:
columns.remove(col)
if columns:
put(grp, chroms[columns])
def write_bins(grp, bins, chromnames, h5opts, chrom_as_enum=True):
"""
Write the genomic bin table.
Parameters
----------
grp : h5py.Group
Group handle of an open HDF5 file with write permissions.
bins : pandas.DataFrame
BED-like data frame with at least three columns: ``chrom``, ``start``,
``end``, sorted by ``chrom`` then ``start``, and forming a complete
genome segmentation. The ``chrom`` column must be sorted according to
the ordering in ``chroms``.
chromnames : sequence of str
Contig names.
h5opts : dict
HDF5 dataset filter options.
"""
n_chroms = len(chromnames)
n_bins = len(bins)
idmap = dict(zip(chromnames, range(n_chroms)))
# Convert chrom names to enum
chrom_ids = [idmap[chrom] for chrom in bins["chrom"]]
if chrom_as_enum:
chrom_dtype = h5py.special_dtype(enum=(CHROMID_DTYPE, idmap))
else:
chrom_dtype = CHROMID_DTYPE
# Store bins
try:
chrom_dset = grp.create_dataset(
"chrom", shape=(n_bins,), dtype=chrom_dtype, data=chrom_ids, **h5opts
)
except ValueError:
# If too many scaffolds for HDF5 enum header,
# try storing chrom IDs as raw int instead
if chrom_as_enum:
chrom_as_enum = False
chrom_dtype = CHROMID_DTYPE
chrom_dset = grp.create_dataset(
"chrom", shape=(n_bins,), dtype=chrom_dtype, data=chrom_ids, **h5opts
)
else:
raise
if not chrom_as_enum:
chrom_dset.attrs["enum_path"] = u"/chroms/name"
grp.create_dataset(
"start", shape=(n_bins,), dtype=COORD_DTYPE, data=bins["start"], **h5opts
)
grp.create_dataset(
"end", shape=(n_bins,), dtype=COORD_DTYPE, data=bins["end"], **h5opts
)
# Extra columns
columns = list(bins.keys())
for col in ["chrom", "start", "end"]:
columns.remove(col)
if columns:
put(grp, bins[columns])
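# A minimal usage sketch (not part of the original module) for ``write_chroms``
# and ``write_bins``; the output path, chromosome sizes and 10 bp bins are
# illustrative assumptions.
def _write_chroms_and_bins_example(path="tables_example.h5"):
    chroms = pd.DataFrame({"name": ["chr1", "chr2"], "length": [30, 20]},
                          columns=["name", "length"])
    bins = pd.DataFrame({"chrom": ["chr1"] * 3 + ["chr2"] * 2,
                         "start": [0, 10, 20, 0, 10],
                         "end": [10, 20, 30, 10, 20]})
    h5opts = _set_h5opts(None)
    with h5py.File(path, "w") as f:
        write_chroms(f.create_group("chroms"), chroms, h5opts)
        write_bins(f.create_group("bins"), bins, chroms["name"], h5opts)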
def prepare_pixels(grp, n_bins, max_size, columns, dtypes, h5opts):
columns = list(columns)
init_size = min(5 * n_bins, max_size)
grp.create_dataset(
"bin1_id",
dtype=dtypes.get("bin1_id", BIN_DTYPE),
shape=(init_size,),
maxshape=(max_size,),
**h5opts
)
grp.create_dataset(
"bin2_id",
dtype=dtypes.get("bin2_id", BIN_DTYPE),
shape=(init_size,),
maxshape=(max_size,),
**h5opts
)
if "count" in columns:
grp.create_dataset(
"count",
dtype=dtypes.get("count", COUNT_DTYPE),
shape=(init_size,),
maxshape=(max_size,),
**h5opts
)
for col in ["bin1_id", "bin2_id", "count"]:
try:
columns.remove(col)
except ValueError:
pass
if columns:
for col in columns:
grp.create_dataset(
col,
dtype=dtypes.get(col, float),
shape=(init_size,),
maxshape=(max_size,),
**h5opts
)
def write_pixels(filepath, grouppath, columns, iterable, h5opts, lock):
"""
Write the non-zero pixel table.
Parameters
----------
filepath : str
Path to HDF5 output file.
grouppath : str
Qualified path to destination HDF5 group.
columns : sequence
Sequence of column names
iterable : an iterable object
An object that processes and yields binned contacts from some input
source as a stream of chunks. The chunks must be either pandas
DataFrames or mappings of column names to arrays.
h5opts : dict
HDF5 filter options.
lock : multiprocessing.Lock, optional
Optional lock to synchronize concurrent HDF5 file access.
"""
nnz = 0
total = 0
for i, chunk in enumerate(iterable):
if isinstance(chunk, pd.DataFrame):
chunk = {k: v.values for k, v in six.iteritems(chunk)}
try:
if lock is not None:
lock.acquire()
logger.debug("writing chunk {}".format(i))
with h5py.File(filepath, "r+") as fw:
grp = fw[grouppath]
dsets = [grp[col] for col in columns]
n = len(chunk[columns[0]])
for col, dset in zip(columns, dsets):
dset.resize((nnz + n,))
dset[nnz : nnz + n] = chunk[col]
nnz += n
if "count" in chunk:
total += chunk["count"].sum()
fw.flush()
finally:
if lock is not None:
lock.release()
return nnz, total
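# An illustrative sketch (not part of the original module) of the chunk format
# consumed by ``write_pixels``: each chunk is a DataFrame or a mapping of column
# names to equal-length arrays. The values below are assumptions.
def _pixel_chunk_examples():
    chunk_as_dict = {
        "bin1_id": np.array([0, 0, 1]),
        "bin2_id": np.array([0, 1, 1]),
        "count": np.array([4, 2, 7]),
    }
    chunk_as_frame = pd.DataFrame(chunk_as_dict)
    return [chunk_as_dict, chunk_as_frame]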
def index_pixels(grp, n_bins, nnz):
bin1 = grp["bin1_id"]
bin1_offset = np.zeros(n_bins + 1, dtype=BIN1OFFSET_DTYPE)
curr_val = 0
for start, length, value in zip(*rlencode(bin1, 1000000)):
bin1_offset[curr_val : value + 1] = start
curr_val = value + 1
bin1_offset[curr_val:] = nnz
return bin1_offset
def index_bins(grp, n_chroms, n_bins):
chrom_ids = grp["chrom"]
chrom_offset = np.zeros(n_chroms + 1, dtype=CHROMOFFSET_DTYPE)
curr_val = 0
for start, length, value in zip(*rlencode(chrom_ids)):
chrom_offset[curr_val : value + 1] = start
curr_val = value + 1
chrom_offset[curr_val:] = n_bins
return chrom_offset
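# A hand-worked sketch (not part of the original module) of the offset semantics
# produced by ``index_pixels`` and ``index_bins`` above; the toy arrays are
# illustrative assumptions.
def _offset_index_example():
    # For bin1_id = [0, 0, 1, 3] with n_bins = 4 and nnz = 4, ``index_pixels``
    # yields [0, 2, 3, 3, 4]: rows 0-1 have bin1_id 0, row 2 has bin1_id 1,
    # bin1_id 2 has no pixels, and row 3 has bin1_id 3.
    bin1_offset = np.array([0, 2, 3, 3, 4])
    # pixels whose first axis is bin 1 occupy rows bin1_offset[1]:bin1_offset[2]
    return slice(bin1_offset[1], bin1_offset[2])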
def write_indexes(grp, chrom_offset, bin1_offset, h5opts):
"""
Write the indexes.
Parameters
----------
grp : h5py.Group
Group handle of an open HDF5 file with write permissions.
chrom_offset : sequence
Lookup table: chromosome ID -> first row in bin table (bin ID)
corresponding to that chromosome.
bin1_offset : sequence
Lookup table: genomic bin ID -> first row in pixel table (pixel ID)
having that bin on the first axis.
"""
grp.create_dataset(
"chrom_offset",
shape=(len(chrom_offset),),
dtype=CHROMOFFSET_DTYPE,
data=chrom_offset,
**h5opts
)
grp.create_dataset(
"bin1_offset",
shape=(len(bin1_offset),),
dtype=BIN1OFFSET_DTYPE,
data=bin1_offset,
**h5opts
)
def write_info(grp, info, scool=False):
"""
Write the file description and metadata attributes.
Parameters
----------
grp : h5py.Group
Group handle of an open HDF5 file with write permissions.
info : dict
Dictionary, unnested with the possible exception of the ``metadata``
key. ``metadata``, if present, must be JSON-serializable.
Required keys
-------------
nbins : int
number of genomic bins
nnz : int
number of non-zero pixels
"""
assert "nbins" in info
if not scool:
assert "nnz" in info
info.setdefault("genome-assembly", "unknown")
info["metadata"] = json.dumps(info.get("metadata", {}))
info["creation-date"] = datetime.now().isoformat()
info["generated-by"] = six.text_type("cooler-" + __version__)
if scool:
info["format"] = MAGIC_SCOOL
info["format-version"] = six.text_type(__format_version_scool__)
else:
info["format"] = MAGIC
info["format-version"] = six.text_type(__format_version__)
info["format-url"] = URL
grp.attrs.update(info)
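# A minimal sketch (not part of the original module) of the ``info`` mapping
# accepted by ``write_info``; the counts and metadata are illustrative
# assumptions.
def _write_info_example(h5group):
    info = {
        "nchroms": 2,
        "nbins": 5,
        "nnz": 12,
        "sum": 40,
        "bin-type": u"fixed",
        "bin-size": 10,
        "metadata": {"description": "toy example"},
    }
    write_info(h5group, info)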
def _rename_chroms(grp, rename_dict, h5opts):
chroms = get(grp["chroms"]).set_index("name")
n_chroms = len(chroms)
new_names = np.array(
chroms.rename(rename_dict).index.values, dtype=CHROM_DTYPE
) # auto-adjusts char length
del grp["chroms/name"]
grp["chroms"].create_dataset(
"name", shape=(n_chroms,), dtype=new_names.dtype, data=new_names, **h5opts
)
bins = get(grp["bins"])
n_bins = len(bins)
idmap = dict(zip(new_names, range(n_chroms)))
if is_categorical(bins["chrom"]) or is_integer(bins["chrom"]):
chrom_ids = bins["chrom"].cat.codes
chrom_dtype = h5py.special_dtype(enum=(CHROMID_DTYPE, idmap))
del grp["bins/chrom"]
try:
grp["bins"].create_dataset(
"chrom", shape=(n_bins,), dtype=chrom_dtype, data=chrom_ids, **h5opts
)
except ValueError:
# If HDF5 enum header would be too large,
# try storing chrom IDs as raw int instead
chrom_dtype = CHROMID_DTYPE
grp["bins"].create_dataset(
"chrom", shape=(n_bins,), dtype=chrom_dtype, data=chrom_ids, **h5opts
)
def rename_chroms(clr, rename_dict, h5opts=None):
"""
Substitute existing chromosome/contig names for new ones. They will be
written to the file and the Cooler object will be refreshed.
Parameters
----------
clr : Cooler
Cooler object that can be opened with write permissions.
rename_dict : dict
Dictionary of old -> new chromosome names. Any names omitted from
the dictionary will be kept as is.
h5opts : dict, optional
HDF5 filter options.
"""
h5opts = _set_h5opts(h5opts)
with clr.open("r+") as f:
_rename_chroms(f, rename_dict, h5opts)
clr._refresh()
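# An illustrative usage sketch (not part of the original module) for
# ``rename_chroms``; the URI and the name mapping are assumptions for
# demonstration only.
def _rename_chroms_example(cool_uri="example.cool"):
    from ..api import Cooler
    clr = Cooler(cool_uri)  # the underlying file must be writable
    rename_chroms(clr, {"1": "chr1", "2": "chr2"})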
def _get_dtypes_arg(dtypes, kwargs):
if "dtype" in kwargs:
if dtypes is None:
dtypes = kwargs.pop("dtype")
warnings.warn("Use dtypes= instead of dtype=", FutureWarning)
else:
raise ValueError(
'Received both "dtypes" and "dtype" arguments. '
'Please use "dtypes" to provide a column name -> dtype mapping. '
'"dtype" remains as an alias but is deprecated.'
)
return dtypes
def _set_h5opts(h5opts):
result = {}
if h5opts is not None:
result.update(h5opts)
available_opts = {
"chunks",
"maxshape",
"compression",
"compression_opts",
"scaleoffset",
"shuffle",
"fletcher32",
"fillvalue",
"track_times",
}
for key in result.keys():
if key not in available_opts:
raise ValueError("Unknown storage option '{}'.".format(key))
result.setdefault("compression", "gzip")
if result["compression"] == "gzip" and "compression_opts" not in result:
result["compression_opts"] = 6
result.setdefault("shuffle", True)
return result
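# A sketch of a typical ``h5opts`` override accepted by the writers in this
# module; every key must belong to the whitelist enforced by ``_set_h5opts``.
# The particular values are illustrative assumptions.
_EXAMPLE_H5OPTS = {
    "compression": "gzip",
    "compression_opts": 9,
    "shuffle": True,
    "chunks": True,
}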
def create(
cool_uri,
bins,
pixels,
columns=None,
dtypes=None,
metadata=None,
assembly=None,
symmetric_upper=True,
mode=None,
h5opts=None,
boundscheck=True,
triucheck=True,
dupcheck=True,
ensure_sorted=False,
lock=None,
append=False,
append_scool=False,
scool_root_uri=None,
**kwargs
):
"""
Create a new Cooler.
Deprecated parameters
---------------------
chromsizes : Series
Chromsizes are now inferred from ``bins``.
append : bool, optional
Append new Cooler to the file if it exists. If False, an existing file
with the same name will be truncated. Default is False.
Use the ``mode`` argument instead.
dtype : dict, optional
Dictionary mapping column names in the pixel table to dtypes.
Use the ``dtypes`` argument instead.
"""
file_path, group_path = parse_cooler_uri(cool_uri)
if mode is None:
mode = "a" if append else "w"
h5opts = _set_h5opts(h5opts)
if not isinstance(bins, pd.DataFrame):
raise ValueError(
"Second positional argument must be a pandas DataFrame. "
"Note that the `chromsizes` argument is now deprecated: "
"see documentation for `create`."
)
    if append_scool and scool_root_uri is None:
raise ValueError(
"If the parameter `append_scool` is set, the parameter `scool_root_uri` must be defined."
)
dtypes = _get_dtypes_arg(dtypes, kwargs)
for col in ["chrom", "start", "end"]:
if col not in bins.columns:
raise ValueError("Missing column from bin table: '{}'.".format(col))
# Populate expected pixel column names. Include user-provided value
# columns.
if columns is None:
columns = ["bin1_id", "bin2_id", "count"]
else:
columns = list(columns)
for col in ["bin1_id", "bin2_id"]: # don't include count!
if col not in columns:
columns.insert(0, col)
# Populate dtypes for expected pixel columns, and apply user overrides.
if dtypes is None:
dtypes = dict(PIXEL_DTYPES)
else:
dtypes_ = dict(dtypes)
dtypes = dict(PIXEL_DTYPES)
dtypes.update(dtypes_)
# Get empty "meta" header frame (assigns the undeclared dtypes).
# Any columns from the input not in meta will be ignored.
meta = get_meta(columns, dtypes, default_dtype=float)
# Determine the appropriate iterable
try:
from dask.dataframe import DataFrame as dask_df
except (ImportError, AttributeError): # pragma: no cover
dask_df = ()
if isinstance(pixels, dask_df):
iterable = map(lambda x: x.compute(), pixels.to_delayed())
input_columns = infer_meta(pixels).columns
elif isinstance(pixels, pd.DataFrame):
iterable = (pixels,)
input_columns = infer_meta(pixels).columns
elif isinstance(pixels, dict):
iterable = (pixels,)
input_columns = infer_meta([(k, v.dtype) for (k, v) in pixels.items()]).columns
else:
iterable = pixels
input_columns = None
# If possible, ensure all expected columns are available
if input_columns is not None:
for col in columns:
if col not in input_columns:
col_type = "Standard" if col in PIXEL_FIELDS else "User"
raise ValueError(
"{} column not found in input: '{}'".format(col_type, col)
)
# Prepare chroms and bins
bins = bins.copy()
bins["chrom"] = bins["chrom"].astype(object)
chromsizes = get_chromsizes(bins)
try:
chromsizes = six.iteritems(chromsizes)
except AttributeError:
pass
chromnames, lengths = zip(*chromsizes)
chroms = pd.DataFrame(
{"name": chromnames, "length": lengths}, columns=["name", "length"]
)
binsize = get_binsize(bins)
n_chroms = len(chroms)
n_bins = len(bins)
if not symmetric_upper and triucheck:
warnings.warn(
"Creating a non-symmetric matrix, but `triucheck` was set to "
"True. Changing to False."
)
triucheck = False
# Chain input validation to the end of the pipeline
if boundscheck or triucheck or dupcheck or ensure_sorted:
validator = validate_pixels(
n_bins, boundscheck, triucheck, dupcheck, ensure_sorted
)
iterable = map(validator, iterable)
# Create root group
with h5py.File(file_path, mode) as f:
logger.info('Creating cooler at "{}::{}"'.format(file_path, group_path))
if group_path == "/":
for name in ["chroms", "bins", "pixels", "indexes"]:
if name in f:
del f[name]
else:
try:
f.create_group(group_path)
except ValueError:
del f[group_path]
f.create_group(group_path)
# Write chroms, bins and pixels
if append_scool:
src_path, src_group = parse_cooler_uri(scool_root_uri)
dst_path, dst_group = parse_cooler_uri(cool_uri)
with h5py.File(src_path, "r+") as src, h5py.File(dst_path, "r+") as dst:
dst[dst_group]["chroms"] = src["chroms"]
# hard link to root bins table, but only the three main datasets
dst[dst_group]["bins/chrom"] = src["bins/chrom"]
dst[dst_group]["bins/start"]= src["bins/start"]
dst[dst_group]["bins/end"]= src["bins/end"]
# create per cell the additional columns e.g. 'weight'
# these columns are individual for each cell
columns = list(bins.keys())
for col in ["chrom", "start", "end"]:
columns.remove(col)
if columns:
put(dst[dst_group]['bins'], bins[columns])
with h5py.File(file_path, "r+") as f:
h5 = f[group_path]
grp = h5.create_group("pixels")
if symmetric_upper:
max_size = n_bins * (n_bins - 1) // 2 + n_bins
else:
max_size = n_bins * n_bins
prepare_pixels(grp, n_bins, max_size, meta.columns, dict(meta.dtypes), h5opts)
else:
with h5py.File(file_path, "r+") as f:
h5 = f[group_path]
logger.info("Writing chroms")
grp = h5.create_group("chroms")
write_chroms(grp, chroms, h5opts)
logger.info("Writing bins")
grp = h5.create_group("bins")
write_bins(grp, bins, chroms["name"], h5opts)
grp = h5.create_group("pixels")
if symmetric_upper:
max_size = n_bins * (n_bins - 1) // 2 + n_bins
else:
max_size = n_bins * n_bins
prepare_pixels(grp, n_bins, max_size, meta.columns, dict(meta.dtypes), h5opts)
# Multiprocess HDF5 reading is supported only if the same HDF5 file is not
# open in write mode anywhere. To read and write to the same file, pass a
# lock shared with the HDF5 reading processes. `write_pixels` will acquire
# it and open the file for writing for the duration of each write step
# only. After it closes the file and releases the lock, the reading
# processes will have to re-acquire the lock and re-open the file to obtain
# the updated file state for reading.
logger.info("Writing pixels")
target = posixpath.join(group_path, "pixels")
nnz, ncontacts = write_pixels(
file_path, target, meta.columns, iterable, h5opts, lock
)
# Write indexes
with h5py.File(file_path, "r+") as f:
h5 = f[group_path]
logger.info("Writing indexes")
grp = h5.create_group("indexes")
chrom_offset = index_bins(h5["bins"], n_chroms, n_bins)
bin1_offset = index_pixels(h5["pixels"], n_bins, nnz)
write_indexes(grp, chrom_offset, bin1_offset, h5opts)
logger.info("Writing info")
info = {}
info["bin-type"] = u"fixed" if binsize is not None else u"variable"
info["bin-size"] = binsize if binsize is not None else u"null"
info["storage-mode"] = u"symmetric-upper" if symmetric_upper else u"square"
info["nchroms"] = n_chroms
info["nbins"] = n_bins
info["sum"] = ncontacts
info["nnz"] = nnz
if assembly is not None:
info["genome-assembly"] = assembly
if metadata is not None:
info["metadata"] = metadata
write_info(h5, info)
def create_from_unordered(
cool_uri,
bins,
chunks,
columns=None,
dtypes=None,
mode=None,
mergebuf=int(20e6),
delete_temp=True,
temp_dir=None,
max_merge=200,
**kwargs
):
"""
Create a Cooler in two passes via an external sort mechanism. In the first
pass, a sequence of data chunks are processed and sorted in memory and saved
to temporary Coolers. In the second pass, the temporary Coolers are merged
into the output. This way the individual chunks do not need to be provided
in any particular order.
"""
from ..api import Cooler
from ..reduce import CoolerMerger
# chromsizes = get_chromsizes(bins)
bins = bins.copy()
bins["chrom"] = bins["chrom"].astype(object)
if columns is not None:
columns = [col for col in columns if col not in {"bin1_id", "bin2_id"}]
if temp_dir is None:
temp_dir = op.dirname(parse_cooler_uri(cool_uri)[0])
elif temp_dir == "-":
temp_dir = None # makes tempfile module use the system dir
dtypes = _get_dtypes_arg(dtypes, kwargs)
temp_files = []
# Sort pass
tf = tempfile.NamedTemporaryFile(
suffix=".multi.cool", delete=delete_temp, dir=temp_dir
)
temp_files.append(tf)
uris = []
for i, chunk in enumerate(chunks):
uri = tf.name + "::" + str(i)
uris.append(uri)
logger.info("Writing chunk {}: {}".format(i, uri))
create(uri, bins, chunk, columns=columns, dtypes=dtypes, mode="a", **kwargs)
# Merge passes
n = len(uris)
if n > max_merge > 0:
# Recursive merge: do the first of two merge passes.
# Divide into ~sqrt(n) merges
edges = np.linspace(0, n, int(np.sqrt(n)), dtype=int)
tf2 = tempfile.NamedTemporaryFile(
suffix=".multi.cool", delete=delete_temp, dir=temp_dir
)
temp_files.append(tf2)
uris2 = []
for lo, hi in zip(edges[:-1], edges[1:]):
chunk_subset = CoolerMerger(
[Cooler(uri) for uri in uris[lo:hi]], mergebuf, columns=columns
)
uri = tf2.name + "::" + "{}-{}".format(lo, hi)
uris2.append(uri)
logger.info("Merging chunks {}-{}: {}".format(lo, hi, uri))
create(
uri,
bins,
chunk_subset,
columns=columns,
dtypes=dtypes,
mode="a",
**kwargs
)
final_uris = uris2
else:
# Do a single merge pass
final_uris = uris
# Do the final merge pass
chunks = CoolerMerger(
[Cooler(uri) for uri in final_uris], mergebuf, columns=columns
)
logger.info("Merging into {}".format(cool_uri))
create(cool_uri, bins, chunks, columns=columns, dtypes=dtypes, mode=mode, **kwargs)
del temp_files
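# An illustrative sketch (not part of the original module) of feeding unordered
# chunks to ``create_from_unordered``; the tab-separated input files and their
# column layout are assumptions for demonstration only.
def _create_from_unordered_example(cool_uri, bins, chunk_paths):
    def iter_chunks():
        for path in chunk_paths:
            # each file is assumed to hold bin1_id, bin2_id and count columns
            yield pd.read_csv(path, sep="\t")
    create_from_unordered(cool_uri, bins, iter_chunks())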
def append(cool_uri, table, data, chunked=False, force=False, h5opts=None, lock=None): # pragma: no cover
"""
Append one or more data columns to an existing table.
Parameters
----------
cool_uri : str
Path to Cooler file or URI to Cooler group.
table : str
Name of table (HDF5 group).
data : dict-like
DataFrame, Series or mapping of column names to data. If the input is a
dask DataFrame or Series, the data is written in chunks.
chunked : bool, optional
If True, the values of the data dict are treated as separate chunk
iterators of column data.
force : bool, optional
If True, replace existing columns with the same name as the input.
h5opts : dict, optional
HDF5 dataset filter options to use (compression, shuffling,
checksumming, etc.). Default is to use autochunking and GZIP
compression, level 6.
lock : multiprocessing.Lock, optional
Optional lock to synchronize concurrent HDF5 file access.
"""
h5opts = _set_h5opts(h5opts)
file_path, group_path = parse_cooler_uri(cool_uri)
try:
from dask.dataframe import DataFrame as dask_df, Series as dask_series
except (ImportError, AttributeError):
dask_df = ()
dask_series = ()
if isinstance(data, dask_series):
data = data.to_frame()
try:
names = data.keys()
except AttributeError:
names = data.columns
with h5py.File(file_path, "r+") as f:
h5 = f[group_path]
for name in names:
if name in h5[table]:
if not force:
raise ValueError(
"'{}' column already exists. ".format(name)
+ "Use --force option to overwrite."
)
else:
del h5[table][name]
if isinstance(data, dask_df):
# iterate over dataframe chunks
            i = 0
            for chunk in data.to_delayed():
                chunk = chunk.compute()
                try:
                    if lock is not None:
                        lock.acquire()
                    put(h5[table], chunk, lo=i, h5opts=h5opts)
                finally:
                    if lock is not None:
                        lock.release()
                i += len(chunk)
elif chunked:
# iterate over chunks from each column
for key in data.keys():
i = 0
for chunk in data[key]:
try:
if lock is not None:
lock.acquire()
put(h5[table], {key: chunk}, lo=i, h5opts=h5opts)
finally:
if lock is not None:
lock.release()
i += len(chunk)
else:
# write all the data
try:
if lock is not None:
lock.acquire()
put(h5[table], data, lo=0, h5opts=h5opts)
finally:
if lock is not None:
lock.release()
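# An illustrative sketch (not part of the original module) of adding a per-bin
# column with ``append``; the URI and the uniform weights are assumptions, and
# ``n_bins`` must equal the number of rows in the target bin table.
def _append_example(cool_uri="example.cool", n_bins=100):
    weights = np.ones(n_bins, dtype=float)
    append(cool_uri, "bins", {"weight": weights}, force=True)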
_DOC_OTHER_PARAMS = """
columns : sequence of str, optional
Customize which value columns from the input pixels to store in the
cooler. Non-standard value columns will be given dtype ``float64``
    unless overridden using the ``dtypes`` argument. If ``None``, we only
attempt to store a value column named ``"count"``.
dtypes : dict, optional
Dictionary mapping column names to dtypes. Can be used to override the
default dtypes of ``bin1_id``, ``bin2_id`` or ``count`` or assign
dtypes to custom value columns. Non-standard value columns given in
``dtypes`` must also be provided in the ``columns`` argument or they
will be ignored.
metadata : dict, optional
Experiment metadata to store in the file. Must be JSON compatible.
assembly : str, optional
Name of genome assembly.
ordered : bool, optional [default: False]
If the input chunks of pixels are provided with correct triangularity
and in ascending order of (``bin1_id``, ``bin2_id``), set this to
``True`` to write the cooler in one step.
If ``False`` (default), we create the cooler in two steps using an
external sort mechanism. See Notes for more details.
symmetric_upper : bool, optional [default: True]
If True, sets the file's storage-mode property to ``symmetric-upper``:
use this only if the input data references the upper triangle of a
symmetric matrix! For all other cases, set this option to False.
mode : {'w' , 'a'}, optional [default: 'w']
Write mode for the output file. 'a': if the output file exists, append
the new cooler to it. 'w': if the output file exists, it will be
truncated. Default is 'w'.
Other parameters
----------------
mergebuf : int, optional
    Maximum number of records to buffer in memory at any given time during
the merge step.
delete_temp : bool, optional
Whether to delete temporary files when finished.
    Setting this to False can be useful for debugging. Default is True.
temp_dir : str, optional
Create temporary files in a specified directory instead of the same
directory as the output file. Pass ``-`` to use the system default.
max_merge : int, optional
If merging more than ``max_merge`` chunks, do the merge recursively in
two passes.
h5opts : dict, optional
HDF5 dataset filter options to use (compression, shuffling,
checksumming, etc.). Default is to use autochunking and GZIP
compression, level 6.
lock : multiprocessing.Lock, optional
Optional lock to control concurrent access to the output file.
ensure_sorted : bool, optional
Ensure that each input chunk is properly sorted.
boundscheck : bool, optional
Input validation: Check that all bin IDs lie in the expected range.
dupcheck : bool, optional
Input validation: Check that no duplicate pixels exist within any chunk.
triucheck : bool, optional
Input validation: Check that ``bin1_id`` <= ``bin2_id`` when creating
coolers in symmetric-upper mode.
""".strip()
_DOC_NOTES = """
Notes
-----
If the pixel chunks are provided in the correct order required for the
output to be properly sorted, then the cooler can be created in a single
step by setting ``ordered=True``.
If not, the cooler is created in two steps via an external sort mechanism.
In the first pass, the sequence of pixel chunks are processed and sorted in
memory and saved to temporary coolers. In the second pass, the temporary
coolers are merged into the output file. This way the individual chunks do
not need to be provided in any particular order. When ``ordered=False``,
the following options for the merge step are available: ``mergebuf``,
``delete_temp``, ``temp_dir``, ``max_merge``.
Each chunk of pixels will go through a validation pipeline, which can be
customized with the following options: ``boundscheck``, ``triucheck``,
``dupcheck``, ``ensure_sorted``.
""".strip()
def _format_docstring(**kwargs):
def decorate(func):
func.__doc__ = func.__doc__.format(**kwargs)
return func
return decorate
@_format_docstring(other_parameters=_DOC_OTHER_PARAMS, notes=_DOC_NOTES)
def create_cooler(
cool_uri,
bins,
pixels,
columns=None,
dtypes=None,
metadata=None,
assembly=None,
ordered=False,
symmetric_upper=True,
mode="w",
mergebuf=int(20e6),
delete_temp=True,
temp_dir=None,
max_merge=200,
boundscheck=True,
dupcheck=True,
triucheck=True,
ensure_sorted=False,
h5opts=None,
lock=None,
):
r"""
Create a cooler from bins and pixels at the specified URI.
Because the number of pixels is often very large, the input pixels are
normally provided as an iterable (e.g., an iterator or generator) of
DataFrame **chunks** that fit in memory.
.. versionadded:: 0.8.0
Parameters
----------
cool_uri : str
Path to cooler file or URI string. If the file does not exist,
it will be created.
bins : pandas.DataFrame
Segmentation of the chromosomes into genomic bins as a BED-like
DataFrame with columns ``chrom``, ``start`` and ``end``. May contain
additional columns.
pixels : DataFrame, dictionary, or iterable of either
A table, given as a dataframe or a column-oriented dict, containing
columns labeled ``bin1_id``, ``bin2_id`` and ``count``, sorted by
(``bin1_id``, ``bin2_id``). If additional columns are included in the
pixel table, their names and dtypes must be specified using the
``columns`` and ``dtypes`` arguments. For larger input data, an
**iterable** can be provided that yields the pixel data as a sequence
of chunks. If the input is a dask DataFrame, it will also be processed
one chunk at a time.
{other_parameters}
See also
--------
cooler.create_scool
cooler.create.sanitize_records
cooler.create.sanitize_pixels
{notes}
"""
# dispatch to the appropriate creation method
if isinstance(pixels, (pd.DataFrame, dict)):
pixels = pd.DataFrame(pixels).sort_values(["bin1_id", "bin2_id"])
ordered = True
if ordered:
create(
cool_uri,
bins,
pixels,
columns=columns,
dtypes=dtypes,
metadata=metadata,
assembly=assembly,
symmetric_upper=symmetric_upper,
mode=mode,
boundscheck=boundscheck,
dupcheck=dupcheck,
triucheck=triucheck,
ensure_sorted=ensure_sorted,
h5opts=h5opts,
lock=lock,
)
else:
create_from_unordered(
cool_uri,
bins,
pixels,
columns=columns,
dtypes=dtypes,
metadata=metadata,
assembly=assembly,
symmetric_upper=symmetric_upper,
mode=mode,
boundscheck=boundscheck,
dupcheck=dupcheck,
triucheck=triucheck,
ensure_sorted=ensure_sorted,
h5opts=h5opts,
lock=lock,
mergebuf=mergebuf,
delete_temp=delete_temp,
temp_dir=temp_dir,
max_merge=max_merge,
)
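# Usage sketch (illustrative only, not part of the original module; the file
# name and pixel values below are hypothetical). It shows create_cooler being
# fed a generator of pixel chunks with ordered=False, which triggers the
# two-pass external sort described in the Notes section above.
def _example_create_cooler_usage():
    import pandas as pd

    # Bin table: ten 1 Mb bins on a single hypothetical chromosome.
    bins = pd.DataFrame({
        "chrom": ["chr1"] * 10,
        "start": [i * 1000000 for i in range(10)],
        "end": [(i + 1) * 1000000 for i in range(10)],
    })

    # Pixel chunks may arrive in any global order when ordered=False; each
    # chunk here is itself sorted by (bin1_id, bin2_id).
    def pixel_chunks():
        yield pd.DataFrame({"bin1_id": [3, 4], "bin2_id": [5, 6], "count": [2, 1]})
        yield pd.DataFrame({"bin1_id": [0, 1], "bin2_id": [0, 2], "count": [7, 3]})

    create_cooler(
        "example.cool",      # hypothetical output URI
        bins,
        pixel_chunks(),
        ordered=False,
        symmetric_upper=True,
    )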
@_format_docstring(other_parameters=_DOC_OTHER_PARAMS, notes=_DOC_NOTES)
def create_scool(
cool_uri,
bins,
cell_name_pixels_dict,
columns=None,
dtypes=None,
metadata=None,
assembly=None,
ordered=False,
symmetric_upper=True,
mode="w",
mergebuf=int(20e6),
delete_temp=True,
temp_dir=None,
max_merge=200,
boundscheck=True,
dupcheck=True,
triucheck=True,
ensure_sorted=False,
h5opts=None,
lock=None,
**kwargs):
r"""
Create a single-cell (scool) file.
For each cell store a cooler matrix under **/cells**, where all matrices
have the same dimensions.
Each cell is a regular cooler data collection, so the input must be a
bin table and pixel table for each cell. The pixel tables are provided as
a dictionary where the key is a unique cell name. The bin tables can be
provided as a dict with the same keys or a single common bin table can be
given.
.. versionadded:: 0.8.9
Parameters
----------
cool_uri : str
Path to scool file or URI string. If the file does not exist,
it will be created.
bins : :class:`pandas.DataFrame` or Dict[str, DataFrame]
A single bin table or dictionary of cell names to bins tables. A bin
table is a dataframe with columns ``chrom``, ``start`` and ``end``.
May contain additional columns.
cell_name_pixels_dict : Dict[str, DataFrame]
Cell name as key and pixel table DataFrame as value.
A table, given as a dataframe or a column-oriented dict, containing
columns labeled ``bin1_id``, ``bin2_id`` and ``count``, sorted by
(``bin1_id``, ``bin2_id``). If additional columns are included in the
pixel table, their names and dtypes must be specified using the
``columns`` and ``dtypes`` arguments. For larger input data, an
**iterable** can be provided that yields the pixel data as a sequence
of chunks. If the input is a dask DataFrame, it will also be processed
one chunk at a time.
{other_parameters}
See also
--------
cooler.create_cooler
cooler.zoomify_cooler
{notes}
"""
file_path, group_path = parse_cooler_uri(cool_uri)
h5opts = _set_h5opts(h5opts)
if isinstance(bins, pd.DataFrame):
bins_dict = {cell_name: bins for cell_name in cell_name_pixels_dict}
cell_names = sorted(cell_name_pixels_dict)
else:
# Assume bins is a dict of cell name -> dataframe
bins_dict = bins
if len(bins_dict) == 0:
raise ValueError("At least one bin must be given.")
else:
bins = bins_dict[next(iter(bins_dict))][["chrom", "start", "end"]]
# Sort bins_dict and cell_name_pixels_dict to guarantee matching keys
bins_keys = sorted(bins_dict)
cell_names = sorted(cell_name_pixels_dict)
for key_bins, key_pixels in zip(bins_keys, cell_names):
if key_bins != key_pixels:
raise ValueError('Bins and pixel dicts do not have matching keys')
dtypes = _get_dtypes_arg(dtypes, kwargs)
for col in ["chrom", "start", "end"]:
if col not in bins.columns:
raise ValueError("Missing column from bin table: '{}'.".format(col))
# Populate dtypes for expected pixel columns, and apply user overrides.
if dtypes is None:
dtypes = dict(PIXEL_DTYPES)
else:
dtypes_ = dict(dtypes)
dtypes = dict(PIXEL_DTYPES)
dtypes.update(dtypes_)
# Determine the appropriate iterable
try:
from dask.dataframe import DataFrame as dask_df
except (ImportError, AttributeError): # pragma: no cover
dask_df = ()
# Prepare chroms and bins
bins = bins.copy()
bins["chrom"] = bins["chrom"].astype(object)
chromsizes = get_chromsizes(bins)
try:
chromsizes = six.iteritems(chromsizes)
except AttributeError:
pass
chromnames, lengths = zip(*chromsizes)
chroms = pd.DataFrame(
{"name": chromnames, "length": lengths}, columns=["name", "length"]
)
binsize = get_binsize(bins)
n_chroms = len(chroms)
n_bins = len(bins)
# Create root group
with h5py.File(file_path, mode) as f:
logger.info('Creating cooler at "{}::{}"'.format(file_path, group_path))
if group_path == "/":
for name in ["chroms", "bins"]:
if name in f:
del f[name]
else:
try:
f.create_group(group_path)
except ValueError:
del f[group_path]
f.create_group(group_path)
with h5py.File(file_path, "r+") as f:
h5 = f[group_path]
logger.info("Writing chroms")
grp = h5.create_group("chroms")
write_chroms(grp, chroms, h5opts)
logger.info("Writing bins")
grp = h5.create_group("bins")
write_bins(grp, bins, chroms["name"], h5opts)
with h5py.File(file_path, "r+") as f:
h5 = f[group_path]
logger.info("Writing info")
info = {}
info["bin-type"] = u"fixed" if binsize is not None else u"variable"
info["bin-size"] = binsize if binsize is not None else u"null"
info["nchroms"] = n_chroms
info["ncells"] = len(cell_name_pixels_dict)
info["nbins"] = n_bins
if assembly is not None:
info["genome-assembly"] = assembly
if metadata is not None:
info["metadata"] = metadata
write_info(h5, info, True)
# Append single cells
for key in cell_names:
if '/' in key:
cell_name = key.split('/')[-1]
else:
cell_name = key
create(
cool_uri + '::/cells/' + cell_name,
bins_dict[key],
cell_name_pixels_dict[key],
columns=columns,
dtypes=dtypes,
metadata=metadata,
assembly=assembly,
ordered=ordered,
symmetric_upper=symmetric_upper,
mode='a',
boundscheck=boundscheck,
dupcheck=dupcheck,
triucheck=triucheck,
ensure_sorted=ensure_sorted,
h5opts=h5opts,
lock=lock,
mergebuf=mergebuf,
delete_temp=delete_temp,
temp_dir=temp_dir,
max_merge=max_merge,
append_scool=True,
scool_root_uri=cool_uri
)
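# Usage sketch (illustrative only, not part of the original module; cell names,
# the output path and pixel values are hypothetical). Two cells share a single
# common bin table; each cell is written as a regular cooler under /cells.
def _example_create_scool_usage():
    import pandas as pd

    bins = pd.DataFrame({
        "chrom": ["chr1"] * 5,
        "start": [0, 10, 20, 30, 40],
        "end": [10, 20, 30, 40, 50],
    })
    cell_name_pixels_dict = {
        "cell_A": pd.DataFrame({"bin1_id": [0, 1], "bin2_id": [1, 2], "count": [4, 2]}),
        "cell_B": pd.DataFrame({"bin1_id": [0, 2], "bin2_id": [0, 3], "count": [1, 5]}),
    }
    create_scool("example.scool", bins, cell_name_pixels_dict, ordered=True)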
| bsd-3-clause |
hippke/TTV-TDV-exomoons | create_figures/create_figure_4a.py | 1 | 7168 | """n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
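    # Worked example (illustrative, not part of the original script): for two
    # 1 kg bodies 1 m apart along the x-axis, attraction() gives
    #     f = G * 1 * 1 / 1**2 = 6.67428e-11 N, theta = atan2(0, 1) = 0,
    # so (fx, fy) = (6.67428e-11, 0.0): the force on self points toward other,
    # as expected for Newtonian gravity.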
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 10000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
# Move resulting data from lists to numpy arrays
ttv_array = numpy.array([])
ttv_array = ttv_list
tdv_array = numpy.array([])
tdv_array = tdv_list
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print(transit_duration)
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_io
firstmoon.px = 0.4218 * 10**9
secondmoon = Body()
secondmoon.mass = M_eur
secondmoon.px = 0.66956576 * 10**9
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
orbit_duration = orbit_duration * 1.08
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
# Output information
print('TTV amplitude =', numpy.amax(ttv_array), \
'[min] = ', numpy.amax(ttv_array) * 60, '[sec]')
print('TDV amplitude =', numpy.amax(tdv_array), \
'[min] = ', numpy.amax(tdv_array) * 60, '[sec]')
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparisons
plt.xlim(-0.07, +0.07)
plt.ylim(-0.2, +0.2)
plt.annotate(r"2:1", xy=(-0.065, +0.16), size=16)
#plt.show()
plt.savefig("fig_4a.eps", bbox_inches = 'tight')
| mit |
Adai0808/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 143 | 22295 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: check that it does not raise casting
# errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/tests/test_compare_images.py | 15 | 3854 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import shutil
from nose.tools import assert_equal, assert_not_equal, assert_almost_equal
from matplotlib.testing.compare import compare_images
from matplotlib.testing.decorators import _image_directories
baseline_dir, result_dir = _image_directories(lambda: 'dummy func')
# Tests of the image comparison algorithm.
def image_comparison_expect_rms(im1, im2, tol, expect_rms):
"""Compare two images, expecting a particular RMS error.
im1 and im2 are filenames relative to the baseline_dir directory.
tol is the tolerance to pass to compare_images.
expect_rms is the expected RMS value, or None. If None, the test will
succeed if compare_images succeeds. Otherwise, the test will succeed if
compare_images fails and returns an RMS error almost equal to this value.
"""
im1 = os.path.join(baseline_dir, im1)
im2_src = os.path.join(baseline_dir, im2)
im2 = os.path.join(result_dir, im2)
# Move im2 from baseline_dir to result_dir. This will ensure that
# compare_images writes the diff file to result_dir, instead of trying to
# write to the (possibly read-only) baseline_dir.
shutil.copyfile(im2_src, im2)
results = compare_images(im1, im2, tol=tol, in_decorator=True)
if expect_rms is None:
assert_equal(None, results)
else:
assert_not_equal(None, results)
assert_almost_equal(expect_rms, results['rms'], places=4)
def test_image_compare_basic():
#: Test comparison of an image and the same image with minor differences.
# This expects the images to compare equal under normal tolerance, and have
# a small RMS.
im1 = 'basn3p02.png'
im2 = 'basn3p02-minorchange.png'
image_comparison_expect_rms(im1, im2, tol=10, expect_rms=None)
# Now test with no tolerance.
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=6.50646)
def test_image_compare_1px_offset():
#: Test comparison with an image that is shifted by 1px in the X axis.
im1 = 'basn3p02.png'
im2 = 'basn3p02-1px-offset.png'
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=90.15611)
def test_image_compare_half_1px_offset():
#: Test comparison with an image with half the pixels shifted by 1px in
#: the X axis.
im1 = 'basn3p02.png'
im2 = 'basn3p02-half-1px-offset.png'
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=63.75)
def test_image_compare_scrambled():
#: Test comparison of an image and the same image scrambled.
# This expects the images to compare completely different, with a very
# large RMS.
# Note: The image has been scrambled in a specific way, by having each
# color component of each pixel randomly placed somewhere in the image. It
# contains exactly the same number of pixels of each color value of R, G
# and B, but in a totally different position.
im1 = 'basn3p02.png'
im2 = 'basn3p02-scrambled.png'
# Test with no tolerance to make sure that we pick up even a very small RMS
# error.
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=172.63582)
def test_image_compare_shade_difference():
#: Test comparison of an image and a slightly brighter image.
# The two images are solid color, with the second image being exactly 1
# color value brighter.
# This expects the images to compare equal under normal tolerance, and have
# an RMS of exactly 1.
im1 = 'all127.png'
im2 = 'all128.png'
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=1.0)
# Now test the reverse comparison.
image_comparison_expect_rms(im2, im1, tol=0, expect_rms=1.0)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| gpl-2.0 |
kobejean/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 39 | 12688 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity (deprecated).
These classes are deprecated and replaced with `tf.estimator`.
See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train:
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train: # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
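For illustration only (a hedged sketch, not an official recipe; the feature
name and model below are hypothetical), a tiny `model_fn` that branches on
`mode` might look like this:
```python
def model_fn(features, targets, mode, params):
  # A minimal linear model for a single real-valued feature "x".
  w = tf.get_variable("w", shape=[1], initializer=tf.zeros_initializer())
  predictions = features["x"] * w
  loss = None
  train_op = None
  if mode != tf.contrib.learn.ModeKeys.INFER:
    # Loss is only needed for TRAIN and EVAL; targets are None at INFER time.
    loss = tf.reduce_mean(tf.square(predictions - targets))
  if mode == tf.contrib.learn.ModeKeys.TRAIN:
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
        loss, global_step=tf.train.get_global_step())
  return predictions, loss, train_op
```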
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimators under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import GraphRewriteSpec
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import loss_only_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
sriharshams/mlnd | smartcab/smartcab/simulator.py | 11 | 25158 | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
###########################################
import os
import time
import random
import importlib
import csv
class Simulator(object):
"""Simulates agents in a dynamic smartcab environment.
Uses PyGame to display GUI, if available.
"""
colors = {
'black' : ( 0, 0, 0),
'white' : (255, 255, 255),
'red' : (255, 0, 0),
'green' : ( 0, 255, 0),
'dgreen' : ( 0, 228, 0),
'blue' : ( 0, 0, 255),
'cyan' : ( 0, 200, 200),
'magenta' : (200, 0, 200),
'yellow' : (255, 255, 0),
'mustard' : (200, 200, 0),
'orange' : (255, 128, 0),
'maroon' : (200, 0, 0),
'crimson' : (128, 0, 0),
'gray' : (155, 155, 155)
}
def __init__(self, env, size=None, update_delay=2.0, display=True, log_metrics=False, optimized=False):
self.env = env
self.size = size if size is not None else ((self.env.grid_size[0] + 1) * self.env.block_size, (self.env.grid_size[1] + 2) * self.env.block_size)
self.width, self.height = self.size
self.road_width = 44
self.bg_color = self.colors['gray']
self.road_color = self.colors['black']
self.line_color = self.colors['mustard']
self.boundary = self.colors['black']
self.stop_color = self.colors['crimson']
self.quit = False
self.start_time = None
self.current_time = 0.0
self.last_updated = 0.0
self.update_delay = update_delay # duration between each step (in seconds)
self.display = display
if self.display:
try:
self.pygame = importlib.import_module('pygame')
self.pygame.init()
self.screen = self.pygame.display.set_mode(self.size)
self._logo = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "logo.png")), (self.road_width, self.road_width))
self._ew = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "east-west.png")), (self.road_width, self.road_width))
self._ns = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "north-south.png")), (self.road_width, self.road_width))
self.frame_delay = max(1, int(self.update_delay * 1000)) # delay between GUI frames in ms (min: 1)
self.agent_sprite_size = (32, 32)
self.primary_agent_sprite_size = (42, 42)
self.agent_circle_radius = 20 # radius of circle, when using simple representation
for agent in self.env.agent_states:
if agent.color == 'white':
agent._sprite = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.primary_agent_sprite_size)
else:
agent._sprite = self.pygame.transform.smoothscale(self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.agent_sprite_size)
agent._sprite_size = (agent._sprite.get_width(), agent._sprite.get_height())
self.font = self.pygame.font.Font(None, 20)
self.paused = False
except ImportError as e:
self.display = False
print "Simulator.__init__(): Unable to import pygame; display disabled.\n{}: {}".format(e.__class__.__name__, e)
except Exception as e:
self.display = False
print "Simulator.__init__(): Error initializing GUI objects; display disabled.\n{}: {}".format(e.__class__.__name__, e)
# Setup metrics to report
self.log_metrics = log_metrics
self.optimized = optimized
if self.log_metrics:
a = self.env.primary_agent
# Set log files
if a.learning:
if self.optimized: # Whether the user is optimizing the parameters and decay functions
self.log_filename = os.path.join("logs", "sim_improved-learning.csv")
self.table_filename = os.path.join("logs","sim_improved-learning.txt")
else:
self.log_filename = os.path.join("logs", "sim_default-learning.csv")
self.table_filename = os.path.join("logs","sim_default-learning.txt")
self.table_file = open(self.table_filename, 'wb')
else:
self.log_filename = os.path.join("logs", "sim_no-learning.csv")
self.log_fields = ['trial', 'testing', 'parameters', 'initial_deadline', 'final_deadline', 'net_reward', 'actions', 'success']
self.log_file = open(self.log_filename, 'wb')
self.log_writer = csv.DictWriter(self.log_file, fieldnames=self.log_fields)
self.log_writer.writeheader()
def run(self, tolerance=0.05, n_test=0):
""" Run a simulation of the environment.
'tolerance' is the minimum epsilon necessary to begin testing (if enabled)
'n_test' is the number of testing trials simulated
Note that the minimum number of training trials is always 20. """
self.quit = False
# Get the primary agent
a = self.env.primary_agent
total_trials = 1
testing = False
trial = 1
while True:
# Flip testing switch
if not testing:
if total_trials > 20: # Must complete minimum 20 training trials
if a.learning:
if a.epsilon < tolerance: # assumes epsilon decays to 0
testing = True
trial = 1
else:
testing = True
trial = 1
# Break if we've reached the limit of testing trials
else:
if trial > n_test:
break
# Pretty print to terminal
print
print "/-------------------------"
if testing:
print "| Testing trial {}".format(trial)
else:
print "| Training trial {}".format(trial)
print "\-------------------------"
print
self.env.reset(testing)
self.current_time = 0.0
self.last_updated = 0.0
self.start_time = time.time()
while True:
try:
# Update current time
self.current_time = time.time() - self.start_time
# Handle GUI events
if self.display:
for event in self.pygame.event.get():
if event.type == self.pygame.QUIT:
self.quit = True
elif event.type == self.pygame.KEYDOWN:
if event.key == 27: # Esc
self.quit = True
elif event.unicode == u' ':
self.paused = True
if self.paused:
self.pause()
# Update environment
if self.current_time - self.last_updated >= self.update_delay:
self.env.step()
self.last_updated = self.current_time
# Render text
self.render_text(trial, testing)
# Render GUI and sleep
if self.display:
self.render(trial, testing)
self.pygame.time.wait(self.frame_delay)
except KeyboardInterrupt:
self.quit = True
finally:
if self.quit or self.env.done:
break
if self.quit:
break
# Collect metrics from trial
if self.log_metrics:
self.log_writer.writerow({
'trial': trial,
'testing': self.env.trial_data['testing'],
'parameters': self.env.trial_data['parameters'],
'initial_deadline': self.env.trial_data['initial_deadline'],
'final_deadline': self.env.trial_data['final_deadline'],
'net_reward': self.env.trial_data['net_reward'],
'actions': self.env.trial_data['actions'],
'success': self.env.trial_data['success']
})
# Trial finished
if self.env.success == True:
print "\nTrial Completed!"
print "Agent reached the destination."
else:
print "\nTrial Aborted!"
print "Agent did not reach the destination."
# Increment
total_trials = total_trials + 1
trial = trial + 1
# Clean up
if self.log_metrics:
if a.learning:
f = self.table_file
f.write("/-----------------------------------------\n")
f.write("| State-action rewards from Q-Learning\n")
f.write("\-----------------------------------------\n\n")
for state in a.Q:
f.write("{}\n".format(state))
for action, reward in a.Q[state].iteritems():
f.write(" -- {} : {:.2f}\n".format(action, reward))
f.write("\n")
self.table_file.close()
self.log_file.close()
print "\nSimulation ended. . . "
# Report final metrics
if self.display:
self.pygame.display.quit() # shut down pygame
def render_text(self, trial, testing=False):
""" This is the non-GUI render display of the simulation.
Simulated trial data will be rendered in the terminal/command prompt. """
status = self.env.step_data
if status and status['waypoint'] is not None: # Continuing the trial
# Previous State
if status['state']:
print "Agent previous state: {}".format(status['state'])
else:
print "!! Agent state not been updated!"
# Result
if status['violation'] == 0: # Legal
if status['waypoint'] == status['action']: # Followed waypoint
print "Agent followed the waypoint {}. (rewarded {:.2f})".format(status['action'], status['reward'])
elif status['action'] == None:
if status['light'] == 'red': # Stuck at red light
print "Agent properly idled at a red light. (rewarded {:.2f})".format(status['reward'])
else:
print "Agent idled at a green light with oncoming traffic. (rewarded {:.2f})".format(status['reward'])
else: # Did not follow waypoint
print "Agent drove {} instead of {}. (rewarded {:.2f})".format(status['action'], status['waypoint'], status['reward'])
else: # Illegal
if status['violation'] == 1: # Minor violation
print "Agent idled at a green light with no oncoming traffic. (rewarded {:.2f})".format(status['reward'])
elif status['violation'] == 2: # Major violation
print "Agent attempted driving {} through a red light. (rewarded {:.2f})".format(status['action'], status['reward'])
elif status['violation'] == 3: # Minor accident
print "Agent attempted driving {} through traffic and cause a minor accident. (rewarded {:.2f})".format(status['action'], status['reward'])
elif status['violation'] == 4: # Major accident
print "Agent attempted driving {} through a red light with traffic and cause a major accident. (rewarded {:.2f})".format(status['action'], status['reward'])
# Time Remaining
if self.env.enforce_deadline:
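                # Remaining time as a percentage of the trial's total step budget: the trial started
                # with (t + deadline) steps and (deadline - 1) remain after this one.
                # Illustrative arithmetic (made-up values): t=10, deadline=21 -> 20 * 100 / 31 ~ 65%.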
time = (status['deadline'] - 1) * 100.0 / (status['t'] + status['deadline'])
print "{:.0f}% of time remaining to reach destination.".format(time)
else:
print "Agent not enforced to meet deadline."
# Starting new trial
else:
a = self.env.primary_agent
print "Simulating trial. . . "
if a.learning:
print "epsilon = {:.4f}; alpha = {:.4f}".format(a.epsilon, a.alpha)
else:
print "Agent not set to learn."
def render(self, trial, testing=False):
""" This is the GUI render display of the simulation.
Supplementary trial data can be found from render_text. """
# Reset the screen.
self.screen.fill(self.bg_color)
# Draw elements
# * Static elements
# Boundary
self.pygame.draw.rect(self.screen, self.boundary, ((self.env.bounds[0] - self.env.hang)*self.env.block_size, (self.env.bounds[1]-self.env.hang)*self.env.block_size, (self.env.bounds[2] + self.env.hang/3)*self.env.block_size, (self.env.bounds[3] - 1 + self.env.hang/3)*self.env.block_size), 4)
for road in self.env.roads:
# Road
self.pygame.draw.line(self.screen, self.road_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), self.road_width)
# Center line
self.pygame.draw.line(self.screen, self.line_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), 2)
for intersection, traffic_light in self.env.intersections.iteritems():
self.pygame.draw.circle(self.screen, self.road_color, (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size), self.road_width/2)
if traffic_light.state: # North-South is open
self.screen.blit(self._ns,
self.pygame.rect.Rect(intersection[0]*self.env.block_size - self.road_width/2, intersection[1]*self.env.block_size - self.road_width/2, intersection[0]*self.env.block_size + self.road_width, intersection[1]*self.env.block_size + self.road_width/2))
self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size - self.road_width/2), (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size + self.road_width/2), 2)
self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size + self.road_width/2 + 1, intersection[1] * self.env.block_size - self.road_width/2), (intersection[0] * self.env.block_size + self.road_width/2 + 1, intersection[1] * self.env.block_size + self.road_width/2), 2)
else:
self.screen.blit(self._ew,
self.pygame.rect.Rect(intersection[0]*self.env.block_size - self.road_width/2, intersection[1]*self.env.block_size - self.road_width/2, intersection[0]*self.env.block_size + self.road_width, intersection[1]*self.env.block_size + self.road_width/2))
self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size - self.road_width/2), (intersection[0] * self.env.block_size + self.road_width/2, intersection[1] * self.env.block_size - self.road_width/2), 2)
self.pygame.draw.line(self.screen, self.stop_color, (intersection[0] * self.env.block_size + self.road_width/2, intersection[1] * self.env.block_size + self.road_width/2 + 1), (intersection[0] * self.env.block_size - self.road_width/2, intersection[1] * self.env.block_size + self.road_width/2 + 1), 2)
# * Dynamic elements
self.font = self.pygame.font.Font(None, 20)
for agent, state in self.env.agent_states.iteritems():
# Compute precise agent location here (back from the intersection some)
agent_offset = (2 * state['heading'][0] * self.agent_circle_radius + self.agent_circle_radius * state['heading'][1] * 0.5, \
2 * state['heading'][1] * self.agent_circle_radius - self.agent_circle_radius * state['heading'][0] * 0.5)
agent_pos = (state['location'][0] * self.env.block_size - agent_offset[0], state['location'][1] * self.env.block_size - agent_offset[1])
agent_color = self.colors[agent.color]
if hasattr(agent, '_sprite') and agent._sprite is not None:
# Draw agent sprite (image), properly rotated
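                # Rotation follows the heading vector: (1, 0) keeps the sprite unrotated,
                # (-1, 0) flips it 180 degrees, and (0, +/-1) becomes -/+90 degrees via heading[1] * -90.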
rotated_sprite = agent._sprite if state['heading'] == (1, 0) else self.pygame.transform.rotate(agent._sprite, 180 if state['heading'][0] == -1 else state['heading'][1] * -90)
self.screen.blit(rotated_sprite,
self.pygame.rect.Rect(agent_pos[0] - agent._sprite_size[0] / 2, agent_pos[1] - agent._sprite_size[1] / 2,
agent._sprite_size[0], agent._sprite_size[1]))
else:
# Draw simple agent (circle with a short line segment poking out to indicate heading)
self.pygame.draw.circle(self.screen, agent_color, agent_pos, self.agent_circle_radius)
self.pygame.draw.line(self.screen, agent_color, agent_pos, state['location'], self.road_width)
if state['destination'] is not None:
self.screen.blit(self._logo,
self.pygame.rect.Rect(state['destination'][0] * self.env.block_size - self.road_width/2, \
state['destination'][1]*self.env.block_size - self.road_width/2, \
state['destination'][0]*self.env.block_size + self.road_width/2, \
state['destination'][1]*self.env.block_size + self.road_width/2))
# * Overlays
self.font = self.pygame.font.Font(None, 50)
if testing:
self.screen.blit(self.font.render("Testing Trial %s"%(trial), True, self.colors['black'], self.bg_color), (10, 10))
else:
self.screen.blit(self.font.render("Training Trial %s"%(trial), True, self.colors['black'], self.bg_color), (10, 10))
self.font = self.pygame.font.Font(None, 30)
# Status text about each step
status = self.env.step_data
if status:
# Previous State
if status['state']:
self.screen.blit(self.font.render("Previous State: {}".format(status['state']), True, self.colors['white'], self.bg_color), (350, 10))
if not status['state']:
self.screen.blit(self.font.render("!! Agent state not updated!", True, self.colors['maroon'], self.bg_color), (350, 10))
# Action
if status['violation'] == 0: # Legal
if status['action'] == None:
self.screen.blit(self.font.render("No action taken. (rewarded {:.2f})".format(status['reward']), True, self.colors['dgreen'], self.bg_color), (350, 40))
else:
self.screen.blit(self.font.render("Agent drove {}. (rewarded {:.2f})".format(status['action'], status['reward']), True, self.colors['dgreen'], self.bg_color), (350, 40))
else: # Illegal
if status['action'] == None:
self.screen.blit(self.font.render("No action taken. (rewarded {:.2f})".format(status['reward']), True, self.colors['maroon'], self.bg_color), (350, 40))
else:
self.screen.blit(self.font.render("{} attempted (rewarded {:.2f})".format(status['action'], status['reward']), True, self.colors['maroon'], self.bg_color), (350, 40))
# Result
if status['violation'] == 0: # Legal
if status['waypoint'] == status['action']: # Followed waypoint
self.screen.blit(self.font.render("Agent followed the waypoint!", True, self.colors['dgreen'], self.bg_color), (350, 70))
elif status['action'] == None:
if status['light'] == 'red': # Stuck at a red light
self.screen.blit(self.font.render("Agent idled at a red light!", True, self.colors['dgreen'], self.bg_color), (350, 70))
else:
self.screen.blit(self.font.render("Agent idled at a green light with oncoming traffic.", True, self.colors['mustard'], self.bg_color), (350, 70))
else: # Did not follow waypoint
self.screen.blit(self.font.render("Agent did not follow the waypoint.", True, self.colors['mustard'], self.bg_color), (350, 70))
else: # Illegal
if status['violation'] == 1: # Minor violation
self.screen.blit(self.font.render("There was a green light with no oncoming traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
elif status['violation'] == 2: # Major violation
self.screen.blit(self.font.render("There was a red light with no traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
elif status['violation'] == 3: # Minor accident
self.screen.blit(self.font.render("There was traffic with right-of-way.", True, self.colors['maroon'], self.bg_color), (350, 70))
elif status['violation'] == 4: # Major accident
self.screen.blit(self.font.render("There was a red light with traffic.", True, self.colors['maroon'], self.bg_color), (350, 70))
# Time Remaining
if self.env.enforce_deadline:
time = (status['deadline'] - 1) * 100.0 / (status['t'] + status['deadline'])
self.screen.blit(self.font.render("{:.0f}% of time remaining to reach destination.".format(time), True, self.colors['black'], self.bg_color), (350, 100))
else:
self.screen.blit(self.font.render("Agent not enforced to meet deadline.", True, self.colors['black'], self.bg_color), (350, 100))
# Denote whether a trial was a success or failure
if (state['destination'] != state['location'] and state['deadline'] > 0) or (self.env.enforce_deadline is not True and state['destination'] != state['location']):
self.font = self.pygame.font.Font(None, 40)
if self.env.success == True:
self.screen.blit(self.font.render("Previous Trial: Success", True, self.colors['dgreen'], self.bg_color), (10, 50))
if self.env.success == False:
self.screen.blit(self.font.render("Previous Trial: Failure", True, self.colors['maroon'], self.bg_color), (10, 50))
if self.env.primary_agent.learning:
self.font = self.pygame.font.Font(None, 22)
self.screen.blit(self.font.render("epsilon = {:.4f}".format(self.env.primary_agent.epsilon), True, self.colors['black'], self.bg_color), (10, 80))
self.screen.blit(self.font.render("alpha = {:.4f}".format(self.env.primary_agent.alpha), True, self.colors['black'], self.bg_color), (10, 95))
# Reset status text
else:
self.pygame.rect.Rect(350, 10, self.width, 200)
self.font = self.pygame.font.Font(None, 40)
self.screen.blit(self.font.render("Simulating trial. . .", True, self.colors['white'], self.bg_color), (400, 60))
# Flip buffers
self.pygame.display.flip()
def pause(self):
""" When the GUI is enabled, this function will pause the simulation. """
abs_pause_time = time.time()
self.font = self.pygame.font.Font(None, 30)
pause_text = "Simulation Paused. Press any key to continue. . ."
self.screen.blit(self.font.render(pause_text, True, self.colors['red'], self.bg_color), (400, self.height - 30))
self.pygame.display.flip()
print pause_text
while self.paused:
for event in self.pygame.event.get():
if event.type == self.pygame.KEYDOWN:
self.paused = False
self.pygame.time.wait(self.frame_delay)
self.screen.blit(self.font.render(pause_text, True, self.bg_color, self.bg_color), (400, self.height - 30))
self.start_time += (time.time() - abs_pause_time)
| apache-2.0 |
christos-tsotskas/PhD_post_processing | src/post_process.py | 1 | 4796 | '''
Created on 19 Sep 2016
@author: Christos
'''
import numpy as np
import matplotlib.pyplot as plt
class ComparisonPlot(object):
__Visualisers = None
def __init__(self, Visualisers):
self.__Visualisers = Visualisers
print 'received:'+ str( len(self.__Visualisers)) + " visualisers"
def plot_to_compare_time_performance(self):
fig = plt.figure()
ax = plt.gca()
for plot in self.__Visualisers:
(x,y1) = plot.get_number_of_variables_vs_time()
ax.scatter( x,y1 , label=plot.get_name())
# ax.set_xscale('log')
plt.xlabel('number of variables')
plt.ylabel('Time(s)')
plt.legend(loc=9, ncol=2)
plt.grid(True)
plt.show()
class Visualiser(object):
__data = None
__filename_to_plot = None
__name = None
__figure_number = None
__figure = None
def __init__(self, filename_to_plot, name, figure_number):
self.__filename_to_plot = filename_to_plot
self.__name = name
self.__figure_number = figure_number
def get_data(self):
self.load_file_to_data_structure()
return self.__data
def get_name(self):
return self.__name
def load_file_to_data_structure(self):
'''
expects data with 3 columns, where
the first column is the number of variables
the second column is the time the optimisation run
the third column is the Hypervolume indicator (calculated from 20,20)
'''
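        # Illustrative data line (made-up numbers), whitespace-separated as
        # "<n_variables> <time_s> <hypervolume>":
        #   100    359.42    0.8731
        # A leading line starting with '#' is treated as a header and dropped.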
with open(self.__filename_to_plot) as in_file:
lines = [line.rstrip('\n') for line in in_file]
if lines[0].startswith("#"):
lines.pop(0)
number_of_lines = len(lines)
# self.__data = np.empty([number_of_lines, 3], dtype=float)
self.__data = np.zeros(shape=(number_of_lines, 3), dtype=float)
row_index = 0
for line in lines:
text = line.split()
self.__data[row_index][0] = float(text[0])
self.__data[row_index][1] = float(text[1])
self.__data[row_index][2] = float(text[2])
row_index += 1
def get_number_of_variables_vs_time(self):
self.load_file_to_data_structure()
        x = self.__data[:, 0]
        y1 = self.__data[:, 1]
number_of_variables_vs_time = (x, y1)
return number_of_variables_vs_time
def plot_two_axis_of_the_same_case(self):
number_of_variables = self.__data[:,0]
time_performance = self.__data[:,1]
HV_performance = self.__data[:,2]
plt.figure(self.__figure_number)
fig, ax1 = plt.subplots()
ax1.set_title('Scalability of '+self.__name)
ax1.semilogx(number_of_variables, time_performance, '.')
ax1.set_xlabel('Number of variables')
# Make the time_performance-axis label and tick labels match the line color.
ax1.set_ylabel('Time(s)', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
ax2.semilogx(number_of_variables, HV_performance, '*')
ax2.set_ylabel('HV indicator', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
def plot_comparisons(self):
pass
def plot_quality_and_time_together(self):
self.load_file_to_data_structure()
self.plot_two_axis_of_the_same_case()
def plot_quality_and_time_together():
f1 = 'all_tests_phd_corrections4_20000evals.txt'
f2 = 'all_tests_phd_corrections5_20000evals.txt'
f3 = 'all_tests_phd_corrections7_20000evals.txt'
v1 = Visualiser(f2,'case5',1)
v1.plot_quality_and_time_together()
v2 = Visualiser(f3,'case7',2)
v2.plot_quality_and_time_together()
v3 = Visualiser(f1,'case4',3)
v3.plot_quality_and_time_together()
plt.show()
def plot_quality():
f1 = 'all_tests_phd_corrections4_20000evals.txt'
f2 = 'all_tests_phd_corrections5_20000evals.txt'
f3 = 'all_tests_phd_corrections7_20000evals.txt'
v1 = Visualiser(f2,'case5',1)
v2 = Visualiser(f3,'case7',2)
v3 = Visualiser(f1,'case4',3)
collection1 = [v1, v2, v3]
# collection1 = [v1, v2]
# collection1 = [v1]
cp1 = ComparisonPlot(collection1)
cp1.plot_to_compare_time_performance()
if __name__ == '__main__':
plot_quality()
| mit |
wdm0006/categorical_encoding | category_encoders/leave_one_out.py | 1 | 11527 | """Leave one out coding"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
import category_encoders.utils as util
from sklearn.utils.random import check_random_state
__author__ = 'hbghhy'
class LeaveOneOutEncoder(BaseEstimator, TransformerMixin):
"""Leave one out coding for categorical features.
This is very similar to target encoding but excludes the current row's
target when calculating the mean target for a level to reduce the effect
of outliers.
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
handle_unknown: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
sigma: float
adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing
data are untouched). Sigma gives the standard deviation (spread or "width") of the normal distribution.
The optimal value is commonly between 0.05 and 0.6. The default is to not add noise, but that leads
to significantly suboptimal results.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> bunch = load_boston()
>>> y = bunch.target
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = LeaveOneOutEncoder(cols=['CHAS', 'RAD']).fit(X, y)
>>> numeric_dataset = enc.transform(X)
>>> print(numeric_dataset.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 506 entries, 0 to 505
Data columns (total 13 columns):
CRIM 506 non-null float64
ZN 506 non-null float64
INDUS 506 non-null float64
CHAS 506 non-null float64
NOX 506 non-null float64
RM 506 non-null float64
AGE 506 non-null float64
DIS 506 non-null float64
RAD 506 non-null float64
TAX 506 non-null float64
PTRATIO 506 non-null float64
B 506 non-null float64
LSTAT 506 non-null float64
dtypes: float64(13)
memory usage: 51.5 KB
None
References
----------
.. [1] Strategies to encode categorical variables with many categories, from
https://www.kaggle.com/c/caterpillar-tube-pricing/discussion/15748#143154.
"""
def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
handle_unknown='value', handle_missing='value', random_state=None, sigma=None):
self.return_df = return_df
self.drop_invariant = drop_invariant
self.drop_cols = []
self.verbose = verbose
self.use_default_cols = cols is None # if True, even a repeated call of fit() will select string columns from X
self.cols = cols
self._dim = None
self.mapping = None
self.handle_unknown = handle_unknown
self.handle_missing = handle_missing
self._mean = None
self.random_state = random_state
self.sigma = sigma
self.feature_names = None
def fit(self, X, y, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# unite the input into pandas types
X = util.convert_input(X)
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.use_default_cols:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
categories = self.fit_leave_one_out(
X, y,
cols=self.cols
)
self.mapping = categories
X_temp = self.transform(X, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
            except (KeyError, ValueError) as e:  # list.remove raises ValueError when the name is absent
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self
def transform(self, X, y=None, override_return_df=False):
"""Perform the transformation to new categorical data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples] when transform by leave one out
None, when transform without target information (such as transform test set)
Returns
-------
p : array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied.
"""
if self.handle_missing == 'error':
if X[self.cols].isnull().any().any():
raise ValueError('Columns to be encoded can not contain null')
if self._dim is None:
raise ValueError('Must train encoder before it can be used to transform data.')
# unite the input into pandas types
X = util.convert_input(X)
# then make sure that it is the right size
if X.shape[1] != self._dim:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
# if we are encoding the training data, we have to check the target
if y is not None:
y = util.convert_input_vector(y, X.index).astype(float)
if X.shape[0] != y.shape[0]:
raise ValueError("The length of X is " + str(X.shape[0]) + " but length of y is " + str(y.shape[0]) + ".")
if not list(self.cols):
return X
X = self.transform_leave_one_out(
X, y,
mapping=self.mapping
)
if self.drop_invariant:
for col in self.drop_cols:
X.drop(col, 1, inplace=True)
if self.return_df or override_return_df:
return X
else:
return X.values
def fit_transform(self, X, y=None, **fit_params):
"""
Encoders that utilize the target must make sure that the training data are transformed with:
transform(X, y)
and not with:
transform(X)
"""
# the interface requires 'y=None' in the signature but we need 'y'
if y is None:
            raise TypeError("fit_transform() missing argument: 'y'")
return self.fit(X, y, **fit_params).transform(X, y)
def fit_leave_one_out(self, X_in, y, cols=None):
X = X_in.copy(deep=True)
if cols is None:
cols = X.columns.values
self._mean = y.mean()
return {col: self.fit_column_map(X[col], y) for col in cols}
def fit_column_map(self, series, y):
category = pd.Categorical(series)
categories = category.categories
codes = category.codes.copy()
codes[codes == -1] = len(categories)
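        # pd.Categorical marks missing values with code -1; remap them to an extra index so they
        # aggregate under the NaN category appended below.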
categories = np.append(categories, np.nan)
return_map = pd.Series(dict([(code, category) for code, category in enumerate(categories)]))
result = y.groupby(codes).agg(['sum', 'count'])
return result.rename(return_map)
def transform_leave_one_out(self, X_in, y, mapping=None):
"""
Leave one out encoding uses a single column of floats to represent the means of the target variables.
"""
X = X_in.copy(deep=True)
random_state_ = check_random_state(self.random_state)
for col, colmap in mapping.items():
level_notunique = colmap['count'] > 1
unique_train = colmap.index
unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train])
is_nan = X[col].isnull()
is_unknown_value = X[col].isin(unseen_values.dropna().astype(object))
if X[col].dtype.name == 'category': # Pandas 0.24 tries hard to preserve categorical data type
X[col] = X[col].astype(str)
if self.handle_unknown == 'error' and is_unknown_value.any():
raise ValueError('Columns to be encoded can not contain new values')
if y is None: # Replace level with its mean target; if level occurs only once, use global mean
level_means = (colmap['sum'] / colmap['count']).where(level_notunique, self._mean)
X[col] = X[col].map(level_means)
else: # Replace level with its mean target, calculated excluding this row's target
# The y (target) mean for this level is normally just the sum/count;
# excluding this row's y, it's (sum - y) / (count - 1)
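                # Worked example (illustrative numbers): a level seen in 4 training rows with target
                # sum 10 encodes the row whose own y is 2 as (10 - 2) / (4 - 1) ~ 2.67.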
level_means = (X[col].map(colmap['sum']) - y) / (X[col].map(colmap['count']) - 1)
# The 'where' fills in singleton levels (count = 1 -> div by 0) with the global mean
X[col] = level_means.where(X[col].map(colmap['count'][level_notunique]).notnull(), self._mean)
if self.handle_unknown == 'value':
X.loc[is_unknown_value, col] = self._mean
elif self.handle_unknown == 'return_nan':
X.loc[is_unknown_value, col] = np.nan
if self.handle_missing == 'value':
X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean
elif self.handle_missing == 'return_nan':
X.loc[is_nan, col] = np.nan
if self.sigma is not None and y is not None:
X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0])
return X
def get_feature_names(self):
"""
Returns the names of all transformed / added columns.
Returns
-------
feature_names: list
A list with all feature names transformed or added.
Note: potentially dropped features are not included!
"""
if not isinstance(self.feature_names, list):
raise ValueError('Must fit data first. Affected feature names are not known before.')
else:
return self.feature_names
| bsd-3-clause |
GoogleCloudPlatform/keras-idiomatic-programmer | zoo/datasets_c.py | 1 | 5732 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras import layers
from tensorflow.keras.layers import ReLU, Dense, Conv2D, Conv2DTranspose
from tensorflow.keras.layers import DepthwiseConv2D, SeparableConv2D, Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D, Activation, BatchNormalization
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.compat.v1.keras.initializers import glorot_uniform, he_normal
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import tensorflow_datasets as tfds
import tensorflow.keras.backend as K
import numpy as np
from sklearn.model_selection import train_test_split
import random
import math
import sys
from layers_c import Layers
from preprocess_c import Preprocess
from pretraining_c import Pretraining
from hypertune_c import HyperTune
from training_c import Training
class Dataset(object):
''' Dataset base (super) class for Models '''
def __init__(self):
""" Constructor
"""
self.x_train = None
self.y_train = None
self.x_test = None
self.y_test = None
self.n_classes = 0
@property
def data(self):
        return (self.x_train, self.y_train), (self.x_test, self.y_test)
def load_data(self, train, test=None, std=False, onehot=False, smoothing=0.0):
""" Load in memory data
train: expect form: (x_train, y_train)
"""
self.x_train, self.y_train = train
if test is not None:
self.x_test, self.y_test = test
if std:
self.x_train, self.x_test = self.standardization(self.x_train, self.x_test)
        # Infer the number of classes from the label format
        if self.y_train.ndim == 1 or self.y_train.shape[1] == 1:
            # sparse integer labels: classes run from 0 to the maximum label
            self.n_classes = int(np.max(self.y_train) + 1)
        else:
            # labels are already one-hot encoded: one column per class
            self.n_classes = self.y_train.shape[1]
if onehot:
self.y_train = to_categorical(self.y_train, self.n_classes)
self.y_test = to_categorical(self.y_test, self.n_classes)
if smoothing > 0.0:
self.y_train = self.label_smoothing(self.y_train, self.n_classes, smoothing)
def cifar10(self, epochs=10, decay=('cosine', 0)):
""" Train on CIFAR-10
epochs : number of epochs for full training
"""
from tensorflow.keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = self.standardization(x_train, x_test)
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
y_train = self.label_smoothing(y_train, 10, 0.1)
# compile the model
self.compile(loss='categorical_crossentropy', metrics=['acc'])
self.warmup(x_train, y_train)
lr, batch_size = self.random_search(x_train, y_train, x_test, y_test)
self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
lr=lr, decay=decay)
self.evaluate(x_test, y_test)
def cifar100(self, epochs=20, decay=('cosine', 0)):
""" Train on CIFAR-100
epochs : number of epochs for full training
"""
from tensorflow.keras.datasets import cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train, x_test = self.normalization(x_train, x_test)
y_train = to_categorical(y_train, 100)
y_test = to_categorical(y_test, 100)
        y_train = self.label_smoothing(y_train, 100, 0.1)
self.compile(loss='categorical_crossentropy', metrics=['acc'])
self.warmup(x_train, y_train)
lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test)
self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
lr=lr, decay=decay)
self.evaluate(x_test, y_test)
def coil100(self, epochs=20, decay=('cosine', 0)):
"""
"""
# Get TF.dataset generator for COIL100
train, info = tfds.load('coil100', split='train', shuffle_files=True, with_info=True, as_supervised=True)
n_classes = info.features['label'].num_classes
n_images = info.splits['train'].num_examples
input_shape = info.features['image'].shape
# Get the dataset into memory
train = train.shuffle(n_images).batch(n_images)
for images, labels in train.take(1):
pass
images = np.asarray(images)
images, _ = self.standardization(images, None)
labels = to_categorical(np.asarray(labels), n_classes)
# split the dataset into train/test
x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)
self.compile(loss='categorical_crossentropy', metrics=['acc'])
self.warmup(x_train, y_train)
lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test)
self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,
lr=lr, decay=decay)
self.evaluate(x_test, y_test)
| apache-2.0 |
vtsuperdarn/davitpy | davitpy/models/raydarn/rt.py | 1 | 45736 | # Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
"""Ray-tracing raydarn module
This module runs the raytracing code
Classes
-------------------------------------------------------
rt.RtRun run the code
rt.Scatter store and process modeled backscatter
rt.Edens store and process electron density profiles
rt.Rays store and process individual rays
-------------------------------------------------------
Notes
-----
The ray tracing requires mpi to run. You can adjust the number of processors, but
be wise about it and do not assign more than you have
"""
import numpy as np
import pandas as pd
import logging
#########################################################################
# Main object
#########################################################################
class RtRun(object):
"""This class runs the raytracing code and processes the output
Parameters
----------
sTime : Optional[datetime.datetime]
start time UT
eTime : Optional[datetime.datetime]
end time UT (if not provided run for a single time sTime)
rCode : Optional[str]
radar 3-letter code
radarObj : Optional[pydarn.radar.radar]
radar object (overrides rCode)
dTime : Optional[float]
time step in Hours
freq : Optional[float]
operating frequency [MHz]
beam : Optional[int]
beam number (if None run all beams)
nhops : Optional[int]
number of hops
elev : Optional[tuple]
(start elevation, end elevation, step elevation) [degrees]
azim : Optional[tuple]
(start azimuth, end azimuth, step azimuth) [degrees East] (overrides beam specification)
hmf2 : Optional[float]
        F2 peak altitude [km] (default: use IRI)
nmf2 : Optional[float]
F2 peak electron density [log10(m^-3)] (default: use IRI)
fext : Optional[str]
        output file id, at most 10 characters long (mostly used for multi-user environments, like a website)
loadFrom : Optional[str]
file name where a pickled instance of RtRun was saved (supersedes all other args)
nprocs : Optional[int]
number of processes to use with MPI
Attributes
----------
radar :
site :
azim :
beam :
elev :
time : list
dTime : float
freq : float
nhops : int
hmf2 : float
nmf2 : float
outDir :
fExt :
davitpy_path : str
edens_file :
Methods
-------
RtRun.readRays
RtRun.readEdens
RtRun.readScatter
RtRun.save
RtRun.load
Example
-------
# Run a 2-hour ray trace from Blackstone on a random day
sTime = dt.datetime(2012, 11, 18, 5)
eTime = sTime + dt.timedelta(hours=2)
radar = 'bks'
# Save the results to your /tmp directory
rto = raydarn.RtRun(sTime, eTime, rCode=radar, outDir='/tmp')
"""
def __init__(self, sTime=None, eTime=None,
rCode=None, radarObj=None,
dTime=.5,
freq=11, beam=None, nhops=1,
elev=(5, 60, .1), azim=None,
hmf2=None, nmf2=None,
outDir=None,
fext=None,
loadFrom=None,
edens_file=None,
nprocs=4):
import datetime as dt
from os import path
from davitpy.pydarn import radar
from davitpy import rcParams
# Load pickled instance...
if loadFrom:
self.load(loadFrom)
# ...or get to work!
else:
# Load radar info
if radarObj:
self.radar = radarObj
elif rCode:
self.radar = radar.radar(code=rCode)
# Set azimuth
self.site = self.radar.getSiteByDate(sTime)
            # A user-supplied azimuth range overrides the beam specification
            if azim is None:
                if beam is not None:
                    az = self.site.beamToAzim(beam)
                    azim = (az, az, 1)
                else:
                    az1 = self.site.beamToAzim(0)
                    az2 = self.site.beamToAzim(self.site.maxbeam-1)
                    azim = (az1, az2, self.site.bmsep)
self.azim = azim
self.beam = beam
# Set elevation
self.elev = elev
# Set time interval
if not sTime:
logging.warning('No start time. Using now.')
sTime = dt.datetime.utcnow()
if not eTime:
eTime = sTime + dt.timedelta(minutes=1)
if eTime > sTime + dt.timedelta(days=1):
                logging.warning('The time interval requested is too large. Reducing to 1 day.')
eTime = sTime + dt.timedelta(days=1)
self.time = [sTime, eTime]
self.dTime = dTime
# Set frequency
self.freq = freq
# Set number of hops
self.nhops = nhops
# Set ionosphere
self.hmf2 = hmf2 if hmf2 else 0
self.nmf2 = nmf2 if nmf2 else 0
# Set output directory and file extension
if not outDir:
outDir = rcParams['DAVIT_TMPDIR']
# outDir = path.abspath( path.curdir )
self.outDir = path.join( outDir, '' )
self.fExt = '0' if not fext else fext
# Set DaViTpy Install path
self.davitpy_path = rcParams['DAVITPY_PATH']
# Set user-supplied electron density profile
if edens_file is not None:
self.edens_file = edens_file
# Write input file
inputFile = self._genInput()
# Run the ray tracing
success = self._execute(nprocs, inputFile)
def _genInput(self):
"""Generate input file
Returns
-------
fname
"""
from os import path
fname = path.join(self.outDir, 'rtrun.{}.inp'.format(self.fExt))
with open(fname, 'w') as f:
f.write( "{:8.2f} Transmitter latitude (degrees N)\n".format( self.site.geolat ) )
f.write( "{:8.2f} Transmitter Longitude (degrees E\n".format( self.site.geolon ) )
f.write( "{:8.2f} Azimuth (degrees E) (begin)\n".format( self.azim[0] ) )
f.write( "{:8.2f} Azimuth (degrees E) (end)\n".format( self.azim[1] ) )
f.write( "{:8.2f} Azimuth (degrees E) (step)\n".format( self.azim[2] ) )
f.write( "{:8.2f} Elevation angle (begin)\n".format( self.elev[0] ) )
f.write( "{:8.2f} Elevation angle (end)\n".format( self.elev[1] ) )
f.write( "{:8.2f} Elevation angle (step)\n".format( self.elev[2] ) )
f.write( "{:8.2f} Frequency (Mhz)\n".format( self.freq ) )
f.write( "{:8d} nubmer of hops (minimum 1)\n".format( self.nhops) )
f.write( "{:8d} Year (yyyy)\n".format( self.time[0].year ) )
f.write( "{:8d} Month and day (mmdd)\n".format( self.time[0].month*100 + self.time[0].day ) )
tt = self.time[0].hour + self.time[0].minute/60.
tt += 25.
f.write( "{:8.2f} hour (add 25 for UT) (begin)\n".format( tt ) )
tt = self.time[1].hour + self.time[1].minute/60.
tt += (self.time[1].day - self.time[0].day) * 24.
tt += 25.
f.write( "{:8.2f} hour (add 25 for UT) (end)\n".format( tt ) )
f.write( "{:8.2f} hour (step)\n".format( self.dTime ) )
f.write( "{:8.2f} hmf2 (km, if 0 then ignored)\n".format( self.hmf2 ) )
f.write( "{:8.2f} nmf2 (log10, if 0 then ignored)\n".format( self.nmf2 ) )
f.write( self.davitpy_path+"\n" ) # DaViTpy install path
if hasattr(self,'edens_file'): # Path to user-defined electron profile
f.write( self.edens_file )
return fname
def _execute(self, nprocs, inputFileName):
"""Execute raytracing command
Parameters
----------
nprocs : int
number of processes to use with MPI
inputFilename : str
"""
import subprocess as subp
from os import path
command = ['mpiexec', '-n', '{}'.format(nprocs),
path.join(path.abspath( __file__.split('rt.py')[0] ), 'rtFort'),
inputFileName,
self.outDir,
self.fExt]
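        # Assembled command (illustrative): mpiexec -n 4 <raydarn dir>/rtFort <outDir>/rtrun.<fExt>.inp <outDir> <fExt>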
#print ' '.join(command)
process = subp.Popen(command, shell=False, stdout=subp.PIPE, stderr=subp.STDOUT)
output = process.communicate()[0]
exitCode = process.returncode
        if (exitCode != 0):
            logging.error('In:: {}'.format( command ))
            logging.error('Exit code:: {}'.format( exitCode ))
            logging.error('Returned:: \n' + output)
        else:
            logging.debug('In:: {}'.format( command ))
            logging.debug('Exit code:: {}'.format( exitCode ))
            logging.debug('Returned:: \n' + output)
if (exitCode != 0):
raise Exception('Fortran execution error.')
else:
# subp.call(['rm',inputFileName])
return True
def readRays(self, saveToAscii=None):
"""Read rays.dat fortran output into dictionnary
Parameters
----------
saveToAscii : Optional[str]
output content to text file
Returns
-------
Add a new member to class rt.RtRun *rays*, of type class rt.rays
"""
import subprocess as subp
from os import path
# File name and path
fName = path.join(self.outDir, 'rays.{}.dat'.format(self.fExt))
if hasattr(self, 'rays') and not path.exists(fName):
logging.error('The file is gone, and it seems you may already have read it into memory...?')
return
# Initialize rays output
self.rays = Rays(fName,
site=self.site, radar=self.radar,
saveToAscii=saveToAscii)
# Remove Input file
# subp.call(['rm',fName])
def readEdens(self):
"""Read edens.dat fortran output
Parameters
----------
None
Returns
-------
        Add a new member to class rt.RtRun *ionos*, of type class rt.Edens
"""
import subprocess as subp
from os import path
# File name and path
fName = path.join(self.outDir, 'edens.{}.dat'.format(self.fExt))
if hasattr(self, 'ionos') and not path.exists(fName):
logging.error('The file is gone, and it seems you may already have read it into memory...?')
return
# Initialize rays output
self.ionos = Edens(fName,
site=self.site, radar=self.radar)
# Remove Input file
# subp.call(['rm',fName])
def readScatter(self):
"""Read iscat.dat and gscat.dat fortran output
Parameters
----------
None
Returns
-------
        Add a new member to class rt.RtRun *scatter*, of type class rt.Scatter
"""
import subprocess as subp
from os import path
# File name and path
isName = path.join(self.outDir, 'iscat.{}.dat'.format(self.fExt))
gsName = path.join(self.outDir, 'gscat.{}.dat'.format(self.fExt))
if hasattr(self, 'scatter') \
and (not path.exists(isName) \
or not path.exists(gsName)):
logging.error('The files are gone, and it seems you may already have read them into memory...?')
return
# Initialize rays output
self.scatter = Scatter(gsName, isName,
site=self.site, radar=self.radar)
# Remove Input file
# subp.call(['rm',isName])
# subp.call(['rm',gsName])
def save(self, filename):
"""Save class rt.RtRun to a file
Parameters
----------
filename : str
"""
import cPickle as pickle
with open( filename, "wb" ) as f:
pickle.dump(self, f)
def load(self, filename):
"""Load class rt.RtRun from a file
Parameters
----------
filename : str
"""
import cPickle as pickle
with open( filename, "rb" ) as f:
obj = pickle.load(f)
for k, v in obj.__dict__.items():
self.__dict__[k] = v
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.clean()
def clean(self):
"""Clean-up files
"""
import subprocess as subp
from os import path
files = ['rays', 'edens', 'gscat', 'iscat']
for f in files:
fName = path.join(self.outDir, '{}.{}.dat'.format(f, self.fExt))
# subp.call(['rm', fName])
#########################################################################
# Electron densities
#########################################################################
class Edens(object):
"""Store and process electron density profiles after ray tracing
Parameters
----------
readFrom : str
edens.dat file to read the rays from
site : Optional[pydarn.radar.site]
radar site object
radar : Optional[pydarn.radar.radar]
radar object
Attributes
----------
readFrom : str
edens : dict
name : str
Methods
-------
Edens.readEdens
Edens.plot
"""
def __init__(self, readFrom,
site=None, radar=None):
self.readFrom = readFrom
self.edens = {}
self.name = ''
if radar:
self.name = radar.code[0].upper()
# Read rays
self.readEdens(site=site)
def readEdens(self, site=None):
"""Read edens.dat fortran output
Parameters
----------
site : Optional[pydarn.radar.radStrict.site]
site object of current radar
Returns
-------
Populate member edens class rt.Edens
"""
from struct import unpack
import datetime as dt
from numpy import array
# Read binary file
with open(self.readFrom, 'rb') as f:
logging.debug(self.readFrom + ' header: ')
self.header = _readHeader(f)
self.edens = {}
while True:
bytes = f.read(2*4)
# Check for eof
if not bytes: break
# read hour and azimuth
hour, azim = unpack('2f', bytes)
# format time index
hour = hour - 25.
mm = self.header['mmdd']/100
dd = self.header['mmdd'] - mm*100
rtime = dt.datetime(self.header['year'], mm, dd) + dt.timedelta(hours=hour)
# format azimuth index (beam)
                raz = site.azimToBeam(azim) if site else round(azim, 2)
# Initialize dicts
if rtime not in self.edens.keys(): self.edens[rtime] = {}
self.edens[rtime][raz] = {}
# Read edens dict
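                # Each record holds 250 ground-range angles ('th'), a 250 x 250 electron density
                # grid ('nel', stored in Fortran column order) and a 250 x 2 'dip' array.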
# self.edens[rtime][raz]['pos'] = array( unpack('{}f'.format(250*2),
# f.read(250*2*4)) )
self.edens[rtime][raz]['th'] = array( unpack('{}f'.format(250),
f.read(250*4)) )
self.edens[rtime][raz]['nel'] = array( unpack('{}f'.format(250*250),
f.read(250*250*4)) ).reshape((250,250), order='F')
self.edens[rtime][raz]['dip'] = array( unpack('{}f'.format(250*2),
f.read(250*2*4)) ).reshape((250,2), order='F')
def plot(self, time, beam=None, maxground=2000, maxalt=500,
nel_cmap='jet', nel_lim=[10, 12], title=False,
fig=None, rect=111, ax=None, aax=None,plot_colorbar=True,
nel_rasterize=False):
"""Plot electron density profile
Parameters
----------
time : datetime.datetime
time of profile
beam : Optional[ ]
beam number
maxground : Optional[int]
maximum ground range [km]
maxalt : Optional[int]
highest altitude limit [km]
nel_cmap : Optional[str]
color map name for electron density index coloring
nel_lim : Optional[list, int]
electron density index plotting limits
title : Optional[bool]
Show default title
fig : Optional[pylab.figure]
object (default to gcf)
rect : Optional[int]
            subplot specification
ax : Optional[ ]
Existing main axes
aax : Optional[ ]
            Existing auxiliary axes
plot_colorbar : Optional[bool]
Plot a colorbar
nel_rasterize : Optional[bool]
Rasterize the electron density plot
            (make your pdf files more manageable)
Returns
-------
ax : matplotlib.axes
object containing formatting
aax : matplotlib.axes
object containing data
cbax : matplotlib.axes
object containing colorbar
Example
-------
# Show electron density profile
import datetime as dt
from models import raydarn
sTime = dt.datetime(2012, 11, 18, 5)
rto = raydarn.RtRun(sTime, rCode='bks', beam=12)
rto.readEdens() # read electron density into memory
ax, aax, cbax = rto.ionos.plot(sTime, title=True)
ax.grid()
written by Sebastien, 2013-04
"""
import datetime as dt
from davitpy.utils import plotUtils
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
# Set up axes
if not ax and not aax:
ax, aax = plotUtils.curvedEarthAxes(fig=fig, rect=rect,
maxground=maxground, maxalt=maxalt)
else:
ax = ax
aax = aax
if hasattr(ax, 'time'):
time = ax.time
if hasattr(ax, 'beam'):
beam = ax.beam
# make sure that the required time and beam are present
# Allow a 60 second difference between the requested time and the time
# available.
keys = np.array(self.edens.keys())
diffs = np.abs(keys-time)
if diffs.min() < dt.timedelta(minutes=1):
time = keys[diffs.argmin()]
        assert (time in self.edens.keys()), logging.error('Unknown time %s' % time)
        if beam:
            assert (beam in self.edens[time].keys()), logging.error('Unknown beam %s' % beam)
else:
beam = self.edens[time].keys()[0]
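        # The Fortran run provides Nel on a 250 x 250 grid; the plot assumes its altitude axis spans
        # 60-560 km (see np.linspace below) and rebuilds matching (angle, Re + altitude) coordinates.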
X, Y = np.meshgrid(self.edens[time][beam]['th'], ax.Re + np.linspace(60,560,250))
im = aax.pcolormesh(X, Y, np.log10( self.edens[time][beam]['nel'] ),
vmin=nel_lim[0], vmax=nel_lim[1], cmap=nel_cmap,rasterized=nel_rasterize)
# Plot title with date ut time and local time
if title:
stitle = _getTitle(time, beam, self.header, None)
ax.set_title( stitle )
# Add a colorbar
cbax = None
if plot_colorbar:
cbax = plotUtils.addColorbar(im, ax)
_ = cbax.set_ylabel(r"N$_{el}$ [$\log_{10}(m^{-3})$]")
ax.beam = beam
return ax, aax, cbax
#########################################################################
# Scatter
#########################################################################
class Scatter(object):
"""Stores and process ground and ionospheric scatter
Parameters
----------
readISFrom : Optional[str]
iscat.dat file to read the ionospheric scatter from
readGSFrom : Optional[str]
gscat.dat file to read the ground scatter from
site : Optional[pydarn.radar.site]
radar site object
radar : Optional[pydarn.radar.radar]
radar object
Attributes
----------
readISFrom : str
iscat.dat file to read the ionospheric scatter from
readGSFrom : str
gscat.dat file to read the ground scatter from
gsc :
isc :
Methods
-------
Scatter.readGS
Scatter.readIS
Scatter.plot
"""
def __init__(self, readGSFrom=None, readISFrom=None,
site=None, radar=None):
self.readISFrom = readISFrom
self.readGSFrom = readGSFrom
# Read ground scatter
if self.readGSFrom:
self.gsc = {}
self.readGS(site=site)
# Read ionospheric scatter
if self.readISFrom:
self.isc = {}
self.readIS(site=site)
def readGS(self, site=None):
"""Read gscat.dat fortran output
Parameters
----------
site : Optional[pydarn.radar.radStrict.site]
site object of current radar
Returns
-------
        Populate member gsc class rt.Scatter
"""
from struct import unpack
import datetime as dt
import numpy as np
with open(self.readGSFrom, 'rb') as f:
# read header
logging.debug(self.readGSFrom + ' header: ')
self.header = _readHeader(f)
scatter_list = []
# Then read ray data, one ray at a time
while True:
bytes = f.read(3*4)
# Check for eof
if not bytes: break
                # read time, azimuth and elevation
                rhr, raz, rel = unpack('3f', bytes)
                # Read the remainder of the record
rr, tht, gran, lat, lon = unpack('5f', f.read(5*4))
# Convert azimuth to beam number
raz = site.azimToBeam(raz) if site else np.round(raz, 2)
# Adjust rel to 2 decimal
rel = np.around(rel, 2)
# convert time to python datetime
rhr = rhr - 25.
mm = self.header['mmdd']/100
dd = self.header['mmdd'] - mm*100
rtime = dt.datetime(self.header['year'], mm, dd) + dt.timedelta(hours=rhr)
# Create new entries in rays dict
if rtime not in self.gsc.keys(): self.gsc[rtime] = {}
if raz not in self.gsc[rtime].keys(): self.gsc[rtime][raz] = {}
if rel not in self.gsc[rtime][raz].keys():
self.gsc[rtime][raz][rel] = {
'r': np.empty(0),
'th': np.empty(0),
'gran': np.empty(0),
'lat': np.empty(0),
'lon': np.empty(0) }
self.gsc[rtime][raz][rel]['r'] = np.append( self.gsc[rtime][raz][rel]['r'], rr )
self.gsc[rtime][raz][rel]['th'] = np.append( self.gsc[rtime][raz][rel]['th'], tht )
self.gsc[rtime][raz][rel]['gran'] = np.append( self.gsc[rtime][raz][rel]['gran'], gran )
self.gsc[rtime][raz][rel]['lat'] = np.append( self.gsc[rtime][raz][rel]['lat'], lat )
self.gsc[rtime][raz][rel]['lon'] = np.append( self.gsc[rtime][raz][rel]['lon'], lon )
# Same thing, but let's prepare for a Pandas DataFrame...
tmp = {}
tmp['type'] = 'gs'
tmp['rtime'] = rtime
tmp['raz'] = raz
tmp['rel'] = rel
tmp['r'] = rr
tmp['th'] = tht
tmp['gran'] = gran
tmp['lat'] = lat
tmp['lon'] = lon
scatter_list.append(tmp)
self.gsc_df = pd.DataFrame(scatter_list)
def readIS(self, site=None):
"""Read iscat.dat fortran output
Parameters
----------
site : Optional[pydarn.radar.radStrict.site]
site object of current radar
Returns
-------
Populate member isc class rt.Scatter
"""
from struct import unpack
import datetime as dt
from numpy import around, array
with open(self.readISFrom, 'rb') as f:
# read header
logging.debug(self.readISFrom+' header: ')
self.header = _readHeader(f)
# Then read ray data, one ray at a time
while True:
bytes = f.read(4*4)
# Check for eof
if not bytes: break
# read number of ray steps, time, azimuth and elevation
nstp, rhr, raz, rel = unpack('4f', bytes)
nstp = int(nstp)
# Convert azimuth to beam number
raz = site.azimToBeam(raz) if site else around(raz, 2)
# Adjust rel to 2 decimal
rel = around(rel, 2)
# convert time to python datetime
rhr = rhr - 25.
mm = self.header['mmdd']/100
dd = self.header['mmdd'] - mm*100
rtime = dt.datetime(self.header['year'], mm, dd) + dt.timedelta(hours=rhr)
# Create new entries in rays dict
if rtime not in self.isc.keys(): self.isc[rtime] = {}
if raz not in self.isc[rtime].keys(): self.isc[rtime][raz] = {}
self.isc[rtime][raz][rel] = {}
# Read to paths dict
self.isc[rtime][raz][rel]['nstp'] = nstp
self.isc[rtime][raz][rel]['r'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['th'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['gran'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['rel'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['w'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['nr'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['lat'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['lon'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
self.isc[rtime][raz][rel]['h'] = array( unpack('{}f'.format(nstp),
f.read(nstp*4)) )
def plot(self, time, beam=None, maxground=2000, maxalt=500,
iscat=True, gscat=True, title=False, weighted=False, cmap='hot_r',
fig=None, rect=111, ax=None, aax=None, zorder=4):
"""Plot scatter on ground/altitude profile
Parameters
----------
time : datetime.datetime
time of profile
beam : Optional[ ]
beam number
maxground : Optional[int]
maximum ground range [km]
maxalt : Optional[int]
highest altitude limit [km]
iscat : Optional[bool]
show ionospheric scatter
gscat : Optional[bool]
show ground scatter
title : Optional[bool]
Show default title
weighted : Optional[bool]
plot ionospheric scatter relative strength (based on background density and range)
cmap : Optional[str]
colormap used for weighted ionospheric scatter
fig : Optional[pylab.figure]
object (default to gcf)
rect : Optional[int]
            subplot specification
ax : Optional[ ]
Existing main axes
aax : Optional[ ]
            Existing auxiliary axes
zorder : Optional[int]
Returns
-------
ax : matplotlib.axes
object containing formatting
aax : matplotlib.axes
object containing data
cbax : matplotlib.axes
object containing colorbar
Example
-------
# Show ionospheric scatter
import datetime as dt
from models import raydarn
sTime = dt.datetime(2012, 11, 18, 5)
rto = raydarn.RtRun(sTime, rCode='bks', beam=12)
rto.readRays() # read rays into memory
ax, aax, cbax = rto.rays.plot(sTime, title=True)
rto.readScatter() # read scatter into memory
rto.scatter.plot(sTime, ax=ax, aax=aax)
ax.grid()
written by Sebastien, 2013-04
"""
from davitpy.utils import plotUtils
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
# Set up axes
if not ax and not aax:
ax, aax = plotUtils.curvedEarthAxes(fig=fig, rect=rect,
maxground=maxground, maxalt=maxalt)
else:
ax = ax
aax = aax
if hasattr(ax, 'beam'):
beam = ax.beam
# make sure that the required time and beam are present
        assert (time in self.isc.keys() or time in self.gsc.keys()), logging.error('Unknown time %s' % time)
        if beam:
            assert (beam in self.isc[time].keys()), logging.error('Unknown beam %s' % beam)
else:
beam = self.isc[time].keys()[0]
if gscat and time in self.gsc.keys():
for ir, (el, rays) in enumerate( sorted(self.gsc[time][beam].items()) ):
if len(rays['r']) == 0: continue
_ = aax.scatter(rays['th'], ax.Re*np.ones(rays['th'].shape),
color='0', zorder=zorder)
if iscat and time in self.isc.keys():
if weighted:
wmin = np.min( [ r['w'].min() for r in self.isc[time][beam].values() if r['nstp'] > 0] )
wmax = np.max( [ r['w'].max() for r in self.isc[time][beam].values() if r['nstp'] > 0] )
for ir, (el, rays) in enumerate( sorted(self.isc[time][beam].items()) ):
if rays['nstp'] == 0: continue
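                # Each scatter point is drawn as a short segment: it starts at the ray position
                # (th, r) and is extended by the scatter-volume extent h along the local elevation
                # rel (law of cosines for the new radius, arcsine for the added angle).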
t = rays['th']
r = rays['r']*1e-3
spts = np.array([t, r]).T.reshape(-1, 1, 2)
h = rays['h']*1e-3
rel = np.radians( rays['rel'] )
r = np.sqrt( r**2 + h**2 + 2*r*h*np.sin( rel ) )
t = t + np.arcsin( h/r * np.cos( rel ) )
epts = np.array([t, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([spts, epts], axis=1)
lcol = LineCollection( segments, zorder=zorder )
if weighted:
_ = lcol.set_cmap( cmap )
_ = lcol.set_norm( plt.Normalize(0, 1) )
_ = lcol.set_array( ( rays['w'] - wmin ) / wmax )
else:
_ = lcol.set_color('0')
_ = aax.add_collection( lcol )
# Plot title with date ut time and local time
if title:
stitle = _getTitle(time, beam, self.header, None)
ax.set_title( stitle )
# If weighted, plot ionospheric scatter with colormap
if weighted:
# Add a colorbar
cbax = plotUtils.addColorbar(lcol, ax)
_ = cbax.set_ylabel("Ionospheric Scatter")
else: cbax = None
ax.beam = beam
return ax, aax, cbax
def gate_scatter(self,beam,fov):
"""
Parameters
----------
beam :
fov :
Returns
-------
lag_power
"""
#Add a 0 at the beginning to get the range gate numbering right.
# beam_inx = np.where(beam == fov.beams)[0][0]
# ranges = [0]+fov.slantRFull[beam_inx,:].tolist()
# Some useful parameters
ngates = fov.gates.size
range_gate = 180 + 45*np.arange(ngates+1,dtype=np.int)
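        # 45 km range gates with the first edge at 180 km, i.e. edges 180, 225, 270, ... km
        # (ngates + 1 edges give ngates histogram bins below).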
Re = 6370.
P = np.array(range_gate,dtype=np.float)
minpower = 4.
if self.gsc_df.size > 0:
weights = 1/(self.gsc_df.gran**3)
lag_power, bins = np.histogram(self.gsc_df.gran/1000.,bins=range_gate,weights=weights)
else:
lag_power = np.zeros_like(fov.gates,dtype=np.float)
self.pwr = lag_power
self.gates = fov.gates
return lag_power
#########################################################################
# Rays
#########################################################################
class Rays(object):
"""Store and process individual rays after ray tracing
Parameters
----------
readFrom : str
rays.dat file to read the rays from
site : Optional[pydarn.radar.site]
radar site object
radar : Optional[ pydarn.radar.radar]
radar object
saveToAscii : Optional[str]
file name where to output ray positions
Attributes
----------
readFrom : str
rays.dat file to read the rays from
paths :
name : str
Methods
-------
Rays.readRays
Rays.writeToAscii
Rays.plot
"""
def __init__(self, readFrom,
site=None, radar=None,
saveToAscii=None):
self.readFrom = readFrom
self.paths = {}
self.name = ''
if radar:
self.name = radar.code[0].upper()
# Read rays
self.readRays(site=site)
# If required, save to ascii
if saveToAscii:
self.writeToAscii(saveToAscii)
def readRays(self, site=None):
"""Read rays.dat fortran output
Parameters
----------
site : Optional[pydarn.radar.radStrict.site]
site object of current radar
Returns
-------
Populate member paths class rt.Rays
"""
from struct import unpack
import datetime as dt
from numpy import round, array
# Read binary file
with open(self.readFrom, 'rb') as f:
# read header
logging.debug(self.readFrom+' header: ')
self.header = _readHeader(f)
# Then read ray data, one ray at a time
while True:
bytes = f.read(4*4)
# Check for eof
if not bytes: break
# read number of ray steps, time, azimuth and elevation
nrstep, rhr, raz, rel = unpack('4f', bytes)
nrstep = int(nrstep)
# Convert azimuth to beam number
raz = site.azimToBeam(raz) if site else round(raz, 2)
# convert time to python datetime
rhr = rhr - 25.
mm = self.header['mmdd']/100
dd = self.header['mmdd'] - mm*100
rtime = dt.datetime(self.header['year'], mm, dd) + dt.timedelta(hours=rhr)
# Create new entries in rays dict
if rtime not in self.paths.keys(): self.paths[rtime] = {}
if raz not in self.paths[rtime].keys(): self.paths[rtime][raz] = {}
self.paths[rtime][raz][rel] = {}
# Read to paths dict
self.paths[rtime][raz][rel]['nrstep'] = nrstep
self.paths[rtime][raz][rel]['r'] = array( unpack('{}f'.format(nrstep),
f.read(nrstep*4)) )
self.paths[rtime][raz][rel]['th'] = array( unpack('{}f'.format(nrstep),
f.read(nrstep*4)) )
self.paths[rtime][raz][rel]['gran'] = array( unpack('{}f'.format(nrstep),
f.read(nrstep*4)) )
# self.paths[rtime][raz][rel]['pran'] = array( unpack('{}f'.format(nrstep),
# f.read(nrstep*4)) )
self.paths[rtime][raz][rel]['nr'] = array( unpack('{}f'.format(nrstep),
f.read(nrstep*4)) )
def writeToAscii(self, fname):
"""Save rays to ASCII file (limited use)
Parameters
----------
fname : str
filename to save to
"""
with open(fname, 'w') as f:
f.write('## HEADER ##\n')
[f.write('{:>10s}'.format(k)) for k in self.header.keys()]
f.write('\n')
for v in self.header.values():
if isinstance(v, float): strFmt = '{:10.2f}'
elif isinstance(v, int): strFmt = '{:10d}'
elif isinstance(v, str): strFmt = '{:10s}'
f.write(strFmt.format(v))
f.write('\n')
f.write('## RAYS ##\n')
for kt in sorted(self.paths.keys()):
f.write('Time: {:%Y %m %d %H %M}\n'.format(kt))
for kb in sorted(self.paths[kt].keys()):
f.write('--Beam/Azimuth: {}\n'.format(kb))
for ke in sorted(self.paths[kt][kb].keys()):
f.write('----Elevation: {:4.2f}\n'.format(ke))
f.write('------r\n')
[f.write('{:10.3f}\t'.format(r*1e-3)) for r in self.paths[kt][kb][ke]['r']]
f.write('\n')
f.write('------theta\n')
[f.write('{:10.5f}\t'.format(th)) for th in self.paths[kt][kb][ke]['th']]
f.write('\n')
def plot(self, time, beam=None,
maxground=2000, maxalt=500, step=1,
showrefract=False, nr_cmap='jet_r', nr_lim=[0.8, 1.],
raycolor='0.3', title=False, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None):
"""Plot ray paths
Parameters
----------
time : datetime.datetime
time of rays
beam: Optional[ ]
beam number
maxground : Optional[int]
maximum ground range [km]
maxalt : Optional[int]
highest altitude limit [km]
step : Optional[int]
step between each plotted ray (in number of ray steps)
showrefract : Optional[bool]
show refractive index along ray paths (supersedes raycolor)
nr_cmap : Optional[str]
color map name for refractive index coloring
nr_lim : Optional[list, float]
refractive index plotting limits
raycolor : Optional[float]
color of ray paths
title : Optional[bool]
Show default title
zorder : Optional[int]
alpha : Optional[int]
fig : Optional[pylab.figure]
object (default to gcf)
rect : Optional[int]
            subplot specification
ax : Optional[ ]
Existing main axes
aax : Optional[ ]
            Existing auxiliary axes
Returns
-------
ax : matplotlib.axes
object containing formatting
aax : matplotlib.axes
object containing data
cbax : matplotlib.axes
object containing colorbar
Example
-------
# Show ray paths with colored refractive index along path
import datetime as dt
from davitpy.models import raydarn
sTime = dt.datetime(2012, 11, 18, 5)
rto = raydarn.RtRun(sTime, rCode='bks', beam=12, title=True)
rto.readRays() # read rays into memory
ax, aax, cbax = rto.rays.plot(sTime, step=10, showrefract=True, nr_lim=[.85,1])
ax.grid()
written by Sebastien, 2013-04
"""
import datetime as dt
from davitpy.utils import plotUtils
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from types import MethodType
# Set up axes
if not ax and not aax:
ax, aax = plotUtils.curvedEarthAxes(fig=fig, rect=rect,
maxground=maxground, maxalt=maxalt)
else:
ax = ax
aax = aax
if hasattr(ax, 'time'):
time = ax.time
if hasattr(ax, 'beam'):
beam = ax.beam
# make sure that the required time and beam are present
# Allow a 60 second difference between the requested time and the time
# available.
keys = np.array(self.paths.keys())
diffs = np.abs(keys-time)
if diffs.min() < dt.timedelta(minutes=1):
time = keys[diffs.argmin()]
        assert (time in self.paths.keys()), logging.error('Unknown time %s' % time)
        if beam:
            assert (beam in self.paths[time].keys()), logging.error('Unknown beam %s' % beam)
else:
beam = self.paths[time].keys()[0]
for ir, (el, rays) in enumerate( sorted(self.paths[time][beam].items()) ):
if not ir % step:
if not showrefract:
aax.plot(rays['th'], rays['r']*1e-3, c=raycolor,
zorder=zorder, alpha=alpha)
else:
points = np.array([rays['th'], rays['r']*1e-3]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( rays['nr'] )
_ = aax.add_collection( lcol )
# Plot title with date ut time and local time
if title:
stitle = _getTitle(time, beam, self.header, self.name)
ax.set_title( stitle )
# Add a colorbar when plotting refractive index
if showrefract:
cbax = plotUtils.addColorbar(lcol, ax)
_ = cbax.set_ylabel("refractive index")
else: cbax = None
# Declare a new method to show range markers
# This method is only available after rays have been plotted
# This ensures that the markers match the plotted rays
def showRange(self, markers=None,
color='.8', s=2, zorder=3,
**kwargs):
"""Plot ray paths
Parameters
----------
markers : Optional[ ]
range markers. Defaults to every 250 km
color : Optional[float]
s : Optional[int]
zorder : Optional[int]
**kwargs :
Returns
-------
coll :
a collection of range markers
Notes
-----
Parameters other than markers are borrowed from matplotlib.pyplot.scatter
Example
-------
# Add range markers to an existing ray plot
ax, aax, cbax = rto.rays.plot(sTime, step=10)
rto.rays.showRange()
written by Sebastien, 2013-04
"""
if not markers:
markers = np.arange(0, 5000, 250)
x, y = [], []
for el, rays in self.paths[time][beam].items():
for rm in markers:
inds = (rays['gran']*1e-3 >= rm)
if inds.any():
x.append( rays['th'][inds][0] )
y.append( rays['r'][inds][0]*1e-3 )
coll = aax.scatter(x, y,
color=color, s=s, zorder=zorder, **kwargs)
return coll
# End of new method
# Assign new method
self.showRange = MethodType(showRange, self)
ax.beam = beam
return ax, aax, cbax
#########################################################################
# Misc.
#########################################################################
def _readHeader(fObj):
"""Read the header part of ray-tracing *.dat files
Parameters
----------
fObj :
file object
Returns
-------
header : dict
a dictionary of header values
"""
from struct import unpack
import datetime as dt
from collections import OrderedDict
import os
# Declare header parameters
params = ('nhour', 'nazim', 'nelev',
'tlat', 'tlon',
'saz', 'eaz', 'daz',
'sel', 'eel', 'del',
'freq', 'nhop', 'year', 'mmdd',
'shour', 'ehour', 'dhour',
'hmf2', 'nmf2')
# Read header
header = OrderedDict( zip( params, unpack('3i9f3i5f', fObj.read(3*4 + 9*4 + 3*4 + 5*4)) ) )
header['fext'] = unpack('10s', fObj.read(10))[0].strip()
header['outdir'] = unpack('250s', fObj.read(250))[0].strip()
header['indir'] = unpack('250s', fObj.read(250))[0].strip()
# Only print header if in debug mode
for k, v in header.items(): logging.debug('{:10s} :: {}'.format(k,v))
header.pop('fext'); header.pop('outdir')
header.pop('indir')
return header
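# Editorial note (added by the editor, not in the original source): the fixed
# numeric header read above is 80 bytes -- struct.calcsize('3i9f3i5f') equals
# 3*4 + 9*4 + 3*4 + 5*4 -- and is followed by three fixed-width strings of 10,
# 250 and 250 bytes ('fext', 'outdir', 'indir'), which are logged in debug mode
# and then dropped before the header dict is returned.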
def _getTitle(time, beam, header, name):
"""Create a title for ground/altitude plots
Parameters
----------
time : datetime.datetime
time shown in plot
beam :
beam shown in plot
header : dict
header of fortran output file
name : str
radar name
Returns
-------
title : str
a title string
"""
from numpy import floor, round
utdec = time.hour + time.minute/60.
tlon = (header['tlon'] % 360.)
ctlon = tlon if tlon <=180. else tlon - 360.
ltdec = ( utdec + ( ctlon/360.*24.) ) % 24.
lthr = floor(ltdec)
ltmn = round( (ltdec - lthr)*60 )
title = '{:%Y-%b-%d at %H:%M} UT (~{:02.0f}:{:02.0f} LT)'.format(
time, lthr, ltmn)
title += '\n(IRI-2012) {} beam {}; freq {:.1f}MHz'.format(name, beam, header['freq'])
return title
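# ---------------------------------------------------------------------------
# Usage sketch (added by the editor; illustrative only -- the file name
# 'rays.dat' and an installed davitpy/raydarn environment are assumptions):
#
#     rays = Rays('rays.dat', saveToAscii='rays.txt')
#     for rtime in sorted(rays.paths.keys()):
#         ax, aax, cbax = rays.plot(rtime, step=10, showrefract=True)
#         rays.showRange()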
| gpl-3.0 |
sarahgrogan/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
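# Editorial note (not part of the original example): scipy.misc.lena() was
# removed from newer SciPy releases; on such versions a drop-in grayscale
# substitute is the raccoon-face sample image, e.g.
#     lena = sp.misc.face(gray=True)    # or scipy.datasets.face(gray=True)
# the downsampling and graph construction below work unchanged on that array.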
| bsd-3-clause |
IssamLaradji/scikit-learn | sklearn/setup.py | 24 | 2991 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for hmm
config.add_extension(
'_hmmc',
sources=['_hmmc.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; a fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
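# Editorial note (not part of the original file): this configuration() is
# normally driven from the project's top-level setup.py, e.g.
#     python setup.py build_ext --inplace
# which recurses into the subpackages registered above and builds the listed
# C extensions against numpy and, where needed, the bundled cblas sources.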
| bsd-3-clause |
kanchenxi04/vnpy-app | vn.trader/ctaAlgo/strategy_MACD_v05.py | 2 | 31502 | # encoding: UTF-8
# 首先写系统内置模块
from datetime import datetime, timedelta, date
from time import sleep
# 其次,导入vnpy的基础模块
import sys
# sys.path.append('C:\\vnpy_1.5\\vnpy-master\\vn.trader')
sys.path.append('../')
from vtConstant import EMPTY_STRING, EMPTY_INT, DIRECTION_LONG, DIRECTION_SHORT, OFFSET_OPEN, STATUS_CANCELLED
from utilSinaClient import UtilSinaClient
# 然后是自己编写的模块
from ctaTemplate import *
from ctaBase import *
from ctaLineBar import *
from ctaPosition import *
from ctaPolicy import *
from ctaBacktesting import BacktestingEngine
class Strategy_MACD_01(CtaTemplate):
"""螺纹钢、15分钟级别MACD策略
v1:15f上多空仓开仓
v2:60f上的仓位管理体系
v3:15f上的加仓减仓+开仓点位优化
v4:增加15f上的止损策略
注意:策略用在不同品种上需要调整策略参数
本版本现存问题:
无
已解决问题:
15f上按照百分比开仓
"""
className = 'Strategy_MACD'
author = u'横纵19950206'
# 策略在外部设置的参数
# ----------------------------------------------------------------------
def __init__(self, ctaEngine, setting=None):
"""Constructor"""
super(Strategy_MACD_01, self).__init__(ctaEngine, setting)
# 增加监控参数项目
# 增加监控变量项目
self.varList.append('pos') # 仓位,这里的仓位通常是手数
self.varList.append('entrust') # 是否正在委托,0表示没有委托,1多仓,-1空仓
self.varList.append('percentLimit') # 当前账户最大仓位
self.varList.append('percent') # 当前仓位
self.curDateTime = None # 当前Tick时间
self.curTick = None # 最新的tick
self.lastOrderTime = None # 上一次委托时间
self.cancelSeconds = 60 # 撤单时间(秒)
# 定义日内的交易窗口
self.openWindow = False # 开市窗口
self.tradeWindow = False # 交易窗口
self.closeWindow = False # 收市平仓窗口
self.inited = False # 是否完成了策略初始化
self.backtesting = False # 是否回测
self.lineM15 = None # 5分钟K线
self.lineM60 = None # 60分钟k线
# 创建一个策略规则
self.policy = CtaPolicy()
self.atr = 10 # 平均波动
# 增加仓位管理模块
self.position = CtaPosition(self)
# self.position.longPos多头持仓,self.position.shorPos多头持仓、
# self.position.pos持仓状态,self.position.maxPos最大持仓
# 增加ctabacktesing中的仓位管理
if not ctaEngine:
self.engine = BacktestingEngine()
else:
self.engine = ctaEngine
# 实时权益,可用资金,仓位比例,仓位比例上限
self.capital, self.available, self.percent, self.percentLimit = self.engine.getAccountInfo()
if setting:
# 根据配置文件更新参数
self.setParam(setting)
# 创建的M15 K线
# TODO macd参数选择:中长线配置36+60+10,超短9+17+8,正常:12+26+9
lineM15Setting = {}
lineM15Setting['name'] = u'M15' # k线名称
lineM15Setting['barTimeInterval'] = 60 * 15 # K线的Bar时长
lineM15Setting['inputMacdFastPeriodLen'] = 12 # DIF快线
lineM15Setting['inputMacdSlowPeriodLen'] = 26 # DEA慢线
lineM15Setting['inputMacdSignalPeriodLen'] = 9 # MACD中绿柱
lineM15Setting['inputPreLen'] = 20 # 前高/前低
lineM15Setting['shortSymbol'] = self.shortSymbol
self.lineM15 = CtaLineBar(self, self.onBarM15, lineM15Setting)
try:
mode = setting['mode']
if mode != EMPTY_STRING:
self.lineM15.setMode(setting['mode'])
except KeyError:
self.lineM15.setMode(self.lineM15.TICK_MODE)
self.onInit()
# ----------------------------------------------------------------------
def onInit(self, force=False):
"""初始化 """
if force:
self.writeCtaLog(u'策略强制初始化')
self.inited = False
self.trading = False # 控制是否启动交易
else:
self.writeCtaLog(u'策略初始化')
if self.inited:
self.writeCtaLog(u'已经初始化过,不再执行')
return
self.position.pos = EMPTY_INT # 初始化持仓
self.entrust = EMPTY_INT # 初始化委托状态
self.percent = EMPTY_INT # 初始化仓位状态
if not self.backtesting:
# 这里需要加载前置数据哦。
if not self.__initDataFromSina():
return
self.inited = True # 更新初始化标识
self.trading = True # 启动交易
self.putEvent()
self.writeCtaLog(u'策略初始化完成')
def __initDataFromSina(self):
"""从sina初始化5分钟数据"""
sina = UtilSinaClient(self)
ret = sina.getMinBars(symbol=self.symbol, minute=15, callback=self.lineM15.addBar)
if not ret:
self.writeCtaLog(u'获取M15数据失败')
return False
return True
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'启动')
# ----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.uncompletedOrders.clear()
self.position.pos = EMPTY_INT
self.entrust = EMPTY_INT
self.percent = EMPTY_INT
self.writeCtaLog(u'停止')
self.putEvent()
# ----------------------------------------------------------------------
def onTrade(self, trade):
"""交易更新"""
self.writeCtaLog(u'{0},OnTrade(),当前持仓:{1},当前仓位:{2} '.format(self.curDateTime, self.position.pos, self.percent))
# ----------------------------------------------------------------------
def onOrder(self, order):
"""报单更新"""
self.writeCtaLog(
u'OnOrder()报单更新,orderID:{0},{1},totalVol:{2},tradedVol:{3},offset:{4},price:{5},direction:{6},status:{7}'
.format(order.orderID, order.vtSymbol, order.totalVolume, order.tradedVolume,
order.offset, order.price, order.direction, order.status))
# 委托单主键,vnpy使用 "gateway.orderid" 的组合
orderkey = order.gatewayName + u'.' + order.orderID
if orderkey in self.uncompletedOrders:
if order.totalVolume == order.tradedVolume:
# 开仓,平仓委托单全部成交
# 平空仓完成(cover)
if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_LONG and order.offset != OFFSET_OPEN:
self.writeCtaLog(u'平空仓完成,原持仓:{0},原仓位{1}'.format(self.position.pos, self.percent))
self.position.closePos(direction=DIRECTION_LONG, vol=order.tradedVolume)
self.writeCtaLog(u'新持仓:{0},新仓位{1}'.format(self.position.pos, self.percent))
# 平多仓完成(sell)
if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_SHORT and order.offset != OFFSET_OPEN:
                    self.writeCtaLog(u'平多仓完成,原持仓:{0},原仓位{1}'.format(self.position.pos, self.percent))
self.position.closePos(direction=DIRECTION_SHORT, vol=order.tradedVolume)
self.writeCtaLog(u'新持仓:{0},新仓位{1}'.format(self.position.pos, self.percent))
# 开多仓完成
if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_LONG and order.offset == OFFSET_OPEN:
                    self.writeCtaLog(u'开多仓完成,原持仓:{0},原仓位{1}'.format(self.position.pos, self.percent))
self.position.openPos(direction=DIRECTION_LONG, vol=order.tradedVolume, price=order.price)
self.writeCtaLog(u'新持仓:{0},新仓位{1}'.format(self.position.pos, self.percent))
# 开空仓完成
if self.uncompletedOrders[orderkey]['DIRECTION'] == DIRECTION_SHORT and order.offset == OFFSET_OPEN:
# 更新仓位
                    self.writeCtaLog(u'开空仓完成,原持仓:{0},原仓位{1}'.format(self.position.pos, self.percent))
self.position.openPos(direction=DIRECTION_SHORT, vol=order.tradedVolume, price=order.price)
self.writeCtaLog(u'新持仓:{0},新仓位{1}'.format(self.position.pos, self.percent))
del self.uncompletedOrders[orderkey]
if len(self.uncompletedOrders) == 0:
self.entrust = 0
self.lastOrderTime = None
elif order.tradedVolume > 0 and not order.totalVolume == order.tradedVolume and order.offset != OFFSET_OPEN:
# 平仓委托单部分成交
pass
elif order.offset == OFFSET_OPEN and order.status == STATUS_CANCELLED:
# 开仓委托单被撤销
self.entrust = 0
pass
else:
self.writeCtaLog(u'OnOrder()委托单返回,total:{0},traded:{1}'
.format(order.totalVolume, order.tradedVolume, ))
self.putEvent() # 更新监控事件
# ----------------------------------------------------------------------
def onStopOrder(self, orderRef):
"""停止单更新"""
self.writeCtaLog(u'{0},停止单触发,orderRef:{1}'.format(self.curDateTime, orderRef))
pass
# ----------------------------------------------------------------------
def onTick(self, tick):
"""行情更新
:type tick: object
"""
self.curTick = tick
if (tick.datetime.hour >= 3 and tick.datetime.hour <= 8) or (
tick.datetime.hour >= 16 and tick.datetime.hour <= 20):
self.writeCtaLog(u'休市/集合竞价排名时数据不处理')
return
# 更新策略执行的时间(用于回测时记录发生的时间)
self.curDateTime = tick.datetime
# 2、计算交易时间和平仓时间
self.__timeWindow(self.curDateTime)
# 推送Tick到lineM15
self.lineM15.onTick(tick)
# 首先检查是否是实盘运行还是数据预处理阶段
if not (self.inited and len(self.lineM15.inputMacdSlowPeriodLen) > 0):
return
# ----------------------------------------------------------------------
def onBar(self, bar):
"""分钟K线数据更新(仅用于回测时,从策略外部调用)"""
# 更新策略执行的时间(用于回测时记录发生的时间)
# 回测数据传送的bar.datetime,为bar的开始时间,所以,到达策略时,当前时间为bar的结束时间
self.curDateTime = bar.datetime + timedelta(seconds=self.lineM15.barTimeInterval)
# 2、计算交易时间和平仓时间
self.__timeWindow(bar.datetime)
# 推送tick到15分钟K线
self.lineM15.addBar(bar)
# self.lineM60.addBar(bar)
# 4、交易逻辑
# 首先检查是否是实盘运行还是数据预处理阶段
if not self.inited:
if len(self.lineM15.lineBar) > 120 + 5:
self.inited = True
else:
return
def onBarM15(self, bar):
"""分钟K线数据更新,实盘时,由self.lineM15的回调"""
# 调用lineM15的显示bar内容
self.writeCtaLog(self.lineM15.displayLastBar())
# 未初始化完成
if not self.inited:
if len(self.lineM15.lineBar) > 120 + 5:
self.inited = True
else:
return
# 执行撤单逻辑
self.__cancelLogic(dt=self.curDateTime)
if self.lineM15.mode == self.lineM15.TICK_MODE:
idx = 2
else:
idx = 1
# 收集前15个dif和dea的数据
difdea = []
# TODO lineMacd需要根据不同的品种进行和周期进行调整,后期可能研究一下bp网格进行参数的动态优化
jincha_15f = self.lineM15.lineDif[-1 - idx] < self.lineM15.lineDea[-1 - idx] \
and self.lineM15.lineDif[0 - idx] > self.lineM15.lineDea[0 - idx] \
and abs(self.lineM15.lineMacd[0 - idx]) >= 2
sicha_15f = self.lineM15.lineDif[-1 - idx] > self.lineM15.lineDea[-1 - idx] \
and self.lineM15.lineDif[0 - idx] < self.lineM15.lineDea[0 - idx] \
and abs(self.lineM15.lineMacd[0 - idx]) >= 2
# 用于止盈的策略变量
up_4line = self.lineM15.lineMacd[-3 - idx] > self.lineM15.lineMacd[-2 - idx] > self.lineM15.lineMacd[
-1 - idx] > self.lineM15.lineMacd[0 - idx] and self.lineM15.lineMacd[-3 - idx] >= 10
down_4line = self.lineM15.lineMacd[-3 - idx] < self.lineM15.lineMacd[-2 - idx] < self.lineM15.lineMacd[
-1 - idx] < self.lineM15.lineMacd[0 - idx] and self.lineM15.lineMacd[-3 - idx] <= -10
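        # Editorial note (added by the editor): jincha_15f / sicha_15f mark a
        # MACD golden cross / death cross on the 15-minute bars -- the DIF line
        # crossing above / below the DEA line between the previous and current
        # bar, with the histogram bar at least 2 in magnitude to filter out
        # flat crossings; up_4line / down_4line flag four successively falling
        # (rising) histogram bars starting from at least +10 (-10), originally
        # intended for scaling out of positions.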
# 如果未持仓,检查是否符合开仓逻辑
if self.position.pos == 0:
# DIF快线上穿DEA慢线,15f上金叉,做多
# 多仓的时候记录前期顶分型的价格,并且以此价格的稍高位做为止损位
if jincha_15f:
self.percentLimit = 0.4
vol = self.getAvailablePos(bar)
if not vol:
return
for n in range(15):
difdea.append(self.lineM15.lineDif[-n - idx])
difdea.append(self.lineM15.lineDea[-n - idx])
if max(difdea) >= 25: # 高位金叉,不开多仓
return
if max(difdea) <= -30: # 低位金叉,开重仓
self.percentLimit = self.percentLimit + 0.1
vol = self.getAvailablePos(bar)
if not vol:
return
self.writeCtaLog(u'{0},开仓多单{1}手,价格:{2}'.format(bar.datetime, vol, bar.close))
orderid = self.buy(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
# 更新下单价格(为了定时撤单)
self.lastOrderTime = self.curDateTime
# 更新开仓价格,开仓价格为bar的收盘价
self.policy.entryPrice = bar.close
# 多仓,多仓,设置前低为止损价,设定固定止损价格为前低
# 这里的止损是把这里作为一买点的,如果跌破前低,说明这里不是一买,止损
# 后期应该附加上缠论的顶底分型作为止损精确点
self.policy.exitOnStopPrice = self.lineM15.preLow[-1]
# TODO: 这个后面后期可能要适当加一定参数
return
else: # 在-30到30的位置
self.writeCtaLog(u'{0},开仓多单{1}手,价格:{2}'.format(bar.datetime, vol, bar.close))
orderid = self.buy(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
# 更新下单价格(为了定时撤单)
self.lastOrderTime = self.curDateTime
# 更新开仓价格,开仓价格为bar的收盘价
self.policy.entryPrice = bar.close
# 多仓,多仓,设置前低为止损价,设定固定止损价格为前低
# 这里的止损是把这里作为一买点的,如果跌破前低,说明这里不是一买,止损
# 后期应该附加上缠论的顶底分型作为止损精确点
self.policy.exitOnStopPrice = self.lineM15.preLow[-1]
# 这个后面后期可能要适当加一定参数
return
# DIF快线下穿DEA慢线,15f上死叉,做空
if sicha_15f:
self.percentLimit = 0.4
vol = self.getAvailablePos(bar)
if not vol:
return
for n in range(15):
difdea.append(self.lineM15.lineDif[-n - idx])
difdea.append(self.lineM15.lineDea[-n - idx])
# TODO 高低位的参数的判断
if max(difdea) >= 30: # 高位死叉,开重仓
self.percentLimit = self.percentLimit + 0.1 # 50%
vol = self.getAvailablePos(bar)
if not vol:
return
                    self.writeCtaLog(u'{0},开仓空单{1}手,价格:{2}'.format(bar.datetime, vol, bar.close))
orderid = self.short(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
# 更新下单价格(为了定时撤单)
self.lastOrderTime = self.curDateTime
# 更新开仓价格
self.policy.entryPrice = bar.close
# 做空,设置前高为止损价,exitOnStopPrice固定止损价格
self.policy.exitOnStopPrice = self.lineM15.preHigh[-1]
return
if max(difdea) <= -25: # 低位死叉,不开单
return
else:
                    self.writeCtaLog(u'{0},开仓空单{1}手,价格:{2}'.format(bar.datetime, vol, bar.close))
                    orderid = self.short(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
# 更新下单价格(为了定时撤单)
self.lastOrderTime = self.curDateTime
return
# 持仓,检查是否满足平仓条件
else: # 持仓
"""
这里减仓策略加入后收益降低了,回头得对着图核对一下,修改减仓策略
"""
# # 多单减仓
# if self.position.pos > 0 and self.entrust != -1 and up_4line:
# self.writeCtaLog(u'{0},平仓多单{1}手,价格:{2}'.format(bar.datetime, self.position.pos / 2, bar.close))
# orderid = self.sell(price=bar.close, volume=self.position.pos / 2, orderTime=self.curDateTime)
# if orderid:
# self.lastOrderTime = self.curDateTime
# return
#
# # 空单减仓
# if self.position.pos < 0 and self.entrust != 1 and down_4line:
# self.writeCtaLog(u'{0},平仓空单{1}手,价格:{2}'.format(bar.datetime, self.position.pos / 2, bar.close))
# vol = self.position.pos * -1
# orderid = self.cover(price=bar.close, volume=vol / 2, orderTime=self.curDateTime)
# if orderid:
# self.lastOrderTime = self.curDateTime
# return
# 死叉,多单离场
if self.lineM15.lineDif[0 - idx] < self.lineM15.lineDea[0 - idx] \
and abs(self.lineM15.lineMacd[0 - idx]) > 2 \
and self.position.pos > 0 and self.entrust != -1:
self.writeCtaLog(u'{0},平仓多单{1}手,价格:{2}'.format(bar.datetime, self.position.pos, bar.close))
orderid = self.sell(price=bar.close, volume=self.position.pos, orderTime=self.curDateTime)
if orderid:
self.lastOrderTime = self.curDateTime
return
# 金叉,空单离场
if self.lineM15.lineDif[0 - idx] > self.lineM15.lineDea[0 - idx] \
and abs(self.lineM15.lineMacd[0 - idx]) > 2 \
and self.position.pos < 0 and self.entrust != 1:
self.writeCtaLog(u'{0},平仓空单{1}手,价格:{2}'.format(bar.datetime, self.position.pos, bar.close))
vol = self.position.pos * -1
orderid = self.cover(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
self.lastOrderTime = self.curDateTime
return
# 固定止损
if self.policy.exitOnStopPrice > 0:
# 多单止损
if self.pos > 0 and self.entrust != 1 and bar.close < self.policy.exitOnStopPrice:
self.writeCtaLog(u'{0},固定止损,平仓多单{1}手,价格:{2}'.format(bar.datetime, self.position.pos, bar.close))
orderid = self.sell(price=bar.close, volume=self.position.pos, orderTime=self.curDateTime)
if orderid:
# 更新下单时间(为了定时撤单)
self.lastOrderTime = self.curDateTime
return
# 空单止损
if self.pos < 0 and self.entrust != -1 and bar.close > self.policy.exitOnStopPrice:
vol = self.position.pos * -1
self.writeCtaLog(u'{0},固定止损,平仓空单{1}手,价格:{2}'.format(bar.datetime, vol, bar.close))
orderid = self.cover(price=bar.close, volume=vol, orderTime=self.curDateTime)
if orderid:
# 更新下单时间(为了定时撤单)
self.lastOrderTime = self.curDateTime
return
# ----------------------------------------------------------------------
def __cancelLogic(self, dt, force=False):
"撤单逻辑"""
if len(self.uncompletedOrders) < 1:
return
if not self.lastOrderTime:
self.writeCtaLog(u'异常,上一交易时间为None')
return
# 平仓检查时间比开开仓时间需要短一倍
if (self.position.pos >= 0 and self.entrust == 1) \
or (self.position.pos <= 0 and self.entrust == -1):
i = 1
else:
i = 1 # 原来是2,暂时取消
canceled = False
if ((dt - self.lastOrderTime).seconds > self.cancelSeconds / i) \
or force: # 超过设置的时间还未成交
for order in self.uncompletedOrders.keys():
self.writeCtaLog(u'{0}超时{1}秒未成交,取消委托单:{2}'.format(dt, (dt - self.lastOrderTime).seconds, order))
self.cancelOrder(str(order))
canceled = True
# 取消未完成的订单
self.uncompletedOrders.clear()
if canceled:
self.entrust = 0
else:
self.writeCtaLog(u'异常:没有撤单')
def __timeWindow(self, dt):
"""交易与平仓窗口"""
# 交易窗口 避开早盘和夜盘的前5分钟,防止隔夜跳空。
self.closeWindow = False
self.tradeWindow = False
self.openWindow = False
# 初始化当日的首次交易
# if (tick.datetime.hour == 9 or tick.datetime.hour == 21) and tick.datetime.minute == 0 and tick.datetime.second ==0:
# self.firstTrade = True
# 开市期,波动较大,用于判断止损止盈,或开仓
if (dt.hour == 9 or dt.hour == 21) and dt.minute < 2:
self.openWindow = True
# 日盘
if dt.hour == 9 and dt.minute >= 0:
self.tradeWindow = True
return
if dt.hour == 10:
if dt.minute <= 15 or dt.minute >= 30:
self.tradeWindow = True
return
if dt.hour == 11 and dt.minute <= 30:
self.tradeWindow = True
return
if dt.hour == 13 and dt.minute >= 30:
self.tradeWindow = True
return
if dt.hour == 14:
if dt.minute < 59:
self.tradeWindow = True
return
if dt.minute == 59: # 日盘平仓
self.closeWindow = True
return
# 夜盘
if dt.hour == 21 and dt.minute >= 0:
self.tradeWindow = True
return
# 上期 贵金属, 次日凌晨2:30
if self.shortSymbol in NIGHT_MARKET_SQ1:
if dt.hour == 22 or dt.hour == 23 or dt.hour == 0 or dt.hour == 1:
self.tradeWindow = True
return
if dt.hour == 2:
if dt.minute < 29: # 收市前29分钟
self.tradeWindow = True
return
if dt.minute == 29: # 夜盘平仓
self.closeWindow = True
return
return
# 上期 有色金属,黑色金属,沥青 次日01:00
if self.shortSymbol in NIGHT_MARKET_SQ2:
if dt.hour == 22 or dt.hour == 23:
self.tradeWindow = True
return
if dt.hour == 0:
if dt.minute < 59: # 收市前29分钟
self.tradeWindow = True
return
if dt.minute == 59: # 夜盘平仓
self.closeWindow = True
return
return
# 上期 天然橡胶 23:00
if self.shortSymbol in NIGHT_MARKET_SQ3:
if dt.hour == 22:
if dt.minute < 59: # 收市前1分钟
self.tradeWindow = True
return
if dt.minute == 59: # 夜盘平仓
self.closeWindow = True
return
# 郑商、大连 23:30
if self.shortSymbol in NIGHT_MARKET_ZZ or self.shortSymbol in NIGHT_MARKET_DL:
if dt.hour == 22:
self.tradeWindow = True
return
if dt.hour == 23:
if dt.minute < 29: # 收市前1分钟
self.tradeWindow = True
return
if dt.minute == 29 and dt.second > 30: # 夜盘平仓
self.closeWindow = True
return
return
# ----------------------------------------------------------------------
def strToTime(self, t, ms):
"""从字符串时间转化为time格式的时间"""
hh, mm, ss = t.split(':')
tt = datetime.time(int(hh), int(mm), int(ss), microsecond=ms)
return tt
# ----------------------------------------------------------------------
def saveData(self, id):
"""保存过程数据"""
# 保存K线
if not self.backtesting:
return
# ----------------------------------------------------------------------
def getAvailablePos(self, bar):
"""剩余可开仓数量"""
# 实时权益,可用资金,仓位比例,仓位比例上限
capital, avail, _, _ = self.engine.getAccountInfo()
avail = min(avail, capital * self.percentLimit)
midPrice = (bar.high - bar.low) / 2 + bar.low
pricePerLot = self.engine.moneyPerLot(midPrice, self.vtSymbol) # 每笔所需保证金
if pricePerLot:
return int(avail / pricePerLot) # 剩余可加仓数量(整数)
else:
return None
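    # Editorial worked example (hypothetical numbers, not from the original
    # code): with capital = 100000, percentLimit = 0.4 and a per-lot margin of
    # roughly 4000 as returned by engine.moneyPerLot(), getAvailablePos() caps
    # the usable funds at 40000 and returns int(40000 / 4000) = 10 lots.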
def testRbByTick():
# 创建回测引擎
engine = BacktestingEngine()
# 设置引擎的回测模式为Tick
engine.setBacktestingMode(engine.TICK_MODE)
# 设置回测用的数据起始日期
engine.setStartDate('20100101')
# 设置回测用的数据结束日期
engine.setEndDate('20160330')
# engine.connectMysql()
engine.setDatabase(dbName='stockcn', symbol='rb')
# 设置产品相关参数
engine.setSlippage(0.5) # 1跳(0.1)2跳0.2
engine.setRate(float(0.0001)) # 万1
engine.setSize(10) # 合约大小
settings = {}
settings['vtSymbol'] = 'RB'
settings['shortSymbol'] = 'RB'
settings['name'] = 'MACD'
settings['mode'] = 'tick'
settings['backtesting'] = True
# 在引擎中创建策略对象
engine.initStrategy(Strategy_MACD_01, setting=settings)
# 使用简单复利模式计算
engine.usageCompounding = False # True时,只针对FINAL_MODE有效
# 启用实时计算净值模式REALTIME_MODE / FINAL_MODE 回测结束时统一计算模式
engine.calculateMode = engine.REALTIME_MODE
engine.initCapital = 100000 # 设置期初资金
engine.percentLimit = 30 # 设置资金使用上限比例(%)
engine.barTimeInterval = 60 * 5 # bar的周期秒数,用于csv文件自动减时间
engine.fixCommission = 10 # 固定交易费用(每次开平仓收费)
# 开始跑回测
engine.runBacktestingWithMysql()
# 显示回测结果
engine.showBacktestingResult()
def testRbByBar():
# 创建回测引擎
engine = BacktestingEngine()
# 设置引擎的回测模式为Tick
engine.setBacktestingMode(engine.BAR_MODE)
# 设置回测用的数据起始日期
engine.setStartDate('20170101')
# 设置回测用的数据结束日期
engine.setEndDate('20170605')
# engine.setDatabase(dbName='stockcn', symbol='rb')
engine.setDatabase(dbName='stockcn', symbol='RB')
# 设置产品相关参数
engine.setSlippage(0.5) # 1跳(0.1)2跳0.2
engine.setRate(float(0.0003)) # 万3
engine.setSize(10) # 合约大小
settings = {}
# settings['vtSymbol'] = 'rb'
settings['vtSymbol'] = 'RB'
settings['shortSymbol'] = 'RB'
settings['name'] = 'MACD'
settings['mode'] = 'bar'
settings['backtesting'] = True
settings['percentLimit'] = 30
# 在引擎中创建策略对象
engine.initStrategy(Strategy_MACD_01, setting=settings)
# 使用简单复利模式计算
engine.usageCompounding = False # True时,只针对FINAL_MODE有效
# 启用实时计算净值模式REALTIME_MODE / FINAL_MODE 回测结束时统一计算模式
engine.calculateMode = engine.REALTIME_MODE
engine.initCapital = 100000 # 设置期初资金
engine.percentLimit = 40 # 设置资金使用上限比例(%)
engine.barTimeInterval = 300 # bar的周期秒数,用于csv文件自动减时间
# 开始跑回测
engine.runBackTestingWithBarFile(os.getcwd() + '/cache/RB99_20100101_20170605_15m.csv')
# 显示回测结果
engine.showBacktestingResult()
# 从csv文件进行回测
if __name__ == '__main__':
# 提供直接双击回测的功能
# 导入PyQt4的包是为了保证matplotlib使用PyQt4而不是PySide,防止初始化出错
from ctaBacktesting import *
from setup_logger import setup_logger
setup_logger(
filename=u'TestLogs/{0}_{1}.log'.format(Strategy_MACD_01.className, datetime.now().strftime('%m%d_%H%M')),
debug=False
)
# 回测螺纹
testRbByBar()
| mit |
gnieboer/tensorflow | tensorflow/python/client/notebook.py | 109 | 4791 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
eladnoor/equilibrator-api | pathways_cmd.py | 1 | 1031 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 1 13:24:15 2017
@author: noore
"""
import argparse
import logging
from equilibrator_api import Pathway
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Calculate the Max-min Driving Force (MDF) of a pathway.')
parser.add_argument(
'infile', type=argparse.FileType(),
help='path to input file containing reactions')
parser.add_argument(
'outfile', type=str,
help='path to output PDF file')
logging.getLogger().setLevel(logging.WARNING)
args = parser.parse_args()
pp = Pathway.from_sbtab(args.infile)
output_pdf = PdfPages(args.outfile)
mdf_res = pp.calc_mdf()
output_pdf.savefig(mdf_res.conc_plot)
output_pdf.savefig(mdf_res.mdf_plot)
output_pdf.close()
rxn_df = pd.DataFrame(mdf_res.report_reactions)
cpd_df = pd.DataFrame(mdf_res.report_compounds)
| mit |
aasensio/magnetographCorrection | doPlotNew.py | 1 | 5228 | import numpy as np
import matplotlib.pyplot as pl
import brewer2mpl
from scipy.integrate import simps
from scipy.stats import linregress
dat = np.load('posterior.npz')
pB, normaliz, B, sigmas, fluxes = dat['arr_0'], dat['arr_1'], dat['arr_2'], dat['arr_3'], dat['arr_4']
labels = [r'2 Mx cm$^{-2}$',r'5 Mx cm$^{-2}$',r'10 Mx cm$^{-2}$',r'15 Mx cm$^{-2}$',r'20 Mx cm$^{-2}$',r'40 Mx cm$^{-2}$',r'80 Mx cm$^{-2}$',r'100 Mx cm$^{-2}$']
labels = [r'$\Phi_\mathrm{obs}$='+i for i in labels]
nSigmas = len(sigmas)
nFluxes = len(fluxes)
MMAP = np.zeros((nSigmas,nFluxes))
B95 = np.zeros((nSigmas,nFluxes))
B68 = np.zeros((nSigmas,nFluxes))
fwhmPrior = 1500.0
sigmaPrior = fwhmPrior / (2.0 * np.sqrt(2.0*np.log(2.0)))
priorGaussian = np.exp(-0.5*B**2 / sigmaPrior**2)
B0 = 38
sigmaPrior = 1.7 / np.sqrt(2.0)
priorLogN = np.exp(-(np.log(B)-np.log(B0))**2 / (2.0*sigmaPrior**2)) / B
prior = priorLogN
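# Editorial note (added by the editor): two candidate priors for the field
# strength B are set up above -- a half-Gaussian with a 1500 G FWHM and a
# log-normal centred on B0 = 38 G with width sigmaPrior = 1.7/sqrt(2) in ln(B)
# -- and the log-normal one is the prior actually used in the plots below.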
pl.close('all')
f, ax = pl.subplots(1, 2, figsize=(12,4), num=1)
ax = ax.flatten()
colors = brewer2mpl.get_map('Dark2', 'qualitative', nFluxes).mpl_colors
loop = 0
for i in range(len(fluxes)):
posterior = prior * pB[2,i,:]
normaliz = simps(posterior, B)
ax[loop].plot(B, posterior / normaliz, label=labels[i], linewidth=2, color=colors[i])
MMAP[2,i] = B[posterior.argmax()]
ax[loop].set_ylabel(r'p(B|$\Phi_\mathrm{obs}$)')
ax[loop].set_xlabel('B [G]')
#ax[loop].annotate(r'$\sigma_n$='+"{0:4.1f}".format(sigmas[2])+r' Mx cm$^{-2}$', xy=(0.55, 0.86), xycoords='axes fraction', fontsize=14)
ax[loop].set_xlim((0,400))
ax[loop].legend(labelspacing=0.2, prop={'size':12}, loc='upper right')
loop += 1
for i in range(len(fluxes)):
posterior = prior * pB[2,i,:]
cum = np.cumsum(posterior)
ax[loop].plot(B, cum / np.max(cum), label=labels[i], linewidth=2, color=colors[i])
cum = cum / np.max(cum)
print "{0} at 68% - {1} at 95%".format(B[np.argmin(np.abs(cum/np.max(cum)-0.68))],B[np.argmin(np.abs(cum/np.max(cum)-0.95))])
B68[2,i] = B[np.argmin(np.abs(cum/np.max(cum)-0.68))]
B95[2,i] = B[np.argmin(np.abs(cum/np.max(cum)-0.95))]
ax[loop].set_ylabel('cdf(B|$\Phi_\mathrm{obs}$)')
ax[loop].set_xlabel('B [G]')
ax[loop].set_xlim((0,600))
pl.tight_layout()
pl.savefig("posteriorFlux.pdf")
#--------------------------
pl.clf()
f, ax = pl.subplots(1, 2, figsize=(12,4), num=1)
ax = ax.flatten()
labels = [r'0.1 Mx cm$^{-2}$',r'1 Mx cm$^{-2}$',r'5 Mx cm$^{-2}$',r'10 Mx cm$^{-2}$',r'20 Mx cm$^{-2}$']
labels = [r'$\sigma_n$='+i for i in labels]
colors = brewer2mpl.get_map('Dark2', 'qualitative', nSigmas).mpl_colors
loop = 0
for i in range(len(sigmas)):
posterior = prior * pB[i,5,:]
normaliz = simps(posterior, B)
ax[loop].plot(B, posterior / normaliz, label=labels[i], linewidth=2, color=colors[i])
MMAP[2,i] = B[posterior.argmax()]
ax[loop].set_ylabel(r'p(B|$\Phi_\mathrm{obs}$)')
ax[loop].set_xlabel('B [G]')
#ax[loop].annotate(r'$\sigma_n$='+"{0:4.1f}".format(sigmas[2])+r' Mx cm$^{-2}$', xy=(0.55, 0.86), xycoords='axes fraction', fontsize=14)
ax[loop].set_xlim((0,300))
ax[loop].legend(labelspacing=0.2, prop={'size':12}, loc='upper right')
loop += 1
for i in range(len(sigmas)):
posterior = prior * pB[i,5,:]
cum = np.cumsum(posterior)
ax[loop].plot(B, cum / np.max(cum), label=labels[i], linewidth=2, color=colors[i])
cum = cum / np.max(cum)
print "{0} at 68% - {1} at 95%".format(B[np.argmin(np.abs(cum/np.max(cum)-0.68))],B[np.argmin(np.abs(cum/np.max(cum)-0.95))])
B68[2,i] = B[np.argmin(np.abs(cum/np.max(cum)-0.68))]
B95[2,i] = B[np.argmin(np.abs(cum/np.max(cum)-0.95))]
ax[loop].set_ylabel('cdf(B|$\Phi_\mathrm{obs}$)')
ax[loop].set_xlabel('B [G]')
ax[loop].set_xlim((0,300))
pl.tight_layout()
pl.savefig("posteriorNoise.pdf")
#---------------------------
dat = np.load('posteriorAnisot.npz')
pB, normaliz, B, a, fluxes = dat['arr_0'], dat['arr_1'], dat['arr_2'], dat['arr_3'], dat['arr_4']
na = len(a)
pl.clf()
f, ax = pl.subplots(1, 2, figsize=(12,4), num=1)
ax = ax.flatten()
labels = ['-0.5', '0', '1', '2','5','10']
labels = ['a='+i for i in labels]
colors = brewer2mpl.get_map('Dark2', 'qualitative', na).mpl_colors
loop = 0
for i in range(len(a)):
posterior = prior * pB[i,5,:]
normaliz = simps(posterior, B)
ax[loop].plot(B, posterior / normaliz, label=labels[i], linewidth=2, color=colors[i])
MMAP[2,i] = B[posterior.argmax()]
ax[loop].set_ylabel(r'p(B|$\Phi_\mathrm{obs}$)')
ax[loop].set_xlabel('B [G]')
#ax[loop].annotate(r'$\sigma_n$='+"{0:4.1f}".format(sigmas[2])+r' Mx cm$^{-2}$', xy=(0.55, 0.86), xycoords='axes fraction', fontsize=14)
ax[loop].set_xlim((0,300))
ax[loop].legend(labelspacing=0.2, prop={'size':12}, loc='upper right')
loop += 1
for i in range(len(a)):
posterior = prior * pB[i,5,:]
cum = np.cumsum(posterior)
ax[loop].plot(B, cum / np.max(cum), label=labels[i], linewidth=2, color=colors[i])
cum = cum / np.max(cum)
print "{0} at 68% - {1} at 95%".format(B[np.argmin(np.abs(cum/np.max(cum)-0.68))],B[np.argmin(np.abs(cum/np.max(cum)-0.95))])
B68[2,i] = B[np.argmin(np.abs(cum/np.max(cum)-0.68))]
B95[2,i] = B[np.argmin(np.abs(cum/np.max(cum)-0.95))]
ax[loop].set_ylabel('cdf(B|$\Phi_\mathrm{obs}$)')
ax[loop].set_xlabel('B [G]')
ax[loop].set_xlim((0,300))
pl.tight_layout()
pl.savefig("posteriorAnisot.pdf") | mit |
pulinagrawal/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py | 69 | 16818 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
  Z = self.texmanager.get_rgba(s, fontsize=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
# mappable cache of
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
                    if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
                raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
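    # Usage sketch (added by the editor, not part of the original module): the
    # module-level example boils down to
    #     Z = TexManager().get_rgba(r'$\alpha$', fontsize=12, dpi=80, rgb=(1, 0, 0))
    # which returns an (H, W, 4) float array whose alpha channel comes from
    # get_grey(); it can be composited onto a figure with e.g. figimage().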
| agpl-3.0 |
strawlab/drosophila_eye_map | drosophila_eye_map/inspect_weightmap.py | 1 | 8586 | # -*- coding: utf-8 -*-
# Copyright (c) 2005-2008, California Institute of Technology
# Copyright (c) 2017, Albert-Ludwigs-Universität Freiburg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Andrew D. Straw
"""Graphical program to inspect weightmap
This GUI program is used to inspect the precomputed_buchner_1971.py
file to make sure its results are what is expected.
WARNING: all dependencies are in this program -- will not load
anything other than precomputed_buchner_1971.py from current
directory or an installed drosophila_eye_map package. (The reason
is that because this program might be used outside the normal
environment of a drosophila_eye_map package directory, it carries
all its dependencies with it.) This could be a problem if, for
example, the cube_order ever changes.
"""
from __future__ import division, print_function
import numpy as np
import math
import precomputed_buchner71 as precomputed_buchner_1971
from mpl_toolkits.basemap import Basemap # basemap > 0.9.9.1
import matplotlib
rcParams = matplotlib.rcParams
rcParams['font.size'] = 10
rcParams['font.family'] = 'serif'
#rcParams['font.serif'] = 'Times'
#rcParams['font.sans-serif'] = 'Arial'
if 0:
from matplotlib import verbose
verbose.level = 'debug-annoying'
import matplotlib.pyplot as plt
cube_order = ['posx', 'negx', 'posy', 'negy', 'posz', 'negz']
def xyz2lonlat(x,y,z):
R2D = 180.0/math.pi
try:
lat = math.asin(z)*R2D
except ValueError as err:
if z>1 and z < 1.1:
lat = math.asin(1.0)*R2D
else:
raise
lon1 = math.atan2(y,x)*R2D
return lon1,lat
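# Note on the layout assumed by unflatten_cubemap below: the flat weight
# vector is taken to be six consecutive square faces stored in cube_order
# (posx, negx, posy, negy, posz, negz), each face row-major with
# n_pixels_per_side**2 entries; e.g. a length 6*64*64 vector yields six
# 64x64 faces.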
def unflatten_cubemap( rank1 ):
rank1 = np.asarray(rank1)
assert rank1.ndim==1
total_n_pixels = rank1.shape[0]
n_pixels_per_face = total_n_pixels//6
n_pixels_per_side = int(np.sqrt(n_pixels_per_face))
assert 6*n_pixels_per_side**2==total_n_pixels
cubemap = {}
for count,dir in enumerate(cube_order):
start_idx = count*n_pixels_per_face
this_face_pixels = rank1[ start_idx:start_idx+n_pixels_per_face ]
this_face_pixels = np.reshape(this_face_pixels,(n_pixels_per_side,n_pixels_per_side))
cubemap[dir]=this_face_pixels
return cubemap
def do_projection( proj, lon_lats):
xys = np.array([proj( lon, lat ) for (lon,lat) in lon_lats ])
x = xys[:,0]
y = xys[:,1]
return x,y
class App:
def on_pick(self,event):
self.show_index(event.ind[0]) # only show first point
def show_index(self,ind):
vec = self.left_weights[ind,:]
cubemap = unflatten_cubemap( vec )
for dir in cube_order:
self.cubeax[dir].imshow( cubemap[dir],
aspect='auto',
vmin=0.0,
vmax=0.1,
origin='lower',
)
ax = self.picker_axes
if self.highlight is None:
self.highlight, = ax.plot([self.x[ind]],[self.y[ind]],'ro',
ms=4.0,
markeredgecolor='k',
picker=0, # don't pick this red dot
)
else:
self.highlight.set_xdata( [self.x[ind]] )
self.highlight.set_ydata( [self.y[ind]] )
plt.draw()
def __init__(self):
self.highlight = None
# modified from make_buchner_interommatidial_distance_figure
rdirs = precomputed_buchner_1971.receptor_dirs
rdir_slicer = precomputed_buchner_1971.receptor_dir_slicer
triangles = precomputed_buchner_1971.triangles
weights = precomputed_buchner_1971.receptor_weight_matrix_64
weights = np.asarray(weights.todense())
self.left_rdirs = rdirs[rdir_slicer['left']]
self.left_weights = weights[rdir_slicer['left']]
lon_lats = [xyz2lonlat(*rdir) for rdir in self.left_rdirs]
stere = Basemap(projection='stere',
resolution=None,
lat_ts = 0.0,
lat_0 = 0,
lon_0 = 90,
llcrnrlon = -45,
urcrnrlon = -135,
llcrnrlat= -30,
urcrnrlat = 30,
)
self.x,self.y = do_projection(stere,lon_lats)
# pcolor figure -- stereographic projection
self.fig = plt.figure(figsize=(8,12))
ax = plt.subplot(2,1,1)
ax.set_title('click on an ommatidium to show weightmap')
self.picker_axes = ax
good = self.x < 1e29 # basemap seems to set bad values to 1e30
ax.plot(self.x[good],self.y[good],'wo',
ms=4.0,
markeredgecolor='k',
picker=5, # 5 points tolerance
)
ax.text( 0.5,0.99, 'dorsal',
horizontalalignment='center',
verticalalignment='top',
transform=ax.transAxes)
ax.text( 0.01,0.5, 'anterior',
horizontalalignment='left',
transform=ax.transAxes)
ax.text( 0.99,0.5, 'posterior',
horizontalalignment='right',
transform=ax.transAxes)
# draw parallels and meridians.
delat = 20.
circles = np.arange(0.,90.,delat).tolist()+\
np.arange(-delat,-90,-delat).tolist()
#biw.stere.drawparallels(circles,ax=ax)
stere.drawparallels(circles,ax=ax)
delon = 45.
meridians = np.arange(-180,180,delon)
stere.drawmeridians(meridians,ax=ax)
# create axes for cube map
xinc = 0.2
yinc = 0.13
xbase = 0.05
ybase = 0.05
self.cubeax = {}
frameon = True
# +y
rect = xbase, (ybase+yinc), xinc, yinc
self.cubeax['posy'] = self.fig.add_axes(rect,frameon=frameon)
# +x
rect = (xbase+xinc), (ybase+yinc), xinc, yinc
self.cubeax['posx'] = self.fig.add_axes(rect,frameon=frameon)
# -y
rect = (xbase+2*xinc), (ybase+yinc), xinc, yinc
self.cubeax['negy'] = self.fig.add_axes(rect,frameon=frameon)
# -x
rect = (xbase+3*xinc), (ybase+yinc), xinc, yinc
self.cubeax['negx'] = self.fig.add_axes(rect,frameon=frameon)
# +z
rect = (xbase+xinc), (ybase+2*yinc), xinc, yinc
self.cubeax['posz'] = self.fig.add_axes(rect,frameon=frameon)
# -z
rect = (xbase+xinc), ybase, xinc, yinc
self.cubeax['negz'] = self.fig.add_axes(rect,frameon=frameon)
for dir in cube_order:
## if dir not in self.cubeax:
## continue
self.cubeax[dir].set_xticks([])
self.cubeax[dir].set_yticks([])
title = dir
title = title.replace('pos','+')
title = title.replace('neg','-')
self.cubeax[dir].text(0,0,title,color='white')
self.show_index(0)
def mainloop(self):
self.fig.canvas.mpl_connect('pick_event', self.on_pick)
plt.show()
def main():
app = App()
app.mainloop()
if __name__=='__main__':
main()
| bsd-2-clause |
pacoqueen/ginn | extra/install/ipython2/ipython-5.10.0/IPython/core/shellapp.py | 1 | 16462 | # encoding: utf-8
"""
A mixin for :class:`~IPython.core.application.Application` classes that
launch InteractiveShell instances, load extensions, etc.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
from __future__ import print_function
import glob
from itertools import chain
import os
import sys
from traitlets.config.application import boolean_flag
from traitlets.config.configurable import Configurable
from traitlets.config.loader import Config
from IPython.core.application import SYSTEM_CONFIG_DIRS, ENV_CONFIG_DIRS
from IPython.core import pylabtools
from IPython.utils import py3compat
from IPython.utils.contexts import preserve_keys
from IPython.utils.path import filefind
from traitlets import (
Unicode, Instance, List, Bool, CaselessStrEnum, observe,
)
from IPython.terminal import pt_inputhooks
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
backend_keys = sorted(pylabtools.backends.keys())
backend_keys.insert(0, 'auto')
shell_flags = {}
addflag = lambda *args: shell_flags.update(boolean_flag(*args))
addflag('autoindent', 'InteractiveShell.autoindent',
'Turn on autoindenting.', 'Turn off autoindenting.'
)
addflag('automagic', 'InteractiveShell.automagic',
"""Turn on the auto calling of magic commands. Type %%magic at the
IPython prompt for more information.""",
'Turn off the auto calling of magic commands.'
)
addflag('pdb', 'InteractiveShell.pdb',
"Enable auto calling the pdb debugger after every exception.",
"Disable auto calling the pdb debugger after every exception."
)
addflag('pprint', 'PlainTextFormatter.pprint',
"Enable auto pretty printing of results.",
"Disable auto pretty printing of results."
)
addflag('color-info', 'InteractiveShell.color_info',
"""IPython can display information about objects via a set of functions,
and optionally can use colors for this, syntax highlighting
source code and various other elements. This is on by default, but can cause
problems with some pagers. If you see such problems, you can disable the
colours.""",
"Disable using colors for info related things."
)
nosep_config = Config()
nosep_config.InteractiveShell.separate_in = ''
nosep_config.InteractiveShell.separate_out = ''
nosep_config.InteractiveShell.separate_out2 = ''
shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
shell_flags['pylab'] = (
{'InteractiveShellApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""
)
shell_flags['matplotlib'] = (
{'InteractiveShellApp' : {'matplotlib' : 'auto'}},
"""Configure matplotlib for interactive use with
the default matplotlib backend."""
)
# it's possible we don't want short aliases for *all* of these:
shell_aliases = dict(
autocall='InteractiveShell.autocall',
colors='InteractiveShell.colors',
logfile='InteractiveShell.logfile',
logappend='InteractiveShell.logappend',
c='InteractiveShellApp.code_to_run',
m='InteractiveShellApp.module_to_run',
ext='InteractiveShellApp.extra_extension',
gui='InteractiveShellApp.gui',
pylab='InteractiveShellApp.pylab',
matplotlib='InteractiveShellApp.matplotlib',
)
shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class InteractiveShellApp(Configurable):
"""A Mixin for applications that start InteractiveShell instances.
Provides configurables for loading extensions and executing files
as part of configuring a Shell environment.
The following methods should be called by the :meth:`initialize` method
of the subclass:
- :meth:`init_path`
- :meth:`init_shell` (to be implemented by the subclass)
- :meth:`init_gui_pylab`
- :meth:`init_extensions`
- :meth:`init_code`
"""
extensions = List(Unicode(),
help="A list of dotted module names of IPython extensions to load."
).tag(config=True)
extra_extension = Unicode('',
help="dotted module name of an IPython extension to load."
).tag(config=True)
reraise_ipython_extension_failures = Bool(False,
help="Reraise exceptions encountered loading IPython extensions?",
).tag(config=True)
# Extensions that are always loaded (not configurable)
default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
hide_initial_ns = Bool(True,
help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
be hidden from tools like %who?"""
).tag(config=True)
exec_files = List(Unicode(),
help="""List of files to run at IPython startup."""
).tag(config=True)
exec_PYTHONSTARTUP = Bool(True,
help="""Run the file referenced by the PYTHONSTARTUP environment
variable at IPython startup."""
).tag(config=True)
file_to_run = Unicode('',
help="""A file to be run""").tag(config=True)
exec_lines = List(Unicode(),
help="""lines of code to run at IPython startup."""
).tag(config=True)
code_to_run = Unicode('',
help="Execute the given command string."
).tag(config=True)
module_to_run = Unicode('',
help="Run the module as a script."
).tag(config=True)
gui = CaselessStrEnum(gui_keys, allow_none=True,
help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
).tag(config=True)
matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
help="""Configure matplotlib for interactive use with
the default matplotlib backend."""
).tag(config=True)
pylab = CaselessStrEnum(backend_keys, allow_none=True,
help="""Pre-load matplotlib and numpy for interactive use,
selecting a particular matplotlib backend and loop integration.
"""
).tag(config=True)
pylab_import_all = Bool(True,
help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
and an ``import *`` is done from numpy and pylab, when using pylab mode.
When False, pylab mode should not import any names into the user namespace.
"""
).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
# whether interact-loop should start
interact = Bool(True)
user_ns = Instance(dict, args=None, allow_none=True)
@observe('user_ns')
def _user_ns_changed(self, change):
if self.shell is not None:
self.shell.user_ns = change['new']
self.shell.init_user_ns()
def init_path(self):
"""Add current working directory, '', to sys.path"""
if sys.path[0] != '':
sys.path.insert(0, '')
def init_shell(self):
raise NotImplementedError("Override in subclasses")
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
enable = False
shell = self.shell
if self.pylab:
enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
key = self.pylab
elif self.matplotlib:
enable = shell.enable_matplotlib
key = self.matplotlib
elif self.gui:
enable = shell.enable_gui
key = self.gui
if not enable:
return
try:
r = enable(key)
except ImportError:
self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
self.shell.showtraceback()
return
except Exception:
self.log.warning("GUI event loop or pylab initialization failed")
self.shell.showtraceback()
return
if isinstance(r, tuple):
gui, backend = r[:2]
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s, matplotlib=%s", gui, backend)
if key == "auto":
print("Using matplotlib backend: %s" % backend)
else:
gui = r
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s", gui)
def init_extensions(self):
"""Load all IPython extensions in IPythonApp.extensions.
This uses the :meth:`ExtensionManager.load_extensions` to load all
the extensions listed in ``self.extensions``.
"""
try:
self.log.debug("Loading IPython extensions...")
extensions = self.default_extensions + self.extensions
if self.extra_extension:
extensions.append(self.extra_extension)
for ext in extensions:
try:
self.log.info("Loading IPython extension: %s" % ext)
self.shell.extension_manager.load_extension(ext)
except:
if self.reraise_ipython_extension_failures:
raise
msg = ("Error in loading extension: {ext}\n"
"Check your config files in {location}".format(
ext=ext,
location=self.profile_dir.location
))
self.log.warning(msg, exc_info=True)
except:
if self.reraise_ipython_extension_failures:
raise
self.log.warning("Unknown error in loading extensions:", exc_info=True)
def init_code(self):
"""run the pre-flight code, specified via exec_lines"""
self._run_startup_files()
self._run_exec_lines()
self._run_exec_files()
# Hide variables defined here from %who etc.
if self.hide_initial_ns:
self.shell.user_ns_hidden.update(self.shell.user_ns)
# command-line execution (ipython -i script.py, ipython -m module)
# should *not* be excluded from %whos
self._run_cmd_line_code()
self._run_module()
        # flush output, so it won't be attached to the first cell
sys.stdout.flush()
sys.stderr.flush()
def _run_exec_lines(self):
"""Run lines of code in IPythonApp.exec_lines in the user's namespace."""
if not self.exec_lines:
return
try:
self.log.debug("Running code from IPythonApp.exec_lines...")
for line in self.exec_lines:
try:
self.log.info("Running code in user namespace: %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user "
"namespace: %s" % line)
self.shell.showtraceback()
except:
self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
self.shell.showtraceback()
def _exec_file(self, fname, shell_futures=False):
try:
full_filename = filefind(fname, [u'.', self.ipython_dir])
except IOError:
self.log.warning("File not found: %r"%fname)
return
# Make sure that the running script gets a proper sys.argv as if it
# were run from a system shell.
save_argv = sys.argv
sys.argv = [full_filename] + self.extra_args[1:]
# protect sys.argv from potential unicode strings on Python 2:
if not py3compat.PY3:
sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
try:
if os.path.isfile(full_filename):
self.log.info("Running file in user namespace: %s" %
full_filename)
# Ensure that __file__ is always defined to match Python
# behavior.
with preserve_keys(self.shell.user_ns, '__file__'):
self.shell.user_ns['__file__'] = fname
if full_filename.endswith('.ipy'):
self.shell.safe_execfile_ipy(full_filename,
shell_futures=shell_futures)
else:
# default to python, even without extension
self.shell.safe_execfile(full_filename,
self.shell.user_ns,
shell_futures=shell_futures,
raise_exceptions=True)
finally:
sys.argv = save_argv
def _run_startup_files(self):
"""Run files from profile startup directory"""
startup_dirs = [self.profile_dir.startup_dir] + [
os.path.join(p, 'startup') for p in chain(ENV_CONFIG_DIRS, SYSTEM_CONFIG_DIRS)
]
startup_files = []
if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
not (self.file_to_run or self.code_to_run or self.module_to_run):
python_startup = os.environ['PYTHONSTARTUP']
self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
try:
self._exec_file(python_startup)
except:
self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
self.shell.showtraceback()
for startup_dir in startup_dirs[::-1]:
startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
if not startup_files:
return
self.log.debug("Running startup files from %s...", startup_dir)
try:
for fname in sorted(startup_files):
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling startup files:")
self.shell.showtraceback()
def _run_exec_files(self):
"""Run files from IPythonApp.exec_files"""
if not self.exec_files:
return
self.log.debug("Running files in IPythonApp.exec_files...")
try:
for fname in self.exec_files:
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling IPythonApp.exec_files:")
self.shell.showtraceback()
def _run_cmd_line_code(self):
"""Run code or file specified at the command-line"""
if self.code_to_run:
line = self.code_to_run
try:
self.log.info("Running code given at command line (c=): %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user namespace: %s" %
line)
self.shell.showtraceback()
if not self.interact:
self.exit(1)
# Like Python itself, ignore the second if the first of these is present
elif self.file_to_run:
fname = self.file_to_run
if os.path.isdir(fname):
fname = os.path.join(fname, "__main__.py")
try:
self._exec_file(fname, shell_futures=True)
except:
self.shell.showtraceback(tb_offset=4)
if not self.interact:
self.exit(1)
def _run_module(self):
"""Run module specified at the command-line."""
if self.module_to_run:
# Make sure that the module gets a proper sys.argv as if it were
# run using `python -m`.
save_argv = sys.argv
sys.argv = [sys.executable] + self.extra_args
try:
self.shell.safe_run_module(self.module_to_run,
self.shell.user_ns)
finally:
sys.argv = save_argv
| gpl-2.0 |
zzcclp/spark | python/pyspark/pandas/missing/window.py | 16 | 5201 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.pandas.missing import unsupported_function, unsupported_property
def _unsupported_function_expanding(method_name, deprecated=False, reason=""):
return unsupported_function(
class_name="pandas.core.window.Expanding",
method_name=method_name,
deprecated=deprecated,
reason=reason,
)
def _unsupported_property_expanding(property_name, deprecated=False, reason=""):
return unsupported_property(
class_name="pandas.core.window.Expanding",
property_name=property_name,
deprecated=deprecated,
reason=reason,
)
def _unsupported_function_rolling(method_name, deprecated=False, reason=""):
return unsupported_function(
class_name="pandas.core.window.Rolling",
method_name=method_name,
deprecated=deprecated,
reason=reason,
)
def _unsupported_property_rolling(property_name, deprecated=False, reason=""):
return unsupported_property(
class_name="pandas.core.window.Rolling",
property_name=property_name,
deprecated=deprecated,
reason=reason,
)
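# Each stub produced above delegates to ``unsupported_function`` /
# ``unsupported_property``, which raise ``PandasNotImplementedError`` when
# the corresponding pandas API is used. Illustrative (hypothetical) use:
#
#     psdf.expanding(2).quantile(0.5)   # -> PandasNotImplementedError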
class MissingPandasLikeExpanding(object):
agg = _unsupported_function_expanding("agg")
aggregate = _unsupported_function_expanding("aggregate")
apply = _unsupported_function_expanding("apply")
corr = _unsupported_function_expanding("corr")
cov = _unsupported_function_expanding("cov")
kurt = _unsupported_function_expanding("kurt")
median = _unsupported_function_expanding("median")
quantile = _unsupported_function_expanding("quantile")
skew = _unsupported_function_expanding("skew")
validate = _unsupported_function_expanding("validate")
exclusions = _unsupported_property_expanding("exclusions")
is_datetimelike = _unsupported_property_expanding("is_datetimelike")
is_freq_type = _unsupported_property_expanding("is_freq_type")
ndim = _unsupported_property_expanding("ndim")
class MissingPandasLikeRolling(object):
agg = _unsupported_function_rolling("agg")
aggregate = _unsupported_function_rolling("aggregate")
apply = _unsupported_function_rolling("apply")
corr = _unsupported_function_rolling("corr")
cov = _unsupported_function_rolling("cov")
kurt = _unsupported_function_rolling("kurt")
median = _unsupported_function_rolling("median")
quantile = _unsupported_function_rolling("quantile")
skew = _unsupported_function_rolling("skew")
validate = _unsupported_function_rolling("validate")
exclusions = _unsupported_property_rolling("exclusions")
is_datetimelike = _unsupported_property_rolling("is_datetimelike")
is_freq_type = _unsupported_property_rolling("is_freq_type")
ndim = _unsupported_property_rolling("ndim")
class MissingPandasLikeExpandingGroupby(object):
agg = _unsupported_function_expanding("agg")
aggregate = _unsupported_function_expanding("aggregate")
apply = _unsupported_function_expanding("apply")
corr = _unsupported_function_expanding("corr")
cov = _unsupported_function_expanding("cov")
kurt = _unsupported_function_expanding("kurt")
median = _unsupported_function_expanding("median")
quantile = _unsupported_function_expanding("quantile")
skew = _unsupported_function_expanding("skew")
validate = _unsupported_function_expanding("validate")
exclusions = _unsupported_property_expanding("exclusions")
is_datetimelike = _unsupported_property_expanding("is_datetimelike")
is_freq_type = _unsupported_property_expanding("is_freq_type")
ndim = _unsupported_property_expanding("ndim")
class MissingPandasLikeRollingGroupby(object):
agg = _unsupported_function_rolling("agg")
aggregate = _unsupported_function_rolling("aggregate")
apply = _unsupported_function_rolling("apply")
corr = _unsupported_function_rolling("corr")
cov = _unsupported_function_rolling("cov")
kurt = _unsupported_function_rolling("kurt")
median = _unsupported_function_rolling("median")
quantile = _unsupported_function_rolling("quantile")
skew = _unsupported_function_rolling("skew")
validate = _unsupported_function_rolling("validate")
exclusions = _unsupported_property_rolling("exclusions")
is_datetimelike = _unsupported_property_rolling("is_datetimelike")
is_freq_type = _unsupported_property_rolling("is_freq_type")
ndim = _unsupported_property_rolling("ndim")
| apache-2.0 |
ningchi/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for an easier formula
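    # (the exact kernel computed below is the additive chi-squared kernel
    #  k(x, y) = sum_i 2 * x_i * y_i / (x_i + y_i))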
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
    # this array has shape n_samples_x x n_samples_y x n_features
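    # (in closed form this is the skewed chi-squared kernel
    #  k(x, y) = prod_i 2*sqrt((x_i + c)*(y_i + c)) / (x_i + y_i + 2*c),
    #  evaluated here as exp(sum of logs) for numerical stability)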
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
apache-spark-on-k8s/spark | python/pyspark/sql/dataframe.py | 3 | 70232 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
import warnings
from pyspark import copy_func, since
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import *
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SQLContext`::
people = sqlContext.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SQLContext
people = sqlContext.read.parquet("...")
department = sqlContext.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this RDD as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SQLContext`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this DataFrame.
        The lifetime of this temporary view is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this DataFrame.
        The lifetime of this temporary view is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this DataFrame.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@since(2.2)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Evolving.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> df.explain()
== Physical Plan ==
Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString())
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns true if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Evolving
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to True, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
            and aligns cells right.
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20))
else:
print(self._jdf.showString(n, int(truncate)))
def __repr__(self):
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
@since(2.1)
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this DataFrame, which is especially useful in iterative algorithms where the
plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with L{SparkContext.setCheckpointDir()}.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.1)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
:param eventTime: the name of the column that contains the event time of the row.
        :param delayThreshold: the minimum delay to wait for data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
.. note:: Evolving
>>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
@since(2.2)
def hint(self, name, *parameters):
"""Specifies some hint on the current DataFrame.
:param name: A name of the hint.
:param parameters: Optional parameters.
:return: :class:`DataFrame`
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
for p in parameters:
if not isinstance(p, str):
raise TypeError(
"all parameters should be str, got {0} of type {1}".format(p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.collectToPython()
return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this DataFrame.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.toPythonIterator()
return _load_from_socket(port, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
        This is a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
        If no storage level is specified, it defaults to (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to False to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
``numPartitions`` can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement, fraction, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> df.sample(False, 0.5, 42).count()
2
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd = self._jdf.sample(withReplacement, fraction, long(seed))
return DataFrame(rdd, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new DataFrame that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 5|
| 1| 9|
+---+-----+
"""
if not isinstance(col, str):
raise ValueError("col must be a string, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the DataFrame. Weights will
be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
1
>>> splits[1].count()
3
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(2.1)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``full_outer``, ``left``, ``left_outer``, ``right``, ``right_outer``,
``left_semi``, and ``left_anti``.
The following performs a full outer join between ``df1`` and ``df2``.
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
[Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes statistics for numeric and string columns.
        This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
        :param n: int, default None. Number of rows to return.
        :return: If n is None, return a single :class:`Row`;
            otherwise, return a list of at most ``n`` :class:`Row` objects.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
@ignore_unicode_prefix
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name=u'Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name=u'Bob')]
"""
if isinstance(item, basestring):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
        (shorthand for ``df.groupBy().agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by a distinct.
Also as standard in SQL, this function resolves columns by position (not by name).
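        For illustration, a minimal sketch of typical usage follows; ``df_a`` and
        ``df_b`` are hypothetical DataFrames (not doctest fixtures of this module),
        so the example is excluded from the doctest run:
        >>> df_a = spark.createDataFrame([(1, 'x')], ['id', 'v'])  # doctest: +SKIP
        >>> df_b = spark.createDataFrame([(2, 'y')], ['id', 'v'])  # doctest: +SKIP
        >>> df_a.union(df_b).distinct().count()  # doctest: +SKIP
        2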
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by a distinct.
Also as standard in SQL, this function resolves columns by position (not by name).
.. note:: Deprecated in 2.0, use union instead.
"""
return self.union(other)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
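        A minimal sketch (hypothetical DataFrames, excluded from the doctest run):
        >>> df_a = spark.createDataFrame([(1,), (2,)], ['id'])  # doctest: +SKIP
        >>> df_b = spark.createDataFrame([(2,), (3,)], ['id'])  # doctest: +SKIP
        >>> df_a.intersect(df_b).collect()  # doctest: +SKIP
        [Row(id=2)]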
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this frame
but not in another frame.
This is equivalent to `EXCEPT` in SQL.
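        A minimal sketch (hypothetical DataFrames, excluded from the doctest run):
        >>> df_a = spark.createDataFrame([(1,), (2,)], ['id'])  # doctest: +SKIP
        >>> df_b = spark.createDataFrame([(2,), (3,)], ['id'])  # doctest: +SKIP
        >>> df_a.subtract(df_b).collect()  # doctest: +SKIP
        [Row(id=1)]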
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
        :class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
        duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
        be, and the system will accordingly limit the state. In addition, data older than the
        watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
            If specified, drop rows that have fewer than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, boolean, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, dict)):
raise ValueError("value should be a float, int, long, string, or dict")
if isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value=None, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value should contain either all numerics, all booleans,
or all strings. When replacing, the new value will be cast
to the type of the existing column.
        For numeric replacements all values to be replaced should have unique
        floating point representations. In case of conflicts (for example with `{42: -1, 42.0: 1}`),
        an arbitrary replacement will be used.
:param to_replace: bool, int, long, float, string, list or dict.
Value to be replaced.
If the value is a dict, then `value` is ignored and `to_replace` must be a
mapping between a value and a replacement.
:param value: int, long, float, string, or list.
The replacement value must be an int, long, float, or string. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(basestring)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(basestring)
all_of_numeric = all_of((float, int, long))
# Validate input types
valid_types = (bool, float, int, long, basestring, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a float, int, long, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a float, int, long, string, list, or tuple. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, basestring))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, long, basestring)):
to_replace = [to_replace]
if isinstance(value, (float, int, long, basestring)):
value = [value for _ in range(len(to_replace))]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, basestring):
subset = [subset]
        # Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys()) and all_of_type(rep_dict.values())
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
DataFrame.
The result of this algorithm has the following deterministic bound:
If the DataFrame has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the DataFrame so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
        presented in [[http://dx.doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
:param col: str, list.
Can be a single column name, or a list of names for multiple columns.
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
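        As an illustrative sketch, using the ``df4`` doctest fixture defined at the
        bottom of this module (skipped because the approximate results depend on the
        relative error chosen):
        >>> quartiles = df4.approxQuantile('age', [0.25, 0.5, 0.75], 0.1)  # doctest: +SKIP
        >>> per_column = df4.approxQuantile(['age', 'height'], [0.5], 0.1)  # doctest: +SKIP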
.. versionchanged:: 2.2
Added support for multiple columns.
"""
if not isinstance(col, (str, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isinstance(col, str):
col = [col]
for c in col:
if not isinstance(c, str):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a DataFrame as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
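        A small sketch of typical usage (``df_xy`` is a hypothetical DataFrame, so the
        example is excluded from the doctest run; a perfectly linear relation gives 1.0):
        >>> df_xy = spark.createDataFrame([(1, 2.0), (2, 4.0), (3, 6.0)], ['x', 'y'])  # doctest: +SKIP
        >>> df_xy.corr('x', 'y')  # doctest: +SKIP
        1.0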
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
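        A small sketch (hypothetical data, excluded from the doctest run; for these
        values the sample covariance is exactly 1.0):
        >>> df_xy = spark.createDataFrame([(1, 1.0), (2, 2.0), (3, 3.0)], ['x', 'y'])  # doctest: +SKIP
        >>> df_xy.cov('x', 'y')  # doctest: +SKIP
        1.0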
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the DataFrame.
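        For illustration, a sketch with a hypothetical DataFrame (excluded from the
        doctest run): the result below would have a first column named ``k_v`` holding
        the distinct values of ``k`` and one count column per distinct value of ``v``.
        >>> pairs = spark.createDataFrame([('a', 'x'), ('a', 'y'), ('b', 'x')], ['k', 'v'])  # doctest: +SKIP
        >>> counts = pairs.crosstab('k', 'v')  # doctest: +SKIP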
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
        Finds frequent items for columns, possibly with false positives, using the
        frequent element count algorithm described in
        http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou.
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
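        A minimal sketch (hypothetical DataFrame, excluded from the doctest run since
        the result may contain false positives):
        >>> events = spark.createDataFrame([(1,), (1,), (2,)], ['id'])  # doctest: +SKIP
        >>> frequent = events.freqItems(['id'], support=0.5)  # doctest: +SKIP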
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
        :param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name=u'Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, basestring):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
def toDF(self, *cols):
"""Returns a new class:`DataFrame` that with new specified column names
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def toPandas(self):
"""Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. note:: This method should only be used if the resulting Pandas's DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
import pandas as pd
return pd.DataFrame.from_records(self.collect(), columns=self.columns)
##########################################################################################
# Pandas compatibility
##########################################################################################
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
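    For illustration, ``df.na`` exposes the same operations as the corresponding
    :class:`DataFrame` methods; using the hypothetical ``df4`` doctest fixture
    (excluded from the doctest run):
    >>> df4.na.drop().count() == df4.dropna().count()  # doctest: +SKIP
    True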
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
from pyspark.sql.functions import from_unixtime
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
Row(name='Bob', age=5)]).toDF()
globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
Row(name='Bob', age=5, height=None),
Row(name='Tom', age=None, height=None),
Row(name=None, age=None, height=None)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
dials/dials | command_line/cluster_unit_cell.py | 1 | 3086 | # LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
import os
import iotbx.mtz
import iotbx.phil
from cctbx import crystal
from xfel.clustering.cluster import Cluster
from xfel.clustering.cluster_groups import unit_cell_info
import dials.util
from dials.util.options import OptionParser, flatten_experiments
help_message = """
"""
phil_scope = iotbx.phil.parse(
"""
threshold = 5000
.type = float(value_min=0)
.help = 'Threshold value for the clustering'
plot {
show = False
.type = bool
name = 'cluster_unit_cell.png'
.type = path
log = False
.type = bool
.help = 'Display the dendrogram with a log scale'
}
"""
)
@dials.util.show_mail_handle_errors()
def run(args=None):
usage = "dials.cluster_unit_cell [options] models.expt"
parser = OptionParser(
usage=usage,
phil=phil_scope,
read_experiments=True,
check_format=False,
epilog=help_message,
)
params, options, args = parser.parse_args(
args, show_diff_phil=True, return_unhandled=True
)
experiments = flatten_experiments(params.input.experiments)
crystal_symmetries = []
if len(experiments) == 0:
if not args:
parser.print_help()
exit(0)
for arg in args:
assert os.path.isfile(arg), arg
mtz_object = iotbx.mtz.object(file_name=arg)
arrays = mtz_object.as_miller_arrays(
merge_equivalents=False, anomalous=False
)
crystal_symmetries.append(arrays[0].crystal_symmetry())
else:
crystal_symmetries = [
crystal.symmetry(
unit_cell=expt.crystal.get_unit_cell(),
space_group=expt.crystal.get_space_group(),
)
for expt in experiments
]
do_cluster_analysis(crystal_symmetries, params)
def do_cluster_analysis(crystal_symmetries, params):
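    """Cluster the supplied crystal symmetries on their unit cell parameters and
    print a summary of the resulting clusters.
    A rough sketch of standalone use; the unit cells below are illustrative
    assumptions rather than test fixtures, so the example is not run as a doctest:
    >>> from cctbx import crystal  # doctest: +SKIP
    >>> sym1 = crystal.symmetry(unit_cell=(10, 10, 10, 90, 90, 90), space_group_symbol="P1")  # doctest: +SKIP
    >>> sym2 = crystal.symmetry(unit_cell=(10.1, 10.1, 10.1, 90, 90, 90), space_group_symbol="P1")  # doctest: +SKIP
    >>> params = phil_scope.extract()  # doctest: +SKIP
    >>> clusters = do_cluster_analysis([sym1, sym2], params)  # doctest: +SKIP
    """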
ucs = Cluster.from_crystal_symmetries(crystal_symmetries)
if params.plot.show or params.plot.name is not None:
if not params.plot.show:
import matplotlib
# http://matplotlib.org/faq/howto_faq.html#generate-images-without-having-a-window-appear
matplotlib.use("Agg") # use a non-interactive backend
import matplotlib.pyplot as plt
plt.figure("Andrews-Bernstein distance dendogram", figsize=(12, 8))
ax = plt.gca()
clusters, cluster_axes = ucs.ab_cluster(
params.threshold,
log=params.plot.log,
ax=ax,
write_file_lists=False,
doplot=True,
)
print(unit_cell_info(clusters))
plt.tight_layout()
if params.plot.name is not None:
plt.savefig(params.plot.name)
if params.plot.show:
plt.show()
else:
clusters, cluster_axes = ucs.ab_cluster(
params.threshold, log=params.plot.log, write_file_lists=False, doplot=False
)
print(unit_cell_info(clusters))
return clusters
if __name__ == "__main__":
run()
| bsd-3-clause |
khkaminska/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
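    Examples
    --------
    A small illustrative example: two identical one-bicluster sets (boolean row and
    column indicator arrays) match perfectly, giving a score of 1.0.
    >>> import numpy as np
    >>> rows = np.array([[True, False, True]])
    >>> cols = np.array([[False, True, True]])
    >>> float(consensus_score((rows, cols), (rows, cols)))
    1.0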
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
| bsd-3-clause |
kambysese/mne-python | examples/stats/plot_cluster_stats_evoked.py | 18 | 3021 | """
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
Tests whether the evoked response is significantly different
between conditions. The multiple comparisons problem is addressed
with a cluster-level permutation test.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=1,
out_type='mask')
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
| bsd-3-clause |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/numpy/lib/function_base.py | 3 | 115310 | __docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov',
'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning',
'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc',
'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp',
'add_newdoc_ufunc']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
from _compiled_base import _insert, add_docstring
from _compiled_base import digitize, bincount, interp as compiled_interp
from arraysetops import setdiff1d
from utils import deprecate
from _compiled_base import add_newdoc_ufunc
import numpy as np
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try: iter(y)
except: return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None, density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi+0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins+1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero,], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
        the bin density, i.e., the bin count divided by the bin hypervolume.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights for
the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal'\
' to the dimension of the sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError("Element at index %s in `bins` should be "
"a positive integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
else:
edges[i] = asarray(bins[i], float)
nbin[i] = len(edges[i])+1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError("""
Found bin edge of size <= 0. Did you specify `bins` with
non-monotonic sequence?""")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:,i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i,j)
ni[i],ni[j] = ni[j],ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1,-1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
if weights is None :
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else :
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape :
if axis is None :
raise TypeError(
"Axis must be specified when shapes of a "\
"and weights differ.")
if wgt.ndim != 1 :
raise TypeError(
"1D weights expected when shapes of a and "\
"weights differ.")
if wgt.shape[0] != a.shape[axis] :
raise ValueError(
"Length of weights not compatible with "\
"specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
    Raises ValueError if array_like contains NaNs or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have undefined values.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
out[condlist[0]] = funclist[0](x[condlist[0]])
out[condlist[1]] = funclist[1](x[condlist[1]])
...
out[condlist[n2]] = funclist[n2](x[condlist[n2]])
Examples
--------
Define the signum (sign) function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.arange(6) - 2.5
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x < 0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray)):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
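# Editor's note: an illustrative sketch (not part of numpy) showing how the
# condition/function pairing above works, including the implicit "otherwise"
# function when ``len(funclist) == len(condlist) + 1``.
def _demo_piecewise():
    x = np.linspace(-2.0, 2.0, 9)
    # |x| built from two pieces, as in the docstring example.
    absx = np.piecewise(x, [x < 0, x >= 0], [lambda v: -v, lambda v: v])
    # One extra entry acts as the default wherever no condition is True.
    clipped = np.piecewise(x, [x < -1, x > 1], [-1.0, 1.0, lambda v: v])
    return absx, clipped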
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
n2 = len(choicelist)
if n2 != n:
raise ValueError(
"list of cases must be same length as list of conditions")
choicelist = [default] + choicelist
S = 0
pfac = 1
for k in range(1, n+1):
S += k * pfac * asarray(condlist[k-1])
if k < n:
pfac *= (1-asarray(condlist[k-1]))
# handle special case of a 1-element condition but
# a multi-element choice
if type(S) in ScalarType or max(asarray(S).shape)==1:
pfac = asarray(1)
for k in range(n2+1):
pfac = pfac + asarray(choicelist[k])
if type(S) in ScalarType:
S = S*ones(asarray(pfac).shape, type(S))
else:
S = S*ones(asarray(pfac).shape, S.dtype)
return choose(S, tuple(choicelist))
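# Editor's note: hypothetical sketch (not in numpy itself) restating the
# docstring example of ``select`` and the documented first-match-wins rule
# when several conditions overlap.
def _demo_select():
    x = np.arange(10)
    # x == 2 satisfies both conditions; the first choice wins.
    out = np.select([x < 3, x < 6], [x, x ** 2], default=-1)
    return out   # expected: [0, 1, 2, 9, 16, 25, -1, -1, -1, -1]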
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their ``order=``
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
`*varargs` : scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: `dx`, `dy`, `dz`, ... The default distance is 1.
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]),
array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
# use central differences on interior and first differences on endpoints
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M' :
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm' :
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
for axis in range(N):
# select out appropriate parts for this dimension
out = np.empty_like(f, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
out[slice1] = (f[slice2] - f[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (f[1] - f[0])
out[slice1] = (f[slice2] - f[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (f[-1] - f[-2])
out[slice1] = (f[slice2] - f[slice3])
# divide by step size
outvals.append(out / dx[axis])
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
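# Editor's note: a small sketch (not part of numpy) making the boundary
# handling of ``gradient`` explicit: central differences in the interior,
# one-sided first differences at the two ends, all divided by the spacing.
def _demo_gradient():
    f = np.array([1.0, 2.0, 4.0, 7.0, 11.0, 16.0])
    dx = 2.0
    g = np.gradient(f, dx)
    manual = np.empty_like(f)
    manual[1:-1] = (f[2:] - f[:-2]) / (2.0 * dx)   # interior: central
    manual[0] = (f[1] - f[0]) / dx                 # left edge: forward
    manual[-1] = (f[-1] - f[-2]) / dx              # right edge: backward
    assert np.allclose(g, manual)
    return g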
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
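# Editor's note: illustrative only.  Shows that the n-th difference is just
# the first difference applied recursively, as the docstring above states.
def _demo_diff():
    x = np.array([1, 2, 4, 7, 0])
    d1 = np.diff(x)            # [ 1,  2,  3, -7]
    d2 = np.diff(x, n=2)       # same as np.diff(d1)
    assert np.all(d2 == np.diff(d1))
    return d1, d2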
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, defaults is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasingness is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
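# Editor's note: hypothetical sketch of the interpolation API above.  It
# assumes only the documented behaviour: `xp` must already be increasing,
# and `left`/`right` control the values returned outside the data range.
def _demo_interp():
    xp = [1.0, 2.0, 3.0]
    fp = [3.0, 2.0, 0.0]
    assert np.all(np.diff(xp) > 0)                  # the check suggested in Notes
    y_mid = np.interp(2.5, xp, fp)                  # 1.0, halfway between 2 and 0
    y_out = np.interp(5.0, xp, fp, right=np.nan)    # flagged as out of range
    return y_mid, y_out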
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd+pi, 2*pi)-pi
_nx.copyto(ddmod, pi, where=(ddmod==-pi) & (dd > 0))
ph_correct = ddmod - dd;
_nx.copyto(ph_correct, 0, where=abs(dd)<discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
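# Editor's note: an illustrative sketch (not part of numpy) of phase
# unwrapping: a jump larger than ``discont`` (default pi) is replaced by its
# 2*pi complement, so the unwrapped phase differs from the input only by
# integer multiples of 2*pi.
def _demo_unwrap():
    phase = np.linspace(0, np.pi, num=5)
    phase[3:] += np.pi                      # introduce an artificial jump
    unwrapped = np.unwrap(phase)
    k = (unwrapped - phase) / (2 * np.pi)   # correction in units of 2*pi
    assert np.allclose(k, np.round(k))
    return unwrapped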
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a,copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.: break
else: first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.: break
else: last = last - 1
return filt[first:last]
import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
return tmp[idx]
except AttributeError:
items = list(set(x))
items.sort()
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `a`.
vals : 1-D sequence
Values to put into `a`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
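# Editor's note: hypothetical round-trip sketch for the two complementary
# helpers documented above: ``extract`` pulls out the masked values, while
# ``place`` writes (and cycles) values back where the mask is True.
def _demo_extract_place():
    arr = np.arange(12).reshape(3, 4)
    mask = (arr % 3) == 0
    picked = np.extract(mask, arr)        # [0, 3, 6, 9]
    np.place(arr, mask, [-1, -2])         # vals shorter than N are repeated
    return picked, arr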
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
dt = y.dtype
if np.issubdtype(dt, np.integer) or np.issubdtype(dt, np.bool_):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.copyto(y, fill, where=mask)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
def nansum(a, axis=None):
"""
Return the sum of array elements over a given axis treating
Not a Numbers (NaNs) as zero.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute
the sum of the flattened array.
Returns
-------
y : ndarray
An array with the same shape as a, with the specified axis removed.
If a is a 0-d array, or if axis is None, a scalar is returned with
the same dtype as `a`.
See Also
--------
numpy.sum : Sum across array including Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
If positive or negative infinity is present, the result is positive or
negative infinity. But if both positive and negative infinity are present,
the result is Not A Number (NaN).
Arithmetic is modular when using integer types (such arrays cannot contain
NaNs, positive infinity or negative infinity, since those are floating
point values), and no error is raised on overflow.
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
When positive infinity and negative infinity are present
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, np.NINF])
nan
"""
return _nanop(np.sum, 0, a, axis)
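# Editor's note: a short, illustrative comparison (not part of numpy) of the
# NaN-aware reduction above against the plain reduction.
def _demo_nansum():
    a = np.array([[1.0, 1.0], [1.0, np.nan]])
    assert np.isnan(np.sum(a))              # plain sum propagates the NaN
    assert np.nansum(a) == 3.0              # NaNs are treated as zero
    return np.nansum(a, axis=0)             # [2., 1.]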
def nanmin(a, axis=None):
"""
Return the minimum of an array or minimum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
Returns
-------
nanmin : ndarray
A new array or a scalar array with the result.
See Also
--------
numpy.amin : Minimum across array including any Not a Numbers.
numpy.nanmax : Maximum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmin.reduce(a, axis)
else:
return np.fmin.reduce(a.flat)
def nanargmin(a, axis=None):
"""
Return indices of the minimum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
"""
Return the maximum of an array or maximum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not
an array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
If `a` is a 0-d array, or if axis is None, an ndarray scalar is
returned with the same dtype as `a`.
See Also
--------
numpy.amax : Maximum across array including any Not a Numbers.
numpy.nanmin : Minimum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
If the input has an integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmax.reduce(a, axis)
else:
return np.fmax.reduce(a.flat)
def nanargmax(a, axis=None):
"""
Return indices of the maximum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
import sys
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the first
argument will be used to determine the number of outputs. The results of
this call will be cached if `cache` is `True` to prevent calling the
function twice. However, to implement the cache, the original function must
be wrapped which will slow down subsequent calls, so only do this if your
function is expensive.
The new keyword argument interface and `excluded` argument support
further degrade performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None, cache=False):
self.pyfunc = pyfunc
self.cache = cache
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError("Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if self.otypes and not self.excluded:
self._ufunc = None # Caching to improve default performance
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
assert args
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple functions
# at least -- this wrapping can almost double the execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
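# Editor's note: an illustrative sketch (not part of numpy) of the `excluded`
# machinery documented in the class above: the polynomial coefficients `p`
# are passed through untouched while `x` is broadcast element-wise.
def _demo_vectorize_excluded():
    def mypolyval(p, x):
        res = p[0]
        for c in p[1:]:
            res = res * x + c
        return res
    vpolyval = np.vectorize(mypolyval, excluded=['p'])
    return vpolyval(p=[1, 2, 3], x=[0, 1])   # array([3, 6])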
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the covariance between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
X = array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None),newaxis)
else:
axis = 1
tup = (newaxis, slice(None))
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=float)
X = concatenate((X,y), axis)
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
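# Editor's note: a hypothetical sketch tying the `bias`/`ddof` knobs above to
# the normalisation factor: by default ``N - 1`` observations, ``N`` when
# ``bias=1``, and ``N - ddof`` when ``ddof`` is given explicitly.
def _demo_cov_normalisation():
    x = np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])   # 2 variables, N = 3
    c_unbiased = np.cov(x)             # divides by N - 1 = 2
    c_biased = np.cov(x, bias=1)       # divides by N = 3
    assert np.allclose(c_unbiased * 2.0, c_biased * 3.0)
    return c_unbiased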
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
if c.size == 0:
# handle empty arrays
return c
try:
d = diag(c)
except ValueError: # scalar covariance
return 1
return c/sqrt(multiply.outer(d,d))
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. Some authors prefer that it be called a
Hann window, to help avoid confusion with the very similar Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming window was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1,float)
n = arange(0,M)
return 0.54-0.46*cos(2.0*pi*n/(M-1))
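# Editor's note: illustrative only.  The four cosine-based tapers defined
# above share the same calling convention; this sketch just checks their
# symmetry and the documented peak value of 1.0 for odd M.
def _demo_windows(M=51):
    for win in (np.bartlett, np.blackman, np.hanning, np.hamming):
        w = win(M)
        assert w.shape == (M,)
        assert np.allclose(w, w[::-1])                 # symmetric taper
    assert abs(np.hanning(M).max() - 1.0) < 1e-12      # odd M -> peak of 1
    return np.hamming(M)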
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in xrange(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is partitioned
into the two intervals [0,8] and (8,inf), and Chebyshev polynomial
expansions are employed in each interval. Relative error on the domain
[0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16
with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x<0)
x[ind] = -x[ind]
ind = (x<=8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M,beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0,M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
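# Editor's note: hypothetical sketch of the ``beta`` table in the docstring
# above: beta = 0 reduces the Kaiser taper to a rectangular window, while a
# large beta concentrates the window around its centre.
def _demo_kaiser_beta():
    assert np.allclose(np.kaiser(8, 0.0), np.ones(8))   # rectangular
    sharp = np.kaiser(51, 14.0)
    assert sharp.argmax() == 25                          # peak at the centre
    return sharp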
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a
Lanczos resampling filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.arange(-20., 21.)/5.
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.arange(-200., 201.)/50.
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
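# (added note) substituting a tiny nonzero value where x == 0 avoids a 0/0
# division in the return below, so sin(y)/y evaluates to ~1.0 at the origin,
# matching the documented limit value.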
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
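Examples
--------
A small illustration (array values chosen arbitrarily):
>>> np.msort(np.array([[3, 1], [0, 2]]))
array([[0, 1],
       [3, 2]])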
"""
b = array(a,subok=True,copy=True)
b.sort(0)
return b
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
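For example (illustrative values): for ``V = [1, 3, 2]``, ``V_sorted`` is
``[1, 2, 3]`` and the median is ``V_sorted[1] = 2``; for ``V = [1, 2, 3, 4]``
the median is ``(2 + 3) / 2 = 2.5``.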
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if sorted.shape == ():
# make 0-D arrays work
return sorted.item()
if axis is None:
axis = 0
indexer = [slice(None)] * sorted.ndim
index = int(sorted.shape[axis]/2)
if sorted.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(sorted[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None, overwrite_input=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range [0, 100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100 inclusive.
axis : int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentile along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted.
Default is False. Note that, if `overwrite_input` is True and the
input is not already an array, an error will be raised.
Returns
-------
percentile : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the qth percentile of V is the qth ranked
value in a sorted copy of V. A weighted average of the two nearest
neighbors is used if the normalized ranking does not match q exactly.
The same as the median if ``q=50``, the same as the minimum if ``q=0``
and the same as the maximum if ``q=100``.
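For example (illustrative values): with ``V = [1, 2, 3, 4, 5]`` and ``q = 40``
the normalized rank is ``0.4 * (5 - 1) = 1.6``, so the result interpolates
between ``V_sorted[1]`` and ``V_sorted[2]``: ``0.4 * 2 + 0.6 * 3 = 2.6``.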
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 50, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
3.5
"""
a = np.asarray(a)
if q == 0:
return a.min(axis=axis, out=out)
elif q == 100:
return a.max(axis=axis, out=out)
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
raise ValueError("percentile must be either in the range [0,100]")
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates the trapezoidal rule -- y-axis locations of the points
are taken from the `y` array; by default the x-axis distances between points
are 1.0, but they can be provided instead via the `x` array or the `dx`
scalar. The return value equals the combined area under the red lines.
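In terms of the samples, the rule evaluates
``sum((x[1:] - x[:-1]) * (y[1:] + y[:-1]) / 2.0)`` along the given axis,
with the spacing term replaced by `dx` when `x` is None.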
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError: # Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = {}
exec 'from %s import %s' % (place, obj) in new
if isinstance(doc, str):
add_docstring(new[obj], doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new[obj], val[0]), val[1].strip())
except:
pass
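# Hedged usage sketch (the module paths and docstring text below are
# illustrative only, not taken from this file):
# add_newdoc('numpy.core.umath', 'add', 'Add arguments element-wise.')
# add_newdoc('numpy.core.multiarray', 'ndarray', ('T', 'Same as self.transpose().'))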
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from two or more coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
copy : bool, optional
If False, views into the original arrays are returned in
order to conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous arrays.
Furthermore, more than one element of a broadcast array may refer to
a single memory location. If you need to write to the arrays, make
copies first.
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing keyword
argument. Giving the string 'ij' returns a meshgrid with matrix indexing,
while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case
with inputs of length M and N, the outputs are of shape (N, M) for 'xy'
indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of
length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M,
N, P) for 'ij' indexing. The difference is illustrated by the following
code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
if len(xi) < 2:
msg = 'meshgrid() takes 2 or more arguments (%d given)' % int(len(xi) > 0)
raise ValueError(msg)
args = np.atleast_1d(*xi)
ndim = len(args)
copy_ = kwargs.get('copy', True)
sparse = kwargs.get('sparse', False)
indexing = kwargs.get('indexing', 'xy')
if not indexing in ['xy', 'ij']:
raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(args)]
shape = [x.size for x in output]
if indexing == 'xy':
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim;
axis = ndim-1;
if ndim == 0:
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if (obj < 0 or obj >=N):
raise ValueError(
"invalid entry")
newshape[axis]-=1;
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1,None)
new[slobj] = arr[slobj2]
elif isinstance(obj, slice):
start, stop, step = obj.indices(N)
numtodel = len(xrange(start, stop, step))
if numtodel <= 0:
if wrap:
return wrap(new)
else:
return arr.copy()
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
obj = arange(start, stop, step, dtype=intp)
all = arange(start, stop, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = obj
new[slobj] = arr[slobj2]
else: # default behavior
obj = array(obj, dtype=intp, copy=0, ndmin=1)
all = arange(N, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = obj
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
delete : Delete elements from an array.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if (ndim == 0):
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if obj < 0 or obj > N:
raise ValueError(
"index (%d) out of range (0<=index<=%d) "\
"in dimension %d" % (obj, N, axis))
values = array(values, copy=False, ndmin=arr.ndim)
values = np.rollaxis(values, 0, axis+1)
obj = [obj] * values.shape[axis]
elif isinstance(obj, slice):
# turn it into a range object
obj = arange(*obj.indices(N),**{'dtype':intp})
# get two sets of indices
# one is the indices which will hold the new stuff
# two is the indices where arr will be copied over
obj = asarray(obj, dtype=intp)
numnew = len(obj)
index1 = obj + arange(numnew)
index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = index1
slobj2[axis] = index2
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| mit |
CChengz/dot.r | workspace/fits/.metadata/.plugins/org.eclipse.wst.server.core/tmp0/wtpwebapps/amil/WEB-INF/files/map_outputs/20150208230724GLVL/map_gen.py | 11 | 6786 | #!/usr/bin/env python
"""
Created on Feb 4, 2015
@author: Cheng Zeng, University of Aberdeen
"""
import os.path
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
from matplotlib.colors import from_levels_and_colors
import mplleaflet
def save(fig=None, path='_map.html', template='base.html' ,**kwargs):
fullpath = os.path.abspath(path)
with open(fullpath, 'w') as f:
mplleaflet.save_html(fig, fileobj=f, template =template, **kwargs)
file_path = os.path.dirname(os.path.realpath(__file__))
data_x = np.loadtxt(file_path+'/data/StartLng.txt')
data_y = np.loadtxt(file_path+'/data/StartLat.txt')
data_z = np.loadtxt(file_path+'/data/PT time.txt')
numcols, numrows = 100, 100
xi = np.linspace(data_x.min(), data_x.max(), numcols)
yi = np.linspace(data_y.min(), data_y.max(), numrows)
xi, yi = np.meshgrid(xi, yi)
#-- Interpolating at the points in xi, yi
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
bands_time = [0, 5, 10, 15, 20, 25, 30, 40, 50, 60, 75 , 90, 105, 120 , 150, 1000]
bands_cost = [0, 0.5, 1, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0, 7.5, 9.0, 10.5, 12.0, 15.0, 100.0]
bands_gen_cost = [0, 1.5, 3, 4.5, 6.0, 7.5, 9.0, 12.0, 15.0, 18.0, 22.5, 27.0, 31.5, 36.0, 45.0, 100.0] #
my_rgbs =['#800026', '#800026', '#bd0026', '#e31a1c', '#fc4e2a', '#fd8d3c', '#feb24c', '#fed976',
'#ffeda0', '#ffffcc', '#d0d1e6', '#a6bddb', '#74a9cf', '#3690c0', '#0570b0', '#034e7b']
##################################
# generate PT time contour map ###
##################################
cmap_time, norm_time = from_levels_and_colors(bands_time, my_rgbs, extend='min')
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_time, cmap = cmap_time, norm=norm_time)
# set the path to generate PT time map
mapfile = file_path + '/map/PT_time.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_time.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
##################################
# generate PT cost contour map ###
##################################
data_z = np.loadtxt(file_path+'/data/PT cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
cmap_cost, norm_cost = from_levels_and_colors(bands_cost, my_rgbs, extend='min')
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_cost, cmap = cmap_cost, norm=norm_cost)
# set the path to generate PT cost map
mapfile = file_path + '/map/PT_cost.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
######################################
# generate PT gen_cost contour map ###
######################################
data_z = np.loadtxt(file_path+'/data/PT gen_cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
cmap_gen_cost, norm_gen_cost = from_levels_and_colors(bands_gen_cost, my_rgbs, extend='min')
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_gen_cost, cmap = cmap_gen_cost, norm=norm_gen_cost)
# set the path to generate PT gen_cost map
mapfile = file_path + '/map/PT_gen_cost.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_gen_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
####################################
# generate Car time contour map ###
####################################
data_z = np.loadtxt(file_path+'/data/Car time.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_time, cmap = cmap_time, norm=norm_time)
# set the path to generate Car time map
mapfile = file_path + '/map/Car_time.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_time.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
####################################
# generate Car cost contour map ###
####################################
data_z = np.loadtxt(file_path+'/data/Car cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_cost, cmap = cmap_cost, norm=norm_cost)
# set the path to generate Car cost map
mapfile = file_path + '/map/Car_cost.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
#######################################
# generate Car gen_cost contour map ###
#######################################
data_z = np.loadtxt(file_path+'/data/Car gen_cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_gen_cost, cmap = cmap_gen_cost, norm=norm_gen_cost)
# set the path to generate Car gen_cost map
mapfile = file_path + '/map/Car_gen_cost.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_gen_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
#######################################
# generate Carpool time contour map ###
#######################################
data_z = np.loadtxt(file_path+'/data/Carpool time.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_time, cmap = cmap_time, norm=norm_time)
# set the path to generate Carpool time map
mapfile = file_path + '/map/Carpool_time.html'
# convert to leaflet map
save(fig = fig, path=mapfile, template = 'base_time.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
#######################################
# generate Carpool cost contour map ###
#######################################
data_z = np.loadtxt(file_path+'/data/Carpool cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_cost, cmap = cmap_cost, norm=norm_cost)
# set the path to generate Carpool cost map
mapfile = file_path + '/map/Carpool_cost.html'
save(fig = fig, path=mapfile, template = 'base_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
#######################################
# generate Carpool gen_cost contour map ###
#######################################
data_z = np.loadtxt(file_path+'/data/Carpool gen_cost.txt')
x, y, z = data_x, data_y, data_z
zi = griddata(x, y, z, xi, yi)
fig = plt.figure()
m = plt.contourf(xi, yi, zi, latlon=True, levels= bands_gen_cost, cmap = cmap_gen_cost, norm=norm_gen_cost)
# set the path to generate Carpool gen_cost map
mapfile = file_path + '/map/Carpool_gen_cost.html'
save(fig = fig, path=mapfile, template = 'base_gen_cost.html', tiles='mapbox bright')
# fig.colorbar(m)
# plt.show()
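# A possible refactoring sketch (added commentary, not part of the original
# script): the eight blocks above repeat the same load -> interpolate ->
# contour -> save steps, so they could be driven by one helper that reuses
# the module-level data_x, data_y, xi, yi defined earlier.
# def make_contour_map(data_file, out_name, bands, cmap, norm, template):
#     z = np.loadtxt(file_path + '/data/' + data_file)
#     z_grid = griddata(data_x, data_y, z, xi, yi)
#     fig = plt.figure()
#     plt.contourf(xi, yi, z_grid, latlon=True, levels=bands, cmap=cmap, norm=norm)
#     save(fig=fig, path=file_path + '/map/' + out_name, template=template,
#          tiles='mapbox bright')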
| apache-2.0 |
chenyyx/scikit-learn-doc-zh | examples/zh/svm/plot_svm_anova.py | 33 | 2024 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
# #############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data to simulate a curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
# #############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
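# (added note) The 'anova' step keeps only the top `percentile` percent of
# features ranked by the ANOVA F-value (f_classif); the SVC is then trained
# on that reduced feature set.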
# #############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using 1 CPU
this_scores = cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| gpl-3.0 |
gef756/statsmodels | statsmodels/examples/ex_emplike_1.py | 34 | 3682 | """
This is a basic tutorial on how to conduct empirical likelihood
inference for descriptive statistics. If matplotlib is installed
it also generates plots.
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
print('Welcome to El')
np.random.seed(634) # No significance of the seed.
# Let's first generate some univariate data.
univariate = np.random.standard_normal(30)
# Now let's play with it
# Initiate an empirical likelihood descriptive statistics instance
eldescriptive = sm.emplike.DescStat(univariate)
# Empirical likelihood is (typically) a method of inference,
# not estimation. Therefore, there is no attribute eldescriptive.mean
# However, we can check the mean:
eldescriptive_mean = eldescriptive.endog.mean() #.42
#Let's conduct a hypothesis test to see if the mean is 0
print('Hypothesis test results for the mean:')
print(eldescriptive.test_mean(0))
# The first value is the -2 * log-likelihood ratio, which is
# chi-squared distributed. The second value is the p-value.
# Let's see what the variance is:
eldescriptive_var = eldescriptive.endog.var() # 1.01
#Let's test if the variance is 1:
print('Hypothesis test results for the variance:')
print(eldescriptive.test_var(1))
# Let's test if Skewness and Kurtosis are 0
print('Hypothesis test results for Skewness:')
print(eldescriptive.test_skew(0))
print('Hypothesis test results for the Kurtosis:')
print(eldescriptive.test_kurt(0))
# Note that the skewness and kurtosis tests take longer. This is because
# we have to optimize over the nuisance parameters (mean, variance).
# We can also test the skewness and kurtosis jointly
print(' Joint Skewness-Kurtosis test')
eldescriptive.test_joint_skew_kurt(0, 0)
# Let's try and get some confidence intervals
print('Confidence interval for the mean')
print(eldescriptive.ci_mean())
print('Confidence interval for the variance')
print(eldescriptive.ci_var())
print('Confidence interval for skewness')
print(eldescriptive.ci_skew())
print('Confidence interval for kurtosis')
print(eldescriptive.ci_kurt())
# if matplotlib is installed, we can get a contour plot for the mean
# and variance.
mean_variance_contour = eldescriptive.plot_contour(-.5, 1.2, .2, 2.5, .05, .05)
# This returns a figure instance. Just type mean_var_contour.show()
# to see the plot.
# Once you close the plot, we can start some multivariate analysis.
x1 = np.random.exponential(2, (30, 1))
x2 = 2 * x1 + np.random.chisquare(4, (30, 1))
mv_data = np.concatenate((x1, x2), axis=1)
mv_elmodel = sm.emplike.DescStat(mv_data)
# For multivariate data, the only methods are mv_test_mean,
# mv mean contour and ci_corr and test_corr.
# Let's test the hypothesis that x1 has a mean of 2 and x2 has a mean of 7
print('Multivariate mean hypothesis test')
print(mv_elmodel.mv_test_mean(np.array([2, 7])))
# Now let's get the confidence interval for correlation
print('Correlation Coefficient CI')
print(mv_elmodel.ci_corr())
# Note how this took much longer than previous functions. That is
# because the function is optimizing over 4 nuisance parameters.
# We can also do a hypothesis test for correlation
print('Hypothesis test for correlation')
print(mv_elmodel.test_corr(.7))
# Finally, let's create a contour plot for the means of the data
means_contour = mv_elmodel.mv_mean_contour(1, 3, 6,9, .15,.15, plot_dta=1)
# This also returns a fig so we can type mean_contour.show() to see the figure
# Sometimes, the data is very dispersed and we would like to see the confidence
# intervals without the plotted data. Let's see the difference when we set
# plot_dta=0
means_contour2 = mv_elmodel.mv_mean_contour(1, 3, 6,9, .05,.05, plot_dta=0)
| bsd-3-clause |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 19 | 60823 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
# so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertClassificationPredictions(
self, expected_len, n_classes, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, range(n_classes))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=300)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, n_classes, predicted_proba)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
      # For the case of binary classification, the 2nd column of "predictions"
      # holds the probability of the positive class.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither 'classes' nor
    # 'probabilities'.
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
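# A minimal sketch of the DNNClassifier workflow exercised by the tests
# above (illustrative only; the feature column name, hidden_units and step
# counts are arbitrary placeholders, not values mandated by the API):
#
#   classifier = dnn.DNNClassifier(
#       feature_columns=[feature_column.real_valued_column('x')],
#       hidden_units=[3, 3])
#   classifier.fit(input_fn=_input_fn, steps=100)
#   scores = classifier.evaluate(input_fn=_input_fn, steps=1)
#   classes = list(classifier.predict_classes(input_fn=_input_fn))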
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def _assertRegressionOutputs(
self, predictions, expected_shape):
predictions_nparray = np.array(predictions)
self.assertAllEqual(expected_shape, predictions_nparray.shape)
self.assertTrue(np.issubdtype(predictions_nparray.dtype, np.float))
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self._assertRegressionOutputs(predicted_scores, [3])
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self._assertRegressionOutputs(predicted_scores, [3])
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
    # Tests the case where the key tuple doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
grundgruen/zipline | zipline/utils/tradingcalendar_tse.py | 17 | 10125 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime
from dateutil import rrule
from zipline.utils.tradingcalendar import end, canonicalize_datetime, \
get_open_and_closes
start = pd.Timestamp('1994-01-01', tz='UTC')
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
new_years_sunday = rrule.rrule(
rrule.MONTHLY,
byyearday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_sunday)
new_years_saturday = rrule.rrule(
rrule.MONTHLY,
byyearday=3,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_saturday)
    # Family Day in Ontario, starting in 2008, third Monday of February
family_day = rrule.rrule(
rrule.MONTHLY,
bymonth=2,
byweekday=(rrule.MO(3)),
cache=True,
dtstart=datetime(2008, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(family_day)
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
# Monday prior to May 25th.
victoria_day = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=rrule.MO,
bymonthday=[24, 23, 22, 21, 20, 19, 18],
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(victoria_day)
july_1st = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_1st)
july_1st_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_1st_sunday)
july_1st_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_1st_saturday)
civic_holiday = rrule.rrule(
rrule.MONTHLY,
bymonth=8,
byweekday=rrule.MO(1),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(civic_holiday)
labor_day = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(labor_day)
thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=10,
byweekday=(rrule.MO(2)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(thanksgiving)
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
    # If Christmas falls on a Sunday, the 26th (a Monday) is taken by Boxing
    # Day, so Christmas is observed on Tuesday the 27th.
christmas_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=27,
byweekday=rrule.TU,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_sunday)
    # If Christmas falls on a Saturday, it is observed on Monday the 27th.
christmas_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=27,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_saturday)
boxing_day = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(boxing_day)
    # If Boxing Day falls on a Sunday, then Christmas was a Saturday:
    # Christmas is observed on Monday the 27th and Boxing Day is observed
    # on Tuesday the 28th.
boxing_day_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=28,
byweekday=rrule.TU,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(boxing_day_sunday)
    # If Boxing Day falls on a Saturday, it is observed on Monday the 28th.
boxing_day_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=28,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(boxing_day_saturday)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# Add September 11th closings
    # The TSX was open for 71 minutes on September 11, 2001.
# It was closed on the 12th and reopened on the 13th.
# http://www.cbc.ca/news2/interactives/map-tsx/
#
# September 2001
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
non_trading_days.append(
datetime(2001, 9, 12, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
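# Illustrative check (not part of the original module): holidays noted in
# the comments further below, e.g. Canadian Thanksgiving on 2012-10-08,
# should land in the computed index.
#
#   days_2012 = get_non_trading_days(pd.Timestamp('2012-01-01', tz='UTC'),
#                                    pd.Timestamp('2012-12-31', tz='UTC'))
#   assert pd.Timestamp('2012-10-08', tz='UTC') in days_2012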
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
# Days in Environment but not in Calendar (using ^GSPTSE as bm_symbol):
# --------------------------------------------------------------------
# Used http://web.tmxmoney.com/pricehistory.php?qm_page=61468&qm_symbol=^TSX
# to check whether exchange was open on these days.
# 1994-07-01 - July 1st, Yahoo Finance has Volume = 0
# 1996-07-01 - July 1st, Yahoo Finance has Volume = 0
# 1996-08-05 - Civic Holiday, Yahoo Finance has Volume = 0
# 1997-07-01 - July 1st, Yahoo Finance has Volume = 0
# 1997-08-04 - Civic Holiday, Yahoo Finance has Volume = 0
# 2001-05-21 - Victoria day, Yahoo Finance has Volume = 0
# 2004-10-11 - Closed, Thanksgiving - Confirmed closed
# 2004-12-28 - Closed, Boxing Day - Confirmed closed
# 2012-10-08 - Closed, Thanksgiving - Confirmed closed
# Days in Calendar but not in Environment using ^GSPTSE as bm_symbol:
# --------------------------------------------------------------------
# Used http://web.tmxmoney.com/pricehistory.php?qm_page=61468&qm_symbol=^TSX
# to check whether exchange was open on these days.
# 2000-06-28 - No data this far back, can't confirm
# 2000-08-28 - No data this far back, can't confirm
# 2000-08-29 - No data this far back, can't confirm
# 2001-09-11 - TSE Open for 71 min.
# 2002-02-01 - Confirm TSE Open
# 2002-06-14 - Confirm TSE Open
# 2002-07-02 - Confirm TSE Open
# 2002-11-11 - TSX website has no data for 2 weeks in 2002
# 2003-07-07 - Confirm TSE Open
# 2003-12-16 - Confirm TSE Open
def get_early_closes(start, end):
    # TSX closed at 1:00 PM on December 24th.
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH, rrule.FR),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=9,
minute=31),
tz='US/Eastern').tz_convert('UTC')
# 1 PM if early close, 4 PM otherwise
close_hour = 13 if day in early_closes else 16
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=close_hour),
tz='US/Eastern').tz_convert('UTC')
return market_open, market_close
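# Worked example of the mapping above (illustrative): a regular TSX session
# runs 9:31-16:00 US/Eastern and a December 24th early close runs
# 9:31-13:00; during Eastern Standard Time (UTC-5) these convert to
# 14:31-21:00 UTC and 14:31-18:00 UTC respectively.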
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close)
| apache-2.0 |
aetilley/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
Logistic regression is inherently a binary classifier; the multiclass
behaviour shown here comes from the one-vs-rest and multinomial extensions.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
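# Xfull enumerates every point of the 100 x 100 evaluation grid, i.e. an
# array of shape (10000, 2); predict_proba is evaluated on it below.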
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities:
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
xdnian/pyml | code/ch03/share.py | 2 | 1904 | import numpy as np
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import warnings
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02, xlabel='', ylabel='', title=''):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, c=cmap(idx),
marker=markers[idx], label=cl)
# highlight test samples
if test_idx:
# plot all samples
if not versiontuple(np.__version__) >= versiontuple('1.9.0'):
X_test, y_test = X[list(test_idx), :], y[list(test_idx)]
warnings.warn('Please update to NumPy 1.9.0 or newer')
else:
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0],
X_test[:, 1],
c='',
alpha=1.0,
linewidths=1,
marker='o',
s=55, label='test set')
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
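# Minimal usage sketch (assumes a two-feature dataset and any fitted
# scikit-learn classifier; the variable names and test index range below
# are placeholders):
#
#   from sklearn.linear_model import Perceptron
#   clf = Perceptron().fit(X_train, y_train)
#   plot_decision_regions(X_combined, y_combined, classifier=clf,
#                         test_idx=range(105, 150),
#                         xlabel='feature 1', ylabel='feature 2')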
| mit |
lthurlow/Network-Grapher | proj/external/numpy-1.7.0/build/lib.linux-i686-2.7/numpy/lib/npyio.py | 9 | 65323 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError("Illegal argument")
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
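# Note: the wrapper above gives gzip streams a forward-only seek() (with a
# rewind-and-reread fallback for backwards seeks) and a tell(); loadtxt
# below relies on it when it is handed a '.gz' filename.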
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load an array(s) or pickled objects from .npy, .npz, or pickled files.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap` for a detailed description of the modes).
A memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful for
accessing small fragments of large files without reading the entire
file into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For '.npz' files, the returned instance of
NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the context
manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlyling file descriptor is closed when exiting the 'with' block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
own_fid = False
return NpzFile(fid, own_fid=own_fid)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed .npz file format
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError("Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
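# Illustrative note on the archive layout produced above: each array is
# staged as a temporary '.npy' file and stored under '<keyword>.npy', so
# np.savez(outfile, x=x, y=y) yields members that np.load reports (in no
# guaranteed order) as ['y', 'x'], matching the savez docstring.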
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
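# Illustrative behaviour of _getconv, following the branches above (these
# two calls are examples, not an exhaustive contract):
#   _getconv(np.dtype(float))('1e-3') -> 0.001   (the builtin ``float``)
#   _getconv(np.dtype(bool))('0') -> False       (``bool(int('0'))``)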
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
else:
fh = iter(open(fname, 'U'))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
        if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
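    # Illustrative note (not from the NumPy docs): for a structured dtype such
    # as np.dtype([('a', 'i4'), ('b', 'f8', 2)]), flatten_dtype yields the flat
    # column dtypes [int32, float64, float64] plus packing info
    # [(1, None), (2, list)]; pack_items then turns a converted row like
    # [1, 2.0, 3.0] back into the nested form (1, [2.0, 3.0]) expected by
    # np.array with the original dtype.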
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.next()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = fh.next()
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if not ndmin in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
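# Illustrative sketch (not from the NumPy docs): with a structured dtype and
# unpack=True, loadtxt returns one array per field instead of a transposed
# 2-D array, e.g.
#   >>> from StringIO import StringIO
#   >>> c = StringIO("M 21 72\nF 35 58")
#   >>> gender, age, weight = np.loadtxt(
#   ...     c, dtype={'names': ('gender', 'age', 'weight'),
#   ...               'formats': ('S1', 'i4', 'f4')}, unpack=True)
#   >>> age
#   array([21, 35], dtype=int32)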
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
Character separating columns.
    newline : str, optional
        Character separating lines.
        .. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces the result to be preceded with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete; for an exhaustive
    specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt),] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
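# Illustrative sketch (not from the NumPy docs) of the complex-valued `fmt`
# options a) and c) described in the docstring above:
#   >>> z = np.array([[1 + 2j, 3 - 4j]])
#   >>> np.savetxt('c.out', z, fmt='%.2e')  # option a): each value written as ' (real+imagj)'
#   >>> np.savetxt('c.out', z, fmt=['%.1e + %.1ej', '(%.1e%+.1ej)'])  # option c)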
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
        The number of lines to skip at the beginning of the file.
skip_footer : int, optional
        The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError("fname mustbe a string, filehandle, or generator. "\
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(\
"The use of `skiprows` is deprecated, it will be removed in numpy 2.0.\n" \
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.next()
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = fhd.next()
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(\
"The use of `missing` is deprecated, it will be removed in Numpy 2.0.\n" \
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
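# Illustrative sketch (not from the NumPy docs): `missing_values` and
# `filling_values` cooperate to patch incomplete entries, e.g.
#   >>> from StringIO import StringIO
#   >>> s = StringIO("1,N/A,3\n4,5,N/A")
#   >>> np.genfromtxt(s, delimiter=',', missing_values='N/A', filling_values=-1)
#   array([[ 1., -1.,  3.],
#          [ 4.,  5., -1.]])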
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
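# Illustrative sketch (not from the NumPy docs): recfromcsv reads the header
# row as (lower-cased) field names and infers the dtype, e.g.
#   >>> from StringIO import StringIO
#   >>> s = StringIO("Name,Age\nAlice,30\nBob,25")
#   >>> r = np.recfromcsv(s)
#   >>> r.age
#   array([30, 25])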
| mit |
luigift/pybrain | examples/rl/environments/linear_fa/bicycle.py | 26 | 14462 | from __future__ import print_function
"""An attempt to implement Randlov and Alstrom (1998). They successfully
use reinforcement learning to balance a bicycle, and to control it to drive
to a specified goal location. Their work has been used since then by a few
researchers as a benchmark problem.
We only implement the balance task. This implementation differs at least
slightly, since Randlov and Alstrom did not mention anything about how they
annealed/decayed their learning rate, etc. As a result of these differences,
the results do not match those obtained by Randlov and Alstrom.
"""
__author__ = 'Chris Dembia, Bruce Cam, Johnny Israeli'
from scipy import asarray
from numpy import sin, cos, tan, sqrt, arcsin, arctan, sign, clip, argwhere
import numpy as np  # the np.* calls throughout this file rely on this import
from matplotlib import pyplot as plt
import pybrain.rl.environments
from pybrain.rl.environments.environment import Environment
from pybrain.rl.learners.valuebased.linearfa import SARSALambda_LinFA
from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.utilities import one_to_n
class BicycleEnvironment(Environment):
"""Randlov and Alstrom's bicycle model. This code matches nearly exactly
some c code we found online for simulating Randlov and Alstrom's
bicycle. The bicycle travels at a fixed speed.
"""
# For superclass.
indim = 2
outdim = 10
# Environment parameters.
time_step = 0.01
# Goal position and radius
# Lagouakis (2002) uses angle to goal, not heading, as a state
max_distance = 1000.
# Acceleration on Earth's surface due to gravity (m/s^2):
g = 9.82
# See the paper for a description of these quantities:
# Distances (in meters):
c = 0.66
dCM = 0.30
h = 0.94
L = 1.11
r = 0.34
# Masses (in kilograms):
Mc = 15.0
Md = 1.7
Mp = 60.0
# Velocity of a bicycle (in meters per second), equal to 10 km/h:
v = 10.0 * 1000.0 / 3600.0
# Derived constants.
M = Mc + Mp # See Randlov's code.
Idc = Md * r**2
Idv = 1.5 * Md * r**2
Idl = 0.5 * Md * r**2
Itot = 13.0 / 3.0 * Mc * h**2 + Mp * (h + dCM)**2
sigmad = v / r
def __init__(self):
Environment.__init__(self)
self.reset()
self.actions = [0.0, 0.0]
self._save_wheel_contact_trajectories = False
def performAction(self, actions):
self.actions = actions
self.step()
def saveWheelContactTrajectories(self, opt):
self._save_wheel_contact_trajectories = opt
def step(self):
# Unpack the state and actions.
# -----------------------------
# Want to ignore the previous value of omegadd; it could only cause a
# bug if we assign to it.
(theta, thetad, omega, omegad, _,
xf, yf, xb, yb, psi) = self.sensors
(T, d) = self.actions
# For recordkeeping.
# ------------------
if self._save_wheel_contact_trajectories:
self.xfhist.append(xf)
self.yfhist.append(yf)
self.xbhist.append(xb)
self.ybhist.append(yb)
# Intermediate time-dependent quantities.
# ---------------------------------------
# Avoid divide-by-zero, just as Randlov did.
if theta == 0:
rf = 1e8
rb = 1e8
rCM = 1e8
else:
rf = self.L / np.abs(sin(theta))
rb = self.L / np.abs(tan(theta))
rCM = sqrt((self.L - self.c)**2 + self.L**2 / tan(theta)**2)
phi = omega + np.arctan(d / self.h)
# Equations of motion.
# --------------------
# Second derivative of angular acceleration:
omegadd = 1 / self.Itot * (self.M * self.h * self.g * sin(phi)
- cos(phi) * (self.Idc * self.sigmad * thetad
+ sign(theta) * self.v**2 * (
self.Md * self.r * (1.0 / rf + 1.0 / rb)
+ self.M * self.h / rCM)))
thetadd = (T - self.Idv * self.sigmad * omegad) / self.Idl
# Integrate equations of motion using Euler's method.
# ---------------------------------------------------
# yt+1 = yt + yd * dt.
# Must update omega based on PREVIOUS value of omegad.
omegad += omegadd * self.time_step
omega += omegad * self.time_step
thetad += thetadd * self.time_step
theta += thetad * self.time_step
# Handlebars can't be turned more than 80 degrees.
theta = np.clip(theta, -1.3963, 1.3963)
# Wheel ('tyre') contact positions.
# ---------------------------------
# Front wheel contact position.
front_temp = self.v * self.time_step / (2 * rf)
# See Randlov's code.
if front_temp > 1:
front_temp = sign(psi + theta) * 0.5 * np.pi
else:
front_temp = sign(psi + theta) * arcsin(front_temp)
xf += self.v * self.time_step * -sin(psi + theta + front_temp)
yf += self.v * self.time_step * cos(psi + theta + front_temp)
# Rear wheel.
back_temp = self.v * self.time_step / (2 * rb)
# See Randlov's code.
if back_temp > 1:
back_temp = np.sign(psi) * 0.5 * np.pi
else:
back_temp = np.sign(psi) * np.arcsin(back_temp)
xb += self.v * self.time_step * -sin(psi + back_temp)
yb += self.v * self.time_step * cos(psi + back_temp)
# Preventing numerical drift.
# ---------------------------
# Copying what Randlov did.
current_wheelbase = sqrt((xf - xb)**2 + (yf - yb)**2)
if np.abs(current_wheelbase - self.L) > 0.01:
relative_error = self.L / current_wheelbase - 1.0
xb += (xb - xf) * relative_error
yb += (yb - yf) * relative_error
# Update heading, psi.
# --------------------
delta_y = yf - yb
if (xf == xb) and delta_y < 0.0:
psi = np.pi
else:
if delta_y > 0.0:
psi = arctan((xb - xf) / delta_y)
else:
psi = sign(xb - xf) * 0.5 * np.pi - arctan(delta_y / (xb - xf))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
def reset(self):
theta = 0
thetad = 0
omega = 0
omegad = 0
omegadd = 0
xf = 0
yf = self.L
xb = 0
yb = 0
psi = np.arctan((xb - xf) / (yf - yb))
self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi])
self.xfhist = []
self.yfhist = []
self.xbhist = []
self.ybhist = []
def getSteer(self):
return self.sensors[0]
def getTilt(self):
return self.sensors[2]
def get_xfhist(self):
return self.xfhist
def get_yfhist(self):
return self.yfhist
def get_xbhist(self):
return self.xbhist
def get_ybhist(self):
return self.ybhist
def getSensors(self):
return self.sensors
class BalanceTask(pybrain.rl.environments.EpisodicTask):
"""The rider is to simply balance the bicycle while moving with the
speed perscribed in the environment. This class uses a continuous 5
dimensional state space, and a discrete state space.
This class is heavily guided by
pybrain.rl.environments.cartpole.balancetask.BalanceTask.
"""
max_tilt = np.pi / 6.
nactions = 9
def __init__(self, max_time=1000.0):
super(BalanceTask, self).__init__(BicycleEnvironment())
self.max_time = max_time
# Keep track of time in case we want to end episodes based on number of
# time steps.
self.t = 0
@property
def indim(self):
return 1
@property
def outdim(self):
return 5
def reset(self):
super(BalanceTask, self).reset()
self.t = 0
def performAction(self, action):
"""Incoming action is an int between 0 and 8. The action we provide to
the environment consists of a torque T in {-2 N, 0, 2 N}, and a
displacement d in {-.02 m, 0, 0.02 m}.
"""
self.t += 1
assert round(action[0]) == action[0]
# -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for
# action in {6, 7, 8}
torque_selector = np.floor(action[0] / 3.0) - 1.0
T = 2 * torque_selector
# Random number in [-1, 1]:
p = 2.0 * np.random.rand() - 1.0
# -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for
# action in {2, 5, 8}
disp_selector = action[0] % 3 - 1.0
d = 0.02 * disp_selector + 0.02 * p
super(BalanceTask, self).performAction([T, d])
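        # Illustrative note (not in the original example): the nine discrete
        # actions decode to (torque, displacement) pairs roughly as
        #   action 0 -> (-2 N, -0.02 m), action 4 -> (0, 0), action 8 -> (+2 N, +0.02 m),
        # with uniform noise of up to +/-0.02 m added to the displacement.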
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
return self.env.getSensors()[0:5]
def isFinished(self):
# Criterion for ending an episode. From Randlov's paper:
# "When the agent can balance for 1000 seconds, the task is considered
# learned."
if np.abs(self.env.getTilt()) > self.max_tilt:
return True
elapsed_time = self.env.time_step * self.t
if elapsed_time > self.max_time:
return True
return False
def getReward(self):
# -1 reward for falling over; no reward otherwise.
if np.abs(self.env.getTilt()) > self.max_tilt:
return -1.0
return 0.0
class LinearFATileCoding3456BalanceTask(BalanceTask):
"""An attempt to exactly implement Randlov's function approximation. He
discretized (tiled) the state space into 3456 bins. We use the same action
space as in the superclass.
"""
# From Randlov, 1998:
theta_bounds = np.array(
[-0.5 * np.pi, -1.0, -0.2, 0, 0.2, 1.0, 0.5 * np.pi])
thetad_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
omega_bounds = np.array(
[-BalanceTask.max_tilt, -0.15, -0.06, 0, 0.06, 0.15,
BalanceTask.max_tilt])
omegad_bounds = np.array(
[-np.inf, -0.5, -0.25, 0, 0.25, 0.5, np.inf])
omegadd_bounds = np.array(
[-np.inf, -2.0, 0, 2.0, np.inf])
# http://stackoverflow.com/questions/3257619/numpy-interconversion-between-multidimensional-and-linear-indexing
nbins_across_dims = [
len(theta_bounds) - 1,
len(thetad_bounds) - 1,
len(omega_bounds) - 1,
len(omegad_bounds) - 1,
len(omegadd_bounds) - 1]
    # This array, when dotted with the vector of five bin indices returned by
    # `getBin`, gives a 'linear' index between 0 and 3455 (= 6*4*6*6*4 - 1).
magic_array = np.cumprod([1] + nbins_across_dims)[:-1]
@property
def outdim(self):
# Used when constructing LinearFALearner's.
return 3456
def getBin(self, theta, thetad, omega, omegad, omegadd):
bin_indices = [
np.digitize([theta], self.theta_bounds)[0] - 1,
np.digitize([thetad], self.thetad_bounds)[0] - 1,
np.digitize([omega], self.omega_bounds)[0] - 1,
np.digitize([omegad], self.omegad_bounds)[0] - 1,
np.digitize([omegadd], self.omegadd_bounds)[0] - 1,
]
return np.dot(self.magic_array, bin_indices)
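    # Illustrative note (not in the original example): the grid has
    # 6*4*6*6*4 = 3456 cells. For the upright, at-rest state
    # (theta = thetad = omega = omegad = omegadd = 0) the digitize calls above
    # should give bin indices [3, 2, 3, 3, 2], i.e. the linear index
    # 3 + 2*6 + 3*24 + 3*144 + 2*864 = 2247.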
def getBinIndices(self, linear_index):
"""Given a linear index (integer between 0 and outdim), returns the bin
indices for each of the state dimensions.
"""
        return linear_index // self.magic_array % self.nbins_across_dims  # floor division keeps the indices integral on Python 3
def getObservation(self):
(theta, thetad, omega, omegad, omegadd,
xf, yf, xb, yb, psi) = self.env.getSensors()
state = one_to_n(self.getBin(theta, thetad, omega, omegad, omegadd),
self.outdim)
return state
class SARSALambda_LinFA_ReplacingTraces(SARSALambda_LinFA):
"""Randlov used replacing traces, but this doesn't exist in PyBrain's
SARSALambda.
"""
def _updateEtraces(self, state, action, responsibility=1.):
self._etraces *= self.rewardDiscount * self._lambda * responsibility
# This assumes that state is an identity vector (like, from one_to_n).
self._etraces[action] = clip(self._etraces[action] + state, -np.inf, 1.)
# Set the trace for all other actions in this state to 0:
action_bit = one_to_n(action, self.num_actions)
for argstate in argwhere(state == 1) :
self._etraces[argwhere(action_bit != 1), argstate] = 0.
task = LinearFATileCoding3456BalanceTask()
env = task.env
# The learning is very sensitive to the learning rate decay.
learner = SARSALambda_LinFA_ReplacingTraces(task.nactions, task.outdim,
learningRateDecay=2000)
learner._lambda = 0.95
task.discount = learner.rewardDiscount
agent = LinearFA_Agent(learner)
agent.logging = False
exp = EpisodicExperiment(task, agent)
performance_agent = LinearFA_Agent(learner)
performance_agent.logging = False
performance_agent.greedy = True
performance_agent.learning = False
env.saveWheelContactTrajectories(True)
plt.ion()
plt.figure(figsize=(8, 4))
ax1 = plt.subplot(1, 2, 1)
ax2 = plt.subplot(1, 2, 2)
def update_wheel_trajectories():
front_lines = ax2.plot(env.get_xfhist(), env.get_yfhist(), 'r')
back_lines = ax2.plot(env.get_xbhist(), env.get_ybhist(), 'b')
plt.axis('equal')
perform_cumrewards = []
for irehearsal in range(7000):
# Learn.
# ------
r = exp.doEpisodes(1)
# Discounted reward.
cumreward = exp.task.getTotalReward()
#print 'cumreward: %.4f; nsteps: %i; learningRate: %.4f' % (
# cumreward, len(r[0]), exp.agent.learner.learningRate)
if irehearsal % 50 == 0:
# Perform (no learning).
# ----------------------
# Swap out the agent.
exp.agent = performance_agent
# Perform.
r = exp.doEpisodes(1)
perform_cumreward = task.getTotalReward()
perform_cumrewards.append(perform_cumreward)
print('PERFORMANCE: cumreward:', perform_cumreward, 'nsteps:', len(r[0]))
# Swap back the learning agent.
performance_agent.reset()
exp.agent = agent
ax1.cla()
ax1.plot(perform_cumrewards, '.--')
# Wheel trajectories.
update_wheel_trajectories()
plt.pause(0.001)
| bsd-3-clause |
yask123/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
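# Illustrative note (not part of the original example): M simply weights the
# first feature twice as heavily as the second, so for instance
# k([1, 1], [1, 1]) = 2*1*1 + 1*1*1 = 3, where a plain linear kernel gives 2.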
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
shaoguangleo/code-for-blog | 2008/wx_mpl_dynamic_graph.py | 13 | 11139 | """
This demo demonstrates how to draw a dynamic mpl (matplotlib)
plot in a wxPython application.
It allows "live" plotting as well as manual zooming to specific
regions.
Both X and Y axes allow "auto" or "manual" settings. For Y, auto
mode sets the scaling of the graph to see all the data points.
For X, auto mode makes the graph "follow" the data. Set X min
to manual 0 to always see the whole data from the beginning.
Note: press Enter in the 'manual' text box to make a new value
affect the plot.
Eli Bendersky ([email protected])
License: this code is in the public domain
Last modified: 31.07.2008
"""
import os
import pprint
import random
import sys
import wx
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import numpy as np
import pylab
class DataGen(object):
""" A silly class that generates pseudo-random data for
display in the plot.
"""
def __init__(self, init=50):
self.data = self.init = init
def next(self):
self._recalc_data()
return self.data
def _recalc_data(self):
delta = random.uniform(-0.5, 0.5)
r = random.random()
if r > 0.9:
self.data += delta * 15
elif r > 0.8:
# attraction to the initial value
delta += (0.5 if self.init > self.data else -0.5)
self.data += delta
else:
self.data += delta
class BoundControlBox(wx.Panel):
""" A static box with a couple of radio buttons and a text
box. Allows to switch between an automatic mode and a
manual mode with an associated value.
"""
def __init__(self, parent, ID, label, initval):
wx.Panel.__init__(self, parent, ID)
self.value = initval
box = wx.StaticBox(self, -1, label)
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
self.radio_auto = wx.RadioButton(self, -1,
label="Auto", style=wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1,
label="Manual")
self.manual_text = wx.TextCtrl(self, -1,
size=(35,-1),
value=str(initval),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_manual_text, self.manual_text)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.manual_text)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_manual_text(self, event):
self.manual_text.Enable(self.radio_manual.GetValue())
def on_text_enter(self, event):
self.value = self.manual_text.GetValue()
def is_auto(self):
return self.radio_auto.GetValue()
def manual_value(self):
return self.value
class GraphFrame(wx.Frame):
""" The main frame of the application
"""
title = 'Demo: dynamic matplotlib graph'
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.datagen = DataGen()
self.data = [self.datagen.next()]
self.paused = False
self.create_menu()
self.create_status_bar()
self.create_main_panel()
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.redraw_timer.Start(100)
def create_menu(self):
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.xmin_control = BoundControlBox(self.panel, -1, "X min", 0)
self.xmax_control = BoundControlBox(self.panel, -1, "X max", 50)
self.ymin_control = BoundControlBox(self.panel, -1, "Y min", 0)
self.ymax_control = BoundControlBox(self.panel, -1, "Y max", 100)
self.pause_button = wx.Button(self.panel, -1, "Pause")
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.cb_grid = wx.CheckBox(self.panel, -1,
"Show Grid",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_grid, self.cb_grid)
self.cb_grid.SetValue(True)
self.cb_xlab = wx.CheckBox(self.panel, -1,
"Show X labels",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_xlab, self.cb_xlab)
self.cb_xlab.SetValue(True)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(20)
self.hbox1.Add(self.cb_grid, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.cb_xlab, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox2.AddSpacer(24)
self.hbox2.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.ymax_control, border=5, flag=wx.ALL)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def init_plot(self):
self.dpi = 100
self.fig = Figure((3.0, 3.0), dpi=self.dpi)
self.axes = self.fig.add_subplot(111)
self.axes.set_axis_bgcolor('black')
self.axes.set_title('Very important random data', size=12)
pylab.setp(self.axes.get_xticklabels(), fontsize=8)
pylab.setp(self.axes.get_yticklabels(), fontsize=8)
# plot the data as a line series, and save the reference
# to the plotted line series
#
self.plot_data = self.axes.plot(
self.data,
linewidth=1,
color=(1, 1, 0),
)[0]
def draw_plot(self):
""" Redraws the plot
"""
# when xmin is on auto, it "follows" xmax to produce a
# sliding window effect. therefore, xmin is assigned after
# xmax.
#
if self.xmax_control.is_auto():
xmax = len(self.data) if len(self.data) > 50 else 50
else:
xmax = int(self.xmax_control.manual_value())
if self.xmin_control.is_auto():
xmin = xmax - 50
else:
xmin = int(self.xmin_control.manual_value())
# for ymin and ymax, find the minimal and maximal values
        # in the data set and add a minimal margin.
#
# note that it's easy to change this scheme to the
# minimal/maximal value in the current display, and not
# the whole data set.
#
if self.ymin_control.is_auto():
ymin = round(min(self.data), 0) - 1
else:
ymin = int(self.ymin_control.manual_value())
if self.ymax_control.is_auto():
ymax = round(max(self.data), 0) + 1
else:
ymax = int(self.ymax_control.manual_value())
self.axes.set_xbound(lower=xmin, upper=xmax)
self.axes.set_ybound(lower=ymin, upper=ymax)
# anecdote: axes.grid assumes b=True if any other flag is
# given even if b is set to False.
# so just passing the flag into the first statement won't
# work.
#
if self.cb_grid.IsChecked():
self.axes.grid(True, color='gray')
else:
self.axes.grid(False)
# Using setp here is convenient, because get_xticklabels
# returns a list over which one needs to explicitly
# iterate, and setp already handles this.
#
pylab.setp(self.axes.get_xticklabels(),
visible=self.cb_xlab.IsChecked())
self.plot_data.set_xdata(np.arange(len(self.data)))
self.plot_data.set_ydata(np.array(self.data))
self.canvas.draw()
def on_pause_button(self, event):
self.paused = not self.paused
def on_update_pause_button(self, event):
label = "Resume" if self.paused else "Pause"
self.pause_button.SetLabel(label)
def on_cb_grid(self, event):
self.draw_plot()
def on_cb_xlab(self, event):
self.draw_plot()
def on_save_plot(self, event):
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def on_redraw_timer(self, event):
# if paused do not add data, but still redraw the plot
# (to respond to scale modifications, grid change, etc.)
#
if not self.paused:
self.data.append(self.datagen.next())
self.draw_plot()
def on_exit(self, event):
self.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = GraphFrame()
app.frame.Show()
app.MainLoop()
| unlicense |
rohanp/scikit-learn | sklearn/cluster/bicluster.py | 66 | 19850 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
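# A tiny illustrative sketch of the helper above (the matrix is made up):
# for a dense matrix each normalized entry equals
# X[i, j] / sqrt(row_sum_i * col_sum_j).
def _scale_normalize_sketch():
    X = np.array([[1.0, 3.0],
                  [2.0, 4.0]])
    an, row_diag, col_diag = _scale_normalize(X)
    # e.g. an[0, 0] == 1.0 / np.sqrt(4.0 * 3.0), since row 0 sums to 4
    # and column 0 sums to 3
    return an, row_diag, col_diag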
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
    # According to the paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
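# Illustrative only: after bistochastic normalization of a small dense
# matrix, all row sums are (nearly) equal and all column sums are (nearly)
# equal, which is easy to check numerically.
def _bistochastic_normalize_sketch():
    X = np.array([[1.0, 2.0],
                  [3.0, 4.0]])
    X_scaled = _bistochastic_normalize(X)
    return X_scaled.sum(axis=1), X_scaled.sum(axis=0)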
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
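# Illustrative only: the log-interactions transform is a double centering of
# log(X), so the row and column means of the result are (numerically) zero.
def _log_normalize_sketch():
    L = _log_normalize(np.array([[1.0, 2.0],
                                 [4.0, 8.0]]))
    return L.mean(axis=1), L.mean(axis=0)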
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
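# A minimal usage sketch, assuming ``make_biclusters`` from
# ``sklearn.datasets``: recover three blocks from a noisy block-diagonal
# matrix; the sizes and noise level are arbitrary.
def _spectral_coclustering_sketch():
    from sklearn.datasets import make_biclusters
    data, rows, columns = make_biclusters(shape=(30, 30), n_clusters=3,
                                          noise=0.5, random_state=0)
    model = SpectralCoclustering(n_clusters=3, random_state=0)
    model.fit(data)
    return model.row_labels_, model.column_labels_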
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
                raise ValueError("Incorrect parameter n_clusters has value:"
                                 " {}. It should either be a single integer"
                                 " or an iterable with two integers:"
                                 " (n_row_clusters, n_column_clusters)"
                                 "".format(self.n_clusters))
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
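# A minimal usage sketch, assuming ``make_checkerboard`` from
# ``sklearn.datasets``: fit a 2-by-3 checkerboard structure on dense data,
# for which the 'log' method is applicable.
def _spectral_biclustering_sketch():
    from sklearn.datasets import make_checkerboard
    data, rows, columns = make_checkerboard(shape=(30, 30), n_clusters=(2, 3),
                                            noise=0.5, random_state=0)
    model = SpectralBiclustering(n_clusters=(2, 3), method='log',
                                 random_state=0)
    model.fit(data)
    return model.row_labels_, model.column_labels_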
| bsd-3-clause |
GeoMop/PythonOCC_Examples | src/interpolate_bezier-cubic-filter.py | 1 | 2192 | import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
import sys
eps = sys.float_info.epsilon
# Coordinates of points P=[P_{1}, P_{2}, ..., P_{n}]
x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0])
y = np.array([3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.2, 3.4, 3.7, 3.9, 4.0, 4.1, 4.1, 4.1, 4.1])
# Estimation of t parameters mapping t_{i} to point P_{i}
t = np.linspace(0.0, 1.0, len(x))
new_t = []
new_x = []
new_y = []
yi_1 = None
i = 0
li = 0
di = 2
for yi in y:
if yi_1 is None:
new_t.append(t[i])
new_x.append(x[i])
new_y.append(y[i])
elif abs(yi - yi_1) > eps or i - li > di:
new_t.append(t[i])
new_x.append(x[i])
new_y.append(y[i])
li = i
yi_1 = yi
i += 1
if new_t[-1] != t[-1]:
new_t.append(t[i - 1])
new_x.append(x[i - 1])
new_y.append(y[i - 1])
x = np.array(new_x)
y = np.array(new_y)
t = np.array(new_t)
n = len(x)
print x
print y
print t
# Base matrix of Bezier cubic curve
M = np.matrix([
[-1, 3, -3, 1],
[3, -6, 3, 0],
[-3, 3, 0, 0],
[1, 0, 0, 0]])
iM = inv(M)
t3 = np.array([item**3 for item in t])
t2 = np.array([item**2 for item in t])
t1 = np.array([item for item in t])
t0 = np.array([1 for item in t])
T = np.mat([t3, t2, t1, t0]).transpose()
tT = T.transpose()
X = np.mat(x).transpose()
Y = np.mat(y).transpose()
Cx = iM * inv(tT * T) * tT * X
Cy = iM * inv(tT * T) * tT * Y
diff_x0 = x[0] - Cx[0]
diff_xn = x[n - 1] - Cx[3]
diff_y0 = y[0] - Cy[0]
diff_yn = y[n - 1] - Cy[3]
dy = ( diff_y0 + diff_yn ) / 2.0
Kx = np.array([x[0], Cx[1], Cx[2], x[n - 1]])
Ky = np.array([y[0], Cy[1] - dy, Cy[2] - dy, y[n - 1]])
# Draw coordinates of P points
plt.scatter(x,y)
Px = []
Py = []
t = 0.0
dt = 0.02
while t < 1.0 + dt:
P = np.array([t**3, t**2, t, 1]) * M * np.bmat([Cx, Cy])
Px.append(P[0, 0])
Py.append(P[0, 1])
t += dt
plt.plot(Px, Py, c='red')
Px = []
Py = []
t = 0.0
while t < 1.0 + dt:
P = np.array([t**3, t**2, t, 1]) * M * np.mat([Kx, Ky]).transpose()
Px.append(P[0, 0])
Py.append(P[0, 1])
t += dt
plt.plot(Px, Py, c='blue')
plt.show()
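# A short note on the fit above: with T the n x 4 matrix whose rows are
# [t**3, t**2, t, 1] and M the cubic Bezier basis, the control points C
# minimize ||T * M * C - P||**2, giving C = inv(M) * inv(T' * T) * T' * P,
# which is exactly what Cx and Cy compute for the x and y coordinates.
# The helper below (illustrative only) evaluates the fitted curve at a
# single parameter value, mirroring the plotting loops above.
def bezier_point(t, Cx, Cy):
    return np.array([t**3, t**2, t, 1]) * M * np.bmat([Cx, Cy])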
| gpl-2.0 |
tomlof/scikit-learn | sklearn/linear_model/tests/test_omp.py | 76 | 7752 | # Author: Vlad Niculae
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
nicolas998/Op_Alarmas | 02_Codigos/alarmas.py | 1 | 6511 | #!/usr/bin/env python
import os
import pandas as pd
from wmf import wmf
import numpy as np
import glob
########################################################################
# GLOBAL VARIABLES
ruta_store = None
ruta_store_bck = None
########################################################################
# FUNCTIONS FOR GETTING PATHS
def get_rutesList(rutas):
f = open(rutas,'r')
L = f.readlines()
f.close()
return L
def get_ruta(RutesList, key):
for i in RutesList:
if i.startswith('- **'+key+'**'):
return i.split(' ')[-1][:-1]
return 'Aviso: no se ha podido leer el key especificado'
def get_rain_last_hours(ruta, rutaTemp, hours, DeltaT = 300):
    # compute the number of time steps
Min = DeltaT/60.0
MinInHours = 60.0 / Min
Pasos = int(hours * MinInHours)
    # write the tail of the file to the temporary file
comando = 'tail '+ruta+' -n '+str(Pasos)+' > '+rutaTemp
os.system(comando)
def get_modelConfig_lines(RutesList, key, Calib_Storage = None, PlotType = None):
List = []
for i in RutesList:
if i.startswith('|'+key) or i.startswith('| '+key):
List.append(i)
if len(List)>0:
if Calib_Storage == 'Calib':
return get_modelCalib(List)
if Calib_Storage == 'Store':
return get_modelStore(List)
if Calib_Storage == 'Update':
return get_modelStoreLastUpdate(List)
if Calib_Storage == 'Plot':
return get_modelPlot(List, PlotType=PlotType)
return List
else:
return 'Aviso: no se encuentran lineas con el key de inicio especificado'
def get_modelPlot(RutesList, PlotType = 'Qsim_map'):
for l in RutesList:
key = l.split('|')[1].rstrip().lstrip()
if key[3:] == PlotType:
EjecsList = [i.rstrip().lstrip() for i in l.split('|')[2].split(',')]
return EjecsList
return key
def get_modelCalib(RutesList):
DCalib = {}
for l in RutesList:
c = [float(i) for i in l.split('|')[3:-1]]
name = l.split('|')[2]
DCalib.update({name.rstrip().lstrip(): c})
return DCalib
def get_modelStore(RutesList):
DStore = {}
for l in RutesList:
l = l.split('|')
DStore.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'Actualizar': l[3].rstrip().lstrip(),
'Tiempo': float(l[4].rstrip().lstrip()),
'Condition': l[5].rstrip().lstrip(),
'Calib': l[6].rstrip().lstrip(),
'BackSto': l[7].rstrip().lstrip(),
'Slides': l[8].rstrip().lstrip()}})
return DStore
def get_modelStoreLastUpdate(RutesList):
DStoreUpdate = {}
for l in RutesList:
l = l.split('|')
DStoreUpdate.update({l[1].rstrip().lstrip():
{'Nombre': l[2].rstrip().lstrip(),
'LastUpdate': l[3].rstrip().lstrip()}})
return DStoreUpdate
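# A minimal sketch of the configuration lines these helpers expect; the key
# names and paths below are hypothetical and purely illustrative.
def _example_read_rutes():
    L = ['- **ruta_campos** /hypothetical/path/fields/\n',
         '|Store| S1 | update | 1.0 | cond | c1 | b1 | s1 |\n']
    ruta = get_ruta(L, 'ruta_campos')
    DStore = get_modelConfig_lines(L, 'Store', Calib_Storage='Store')
    return ruta, DStore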
########################################################################
# FUNCTIONS FOR DEALING WITH RAIN FIELDS
def Rain_NoCero(rutaRain):
    f = open(rutaRain,'r')
L = f.readlines()
f.close()
return float(L[3].split()[-1])
def Rain_Cumulated(rutaCampo, cu, rutaAcum = None):
rutabin, rutahdr = wmf.__Add_hdr_bin_2route__(rutaCampo)
    # read the rain field header
D = pd.read_csv(rutahdr,skiprows=5,
index_col=2, parse_dates=True,
infer_datetime_format=True,
usecols = (1,2,3))
Nrecords = D[u' Record'][-1]
    # accumulate the precipitation for this query
Vsum = np.zeros(cu.ncells)
for i in range(1,18):
v,r = wmf.models.read_int_basin(rutabin,i, cu.ncells)
v = v.astype(float); v = v/1000.0
Vsum+=v
    # get the start and end dates.
FechaI = D[u' Record'].index[0]
FechaF = D[u' Record'].index[-1]
FechaI = FechaI + pd.Timedelta('5 hours')
FechaF = FechaF + pd.Timedelta('5 hours')
    # if a save path was given, save the result
    if rutaAcum is not None:
        # get the binary and hdr paths
rutabin, rutahdr = wmf.__Add_hdr_bin_2route__(rutaAcum)
        # write the binary file
Temp = np.zeros((1, cu.ncells))
Temp[0] = Vsum
wmf.models.write_float_basin(rutabin, Temp, 1, cu.ncells, 1)
        # write the header with the binary's start and end dates
f = open(rutahdr, 'w')
f.write('Fecha y hora de inicio y fin del binario acumulado:\n')
f.write('Fecha1: %s\n' % FechaI.to_pydatetime().strftime('%Y%m%d%H%M'))
f.write('Fecha2: %s\n' % FechaF.to_pydatetime().strftime('%Y%m%d%H%M'))
f.write('Lluvia Media: %.4f \n' % Vsum.mean())
f.close()
return Vsum, FechaI, FechaF
def Rain_Cumulated_Dates(rutaAcum, rutaNC):
    # get the dates
f = open(rutaAcum, 'r')
L = f.readlines()
f.close()
f1 = L[1].split()[1]
f2 = L[2].split()[1]
Df = {'Fecha1': L[1].split()[1], 'Fecha2': L[2].split()[1]}
Df1 = {'Fecha1': {'atras': pd.to_datetime(f1)-pd.Timedelta('30 minutes'),
'adelante':pd.to_datetime(f1)+pd.Timedelta('30 minutes')},
'Fecha2': {'atras': pd.to_datetime(f2)-pd.Timedelta('30 minutes'),
'adelante':pd.to_datetime(f2)+pd.Timedelta('30 minutes')}}
Fechas = []
for k in ['Fecha1','Fecha2']:
        # get the dates before and after
f11 = Df1[k]['atras'].to_pydatetime().strftime('%Y%m%d')
f12 = Df1[k]['adelante'].to_pydatetime().strftime('%Y%m%d')
        # list the files available around those dates
List = glob.glob(rutaNC+f11+'*.nc')
List.extend(glob.glob(rutaNC+f12+'*.nc'))
List.sort()
List = np.array([pd.to_datetime(i[43:55]) for i in List])
        # date differences
Diff = np.abs(List - pd.to_datetime(Df[k]))
for i in range(4):
try:
Fechas.append(List[Diff.argmin()+i])
except:
Fechas.append(pd.to_datetime('200001010000'))
#Fechas[1] = List[Diff.argmin()+1]
return Fechas
########################################################################
# FUNCTIONS FOR MODEL SETUP
def model_get_constStorage(RutesList, ncells):
Storage = np.zeros((5, ncells))
for i,c in enumerate(['Inicial Capilar','Inicial Escorrentia','Inicial Subsup','Inicial Subterraneo','Inicial Corriente']):
        Cs = float(get_ruta(RutesList, c))
Storage[i] = Cs
return Storage.astype(float)
def model_write_qsim(ruta,Qsim, index, pcont):
    # check whether the file already exists
L = glob.glob(ruta)
if len(L)>0:
Existe = True
Nuevo = False
else:
Existe = False
Nuevo = True
    # put the discharge into a DataFrame
D = {}
for c,i in enumerate(pcont):
D.update({i:Qsim[c]})
date = index.to_pydatetime().strftime('%Y-%m-%d-%H:%M')
Qsim = {date:D}
Qsim = pd.DataFrame(Qsim).T
    # write the discharge
with open(ruta, 'a') as f:
Qsim.to_csv(f, header=Nuevo,float_format='%.3f')
def model_update_norain():
print 'no rain'
def model_update_norain_next():
print 'no next'
def model_update_norain_last(RainRute, Hours):
    # read the rainfall file
print 'no last'
def model_def_rutes(ruteStore, ruteStoreHist):
    global ruta_store, ruta_store_bck
    ruta_store = ruteStore
    ruta_store_bck = ruteStoreHist
| gpl-3.0 |
denniszollo/mavlink | pymavlink/tools/mavgpslag.py | 43 | 3446 | #!/usr/bin/env python
'''
calculate GPS lag from DF log
'''
import sys, time, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--plot", action='store_true', default=False, help="plot errors")
parser.add_argument("--minspeed", type=float, default=6, help="minimum speed")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
from pymavlink.mavextra import *
from pymavlink.rotmat import Vector3, Matrix3
'''
Support having a $HOME/.pymavlink/mavextra.py for extra graphing functions
'''
home = os.getenv('HOME')
if home is not None:
extra = os.path.join(home, '.pymavlink', 'mavextra.py')
if os.path.exists(extra):
import imp
mavuser = imp.load_source('pymavlink.mavuser', extra)
from pymavlink.mavuser import *
def velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=0):
'''return summed velocity error'''
sum = 0
count = 0
for i in range(0, len(vel)-1):
dv = vel[i+1] - vel[i]
da = Vector3()
for idx in range(1+accel_indexes[i]-shift, 1+accel_indexes[i+1]-shift):
da += gaccel[idx]
dt1 = timestamps[i+1] - timestamps[i]
dt2 = (accel_indexes[i+1] - accel_indexes[i]) * imu_dt
da *= imu_dt
da *= dt1/dt2
#print(accel_indexes[i+1] - accel_indexes[i])
ex = abs(dv.x - da.x)
ey = abs(dv.y - da.y)
sum += 0.5*(ex+ey)
count += 1
if count == 0:
return None
return sum/count
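# A small self-contained sketch of what velocity_error measures (made-up
# numbers, assuming pymavlink's Vector3 arithmetic as used above): with a
# constant 1 m/s/s earth-frame acceleration sampled at 10Hz and GPS
# velocities taken once a second, the error at zero shift is ~0.
def velocity_error_sketch():
    timestamps = [0.0, 1.0, 2.0]
    vel = [Vector3(0, 0, 0), Vector3(1, 0, 0), Vector3(2, 0, 0)]
    gaccel = [Vector3(1, 0, 0) for _ in range(21)]
    accel_indexes = [0, 10, 20]
    return velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt=0.1)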
def gps_lag(logfile):
'''work out gps velocity lag times for a log file'''
    print("Processing log %s" % logfile)
    mlog = mavutil.mavlink_connection(logfile)
timestamps = []
vel = []
gaccel = []
accel_indexes = []
ATT = None
IMU = None
dtsum = 0
dtcount = 0
while True:
m = mlog.recv_match(type=['GPS','IMU','ATT'])
if m is None:
break
t = m.get_type()
if t == 'GPS' and m.Status==3 and m.Spd>args.minspeed:
v = Vector3(m.Spd*cos(radians(m.GCrs)), m.Spd*sin(radians(m.GCrs)), m.VZ)
vel.append(v)
timestamps.append(m._timestamp)
accel_indexes.append(max(len(gaccel)-1,0))
elif t == 'ATT':
ATT = m
elif t == 'IMU':
if ATT is not None:
gaccel.append(earth_accel_df(m, ATT))
if IMU is not None:
dt = m._timestamp - IMU._timestamp
dtsum += dt
dtcount += 1
IMU = m
imu_dt = dtsum / dtcount
print("Loaded %u samples imu_dt=%.3f" % (len(vel), imu_dt))
besti = -1
besterr = 0
delays = []
errors = []
for i in range(0,100):
err = velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=i)
if err is None:
break
errors.append(err)
delays.append(i*imu_dt)
if besti == -1 or err < besterr:
besti = i
besterr = err
print("Best %u (%.3fs) %f" % (besti, besti*imu_dt, besterr))
if args.plot:
import matplotlib.pyplot as plt
plt.plot(delays, errors, 'bo-')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,y2))
plt.ylabel('Error')
plt.xlabel('Delay(s)')
plt.show()
for filename in args.logs:
gps_lag(filename)
| lgpl-3.0 |
yl565/statsmodels | statsmodels/examples/ex_lowess.py | 34 | 2827 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:26:06 2011
Author: Chris Jordan Squire
extracted from test suite by josef-pktd
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
# this is just to check direct import
import statsmodels.nonparametric.smoothers_lowess
statsmodels.nonparametric.smoothers_lowess.lowess
x = np.arange(20.)
#standard normal noise
noise = np.array([-0.76741118, -0.30754369,
0.39950921, -0.46352422, -1.67081778,
0.6595567 , 0.66367639, -2.04388585,
0.8123281 , 1.45977518,
1.21428038, 1.29296866, 0.78028477,
-0.2402853 , -0.21721302,
0.24549405, 0.25987014, -0.90709034,
-1.45688216, -0.31780505])
y = x + noise
expected_lowess = np.array([[ 0. , -0.58337912],
[ 1. , 0.61951246],
[ 2. , 1.82221628],
[ 3. , 3.02536876],
[ 4. , 4.22667951],
[ 5. , 5.42387723],
[ 6. , 6.60834945],
[ 7. , 7.7797691 ],
[ 8. , 8.91824348],
[ 9. , 9.94997506],
[ 10. , 10.89697569],
[ 11. , 11.78746276],
[ 12. , 12.62356492],
[ 13. , 13.41538492],
[ 14. , 14.15745254],
[ 15. , 14.92343948],
[ 16. , 15.70019862],
[ 17. , 16.48167846],
[ 18. , 17.26380699],
[ 19. , 18.0466769 ]])
actual_lowess = lowess(y, x)
print(actual_lowess)
print(np.max(np.abs(actual_lowess-expected_lowess)))
plt.plot(y, 'o')
plt.plot(actual_lowess[:,1])
plt.plot(expected_lowess[:,1])
import os.path
import statsmodels.nonparametric.tests.results
rpath = os.path.split(statsmodels.nonparametric.tests.results.__file__)[0]
rfile = os.path.join(rpath, 'test_lowess_frac.csv')
test_data = np.genfromtxt(open(rfile, 'rb'),
delimiter = ',', names = True)
expected_lowess_23 = np.array([test_data['x'], test_data['out_2_3']]).T
expected_lowess_15 = np.array([test_data['x'], test_data['out_1_5']]).T
actual_lowess_23 = lowess(test_data['y'], test_data['x'] ,frac = 2./3)
actual_lowess_15 = lowess(test_data['y'], test_data['x'] ,frac = 1./5)
#plt.show()
| bsd-3-clause |
hitszxp/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while staying the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
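# For reference, MultiTaskLasso minimizes
#     (1 / (2 * n_samples)) * ||Y - X W||_Fro^2 + alpha * ||W||_21,
# with ||W||_21 = sum_i sqrt(sum_j W_ij^2), so each feature's coefficients
# are shrunk to zero jointly across tasks, whereas the per-task Lasso above
# can drop a feature at some time points and keep it at others.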
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
brad-kaiser/spark | examples/src/main/python/sql/arrow.py | 13 | 3997 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
# $example on:dataframe_with_arrow$
import numpy as np
import pandas as pd
# Enable Arrow-based columnar data transfers
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Generate a Pandas DataFrame
pdf = pd.DataFrame(np.random.rand(100, 3))
# Create a Spark DataFrame from a Pandas DataFrame using Arrow
df = spark.createDataFrame(pdf)
# Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
result_pdf = df.select("*").toPandas()
# $example off:dataframe_with_arrow$
print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def scalar_pandas_udf_example(spark):
# $example on:scalar_pandas_udf$
import pandas as pd
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
# Declare the function and create the UDF
def multiply_func(a, b):
return a * b
multiply = pandas_udf(multiply_func, returnType=LongType())
# The function for a pandas_udf should be able to execute with local Pandas data
x = pd.Series([1, 2, 3])
print(multiply_func(x, x))
# 0 1
# 1 4
# 2 9
# dtype: int64
# Create a Spark DataFrame, 'spark' is an existing SparkSession
df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))
# Execute function as a Spark vectorized UDF
df.select(multiply(col("x"), col("x"))).show()
# +-------------------+
# |multiply_func(x, x)|
# +-------------------+
# | 1|
# | 4|
# | 9|
# +-------------------+
# $example off:scalar_pandas_udf$
def grouped_map_pandas_udf_example(spark):
# $example on:grouped_map_pandas_udf$
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
@pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
    def subtract_mean(pdf):
# pdf is a pandas.DataFrame
v = pdf.v
return pdf.assign(v=v - v.mean())
    df.groupby("id").apply(subtract_mean).show()
# +---+----+
# | id| v|
# +---+----+
# | 1|-0.5|
# | 1| 0.5|
# | 2|-3.0|
# | 2|-1.0|
# | 2| 4.0|
# +---+----+
# $example off:grouped_map_pandas_udf$
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Arrow-in-Spark example") \
.getOrCreate()
print("Running Pandas to/from conversion example")
dataframe_with_arrow_example(spark)
print("Running pandas_udf scalar example")
scalar_pandas_udf_example(spark)
print("Running pandas_udf grouped map example")
grouped_map_pandas_udf_example(spark)
spark.stop()
| apache-2.0 |
amaggi/bda | plotall_distributions.py | 1 | 1517 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm, beta
NPTS = 100
COLS = ['red', 'orange', 'yellow', 'green', 'blue', 'violet']
# normal distribution
mu_vals = [-1, 0, +1]
sig_vals = [0.5, 1, 2]
fig, axes = plt.subplots(1, 2)
plt.sca(axes[0])
n_vals = len(mu_vals)
for i in xrange(n_vals):
rv = norm(mu_vals[i], 1)
x = np.linspace(rv.ppf(0.001), rv.ppf(0.999), NPTS)
plt.title('Norm, sigma = 1')
plt.plot(x, rv.pdf(x), color=COLS[i], label='mu=%d'%mu_vals[i])
plt.legend()
plt.sca(axes[1])
n_vals = len(sig_vals)
for i in xrange(n_vals):
rv = norm(0, sig_vals[i])
x = np.linspace(rv.ppf(0.001), rv.ppf(0.999), NPTS)
plt.title('Norm, mu = 0')
    plt.plot(x, rv.pdf(x), color=COLS[i], label='sigma=%.1f'%sig_vals[i])
plt.legend()
plt.savefig('norm.png')
plt.close()
# beta distribution
a_vals = [0.9, 1., 2., 3., 4.]
b_vals = [0.9, 1., 2., 3., 4.]
fig, axes = plt.subplots(1, 2)
plt.sca(axes[0])
n_vals = len(a_vals)
for i in xrange(n_vals):
rv = beta(a_vals[i], 1)
x = np.linspace(rv.ppf(0.001), rv.ppf(0.999), NPTS)
plt.title('Beta, b = 1')
plt.plot(x, rv.pdf(x), color=COLS[i], label='a=%.1f'%a_vals[i])
plt.legend()
plt.sca(axes[1])
n_vals = len(b_vals)
for i in xrange(n_vals):
rv = beta(4, b_vals[i])
x = np.linspace(rv.ppf(0.001), rv.ppf(0.999), NPTS)
plt.title('Beta, a = 4')
plt.plot(x, rv.pdf(x), color=COLS[i], label='b=%.1f'%b_vals[i])
plt.legend()
plt.savefig('beta.png')
plt.close()
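# For reference, the densities plotted above are
#     norm: f(x) = exp(-(x - mu)**2 / (2 * sigma**2)) / (sigma * sqrt(2 * pi))
#     beta: f(x) = x**(a - 1) * (1 - x)**(b - 1) / B(a, b), for 0 <= x <= 1
# so the left panels vary the location (mu) or first shape (a) parameter and
# the right panels vary the scale (sigma) or second shape (b) parameter.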
| gpl-2.0 |
dhuang/incubator-airflow | airflow/hooks/dbapi_hook.py | 2 | 9351 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import sys
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
return '{conn.conn_type}://{login}{host}/{conn.schema}'.format(
conn=conn, login=login, host=host)
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if sys.version_info[0] < 3:
s = s.encode('utf-8')
self.log.info(s)
if parameters is not None:
cur.execute(s, parameters)
else:
cur.execute(s)
conn.commit()
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
l = []
for cell in row:
l.append(self._serialize_cell(cell, conn))
values = tuple(l)
placeholders = ["%s",]*len(values)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded {i} into {table} rows so far".format(**locals())
)
conn.commit()
self.log.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
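# A minimal subclass sketch (the names below are placeholders, not an
# Airflow-provided hook): the class attributes are the whole contract a
# concrete hook supplies, and ``connector`` would be a DB-API style module
# whose ``connect`` accepts the host/port/username/schema keyword arguments
# used by ``get_conn`` above.
class _ExampleDbApiHook(DbApiHook):
    conn_name_attr = 'example_conn_id'
    default_conn_name = 'example_default'
    supports_autocommit = True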
| apache-2.0 |
Ecogenomics/GroopM | groopm/ellipsoid.py | 1 | 15293 | #!/usr/bin/python
###############################################################################
# #
# ellipsoid.py #
# #
# Playing with ellipses! #
# #
# Copyright (C) Michael Imelfort #
# #
###############################################################################
# #
# .d8888b. 888b d888 #
# d88P Y88b 8888b d8888 #
# 888 888 88888b.d88888 #
# 888 888d888 .d88b. .d88b. 88888b. 888Y88888P888 #
# 888 88888 888P" d88""88b d88""88b 888 "88b 888 Y888P 888 #
# 888 888 888 888 888 888 888 888 888 888 Y8P 888 #
# Y88b d88P 888 Y88..88P Y88..88P 888 d88P 888 " 888 #
# "Y8888P88 888 "Y88P" "Y88P" 88888P" 888 888 #
# 888 #
# 888 #
# 888 #
# #
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "Michael Imelfort"
__copyright__ = "Copyright 2012/2013"
__credits__ = ["Michael Imelfort"]
__license__ = "GPL3"
__maintainer__ = "Michael Imelfort"
__email__ = "[email protected]"
###############################################################################
import matplotlib.pyplot as plt
import numpy as np
from numpy import linalg
np.seterr(all='raise')
###############################################################################
###############################################################################
###############################################################################
###############################################################################
class EllipsoidTool:
"""Some stuff for playing with ellipsoids"""
def __init__(self): pass
def getMinVolEllipse(self, P, tolerance=0.01, retA=False):
""" Find the minimum volume ellipsoid which holds all the points
Based on work by Nima Moshtagh
http://www.mathworks.com/matlabcentral/fileexchange/9542
and also by looking at:
http://cctbx.sourceforge.net/current/python/scitbx.math.minimum_covering_ellipsoid.html
Which is based on the first reference anyway!
Here, P is a numpy array of 3D points like this:
P = [[x,y,z],
[x,y,z],
[x,y,z]]
Returns:
(center, radii, rotation)
"""
(N, d) = np.shape(P)
# Q will be out working array
Q = np.copy(P.T)
Q = np.vstack([Q, np.ones(N)])
QT = Q.T
# initializations
err = 1 + tolerance
u = np.array([1.0 / N for i in range(N)]) # first iteration
# Khachiyan Algorithm
singular = False
while err > tolerance:
V = np.dot(Q, np.dot(np.diag(u), QT))
try:
M = np.diag(np.dot(QT , np.dot(linalg.inv(V), Q))) # M the diagonal vector of an NxN matrix
except linalg.linalg.LinAlgError:
# most likely a singular matrix
# permute the values a little and then we'll try again
from random import random, randint
PP = np.copy(P)
for i in range(N):
if randint(0,3) == 0:
j = randint(0,2)
if randint(0,1) != 0:
PP[i,j] += random()
else:
PP[i,j] -= random()
(A, center, radii, rotation) = self.getMinVolEllipse(PP, retA=True)
singular = True
break
j = np.argmax(M)
maximum = M[j]
step_size = (maximum - d - 1.0) / ((d + 1.0) * (maximum - 1.0))
new_u = (1.0 - step_size) * u
new_u[j] += step_size
err = np.linalg.norm(new_u - u)
u = new_u
if not singular:
# center of the ellipse
center = np.dot(P.T, u)
# the A matrix for the ellipse
try:
A = linalg.inv(
np.dot(P.T, np.dot(np.diag(u), P)) -
np.array([[a * b for b in center] for a in center])
) / d
except linalg.linalg.LinAlgError:
# the matrix is singular so we need to return a degenerate ellipse
#print '[Notice] Degenerate ellipse constructed indicating a bin with extremely small coverage divergence.'
center = np.mean(P, axis=0)
radii = np.max(P,axis=0) - np.min(P, axis=0)
if len(P[0]) == 3:
rotation = [[0,0,0],[0,0,0],[0,0,0]]
else:
rotation = [[0,0],[0,0]]
if retA:
return (None, center, radii, rotation)
else:
return (center, radii, rotation)
# Get the values we'd like to return
try:
U, s, rotation = linalg.svd(A)
radii = 1.0/np.sqrt(s)
except np.linalg.linalg.LinAlgError:
# hack -> better than crashing...
rotation = np.eye(3)
radii = np.ones(3)
else:
# hack -> better than crashing...
rotation = np.eye(3)
radii = np.ones(3)
if retA:
return (A, center, radii, rotation)
else:
return (center, radii, rotation)
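    # A small self-check sketch: by symmetry, the minimum volume ellipsoid of
    # a unit cube's corners is its circumscribed sphere, so the centre should
    # come out near (0.5, 0.5, 0.5) and all radii near sqrt(3)/2.
    def getMinVolEllipseSketch(self):
        """Illustrative example of getMinVolEllipse on the unit cube"""
        P = np.array([[x, y, z]
                      for x in (0.0, 1.0)
                      for y in (0.0, 1.0)
                      for z in (0.0, 1.0)])
        return self.getMinVolEllipse(P, tolerance=0.01)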
def getEllipsoidVolume(self, radii):
"""Calculate the volume of the blob"""
if len(radii) == 2:
return np.pi*radii[0]*radii[1]
else:
return (4.0/3.0)*np.pi*radii[0]*radii[1]*radii[2]
def doesIntersect3D(self, A, cA, B, cB):
"""Rough test to see if ellipsoids A and B intersect
Not perfect, should work for "well overlapping" ones
We assume that the volume of B is less than (or =) volume of A
"""
#To make things simple, we just check if the points on a wire frame of
#B lie within A
# Quick check if the centre of B is within ellipse A. This deals with
# degenerate cases where B is only a single point or an otherwise
# degenerate ellipse.
p_c = cB - cA
try:
if np.dot(p_c.T, np.dot(A, p_c)) <= 1:
return True
except (TypeError, ValueError):
return False
if A is None or B is None: # degenerate ellipse that can't be processed
return False
U, s, rotation = linalg.svd(B)
try:
radii_B = 1.0/np.sqrt(s)
except FloatingPointError:
# the given matrix B was made on a group of only one point
# we need only check if the one point (the center)
# in in A
p_c = cB - cA
return np.dot(p_c.T, np.dot(A, p_c)) <= 1
u = np.linspace(0.0, 2.0 * np.pi, 100)
v = np.linspace(0.0, np.pi, 100)
# cartesian coordinates that correspond to the spherical angles:
x = radii_B[0] * np.outer(np.cos(u), np.sin(v))
y = radii_B[1] * np.outer(np.sin(u), np.sin(v))
z = radii_B[2] * np.outer(np.ones_like(u), np.cos(v))
# rotate accordingly
for i in range(len(x)):
for j in range(len(x)):
# make a point on the wireFrame
wire_point = np.dot([x[i,j],y[i,j],z[i,j]], rotation) + cB
# test if it's inside
# work out (p-c)'A(p-c) and see if it's <= 1
p_c = wire_point - cA
if np.dot(p_c.T, np.dot(A, p_c)) <= 1:
return True
return False
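    # Illustrative sketch of the intersection test with two unit spheres
    # (for a unit sphere centred at c the matrix A is the identity):
    # centres 1.5 apart overlap, centres 3 apart do not.
    def doesIntersect3DSketch(self):
        """Illustrative example of doesIntersect3D with two unit spheres"""
        A = np.eye(3)
        B = np.eye(3)
        near = self.doesIntersect3D(A, np.zeros(3), B, np.array([1.5, 0., 0.]))
        far = self.doesIntersect3D(A, np.zeros(3), B, np.array([3.0, 0., 0.]))
        return near, far  # expected (True, False)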
def doesIntersect2D(self, A, cA, B, cB):
"""Rough test to see if ellipsoids A and B intersect
Not perfect, should work for "well overlapping" ones
We assume that the volume of B is less than (or =) volume of A
"""
#To make things simple, we just check if the points on a wire frame of
#B lie within A
# Quick check if the centre of B is within ellipse A. This deals with
# degenerate cases where B is only a single point or an otherwise
# degenerate ellipse.
p_c = cB - cA
if np.dot(p_c.T, np.dot(A, p_c)) <= 1:
return True
        if A is None or B is None: # degenerate ellipse that can't be processed
return False
U, s, rotation = linalg.svd(B)
try:
radii_B = 1.0/np.sqrt(s)
except FloatingPointError:
# the given matrix B was made on a group of only one point
            # we need only check whether that one point (the center)
            # lies inside A
p_c = cB - cA
return np.dot(p_c.T, np.dot(A, p_c)) <= 1
u = np.linspace(0.0, 2.0 * np.pi, 100)
# cartesian coordinates that correspond to the spherical angles:
x = radii_B[0] * np.cos(u)
y = radii_B[1] * np.sin(u)
# rotate accordingly
for i in range(len(x)):
# make a point on the wireFrame
edge_point = np.dot([x[i],y[i]], rotation) + cB
# test if it's inside
# work out (p-c)'A(p-c) and see if it's <= 1
p_c = edge_point - cA
if np.dot(p_c.T, np.dot(A, p_c)) <= 1:
return True
return False
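    # Illustrative sketch (not part of the original source): how the radii of B
    # are recovered above.  For an ellipse matrix B in the same
    # (p - c)^T B (p - c) <= 1 convention, the singular values s of B give the
    # semi-axis lengths as 1/sqrt(s):
    #
    #   import numpy as np
    #   B = np.diag([1.0 / 9.0, 1.0 / 4.0])
    #   U, s, rotation = np.linalg.svd(B)
    #   1.0 / np.sqrt(s)                        # -> array([2., 3.])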
def plotEllipsoid(self, center, radii, rotation, ax=None, plotAxes=False, cageColor='b', cageAlpha=0.2, label=None):
"""Plot an ellipsoid"""
        make_ax = ax is None
if make_ax:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
u = np.linspace(0.0, 2.0 * np.pi, 100)
v = np.linspace(0.0, np.pi, 100)
# cartesian coordinates that correspond to the spherical angles:
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
# rotate accordingly
for i in range(len(x)):
for j in range(len(x)):
[x[i,j],y[i,j],z[i,j]] = np.dot([x[i,j],y[i,j],z[i,j]], rotation) + center
if plotAxes:
# make some purdy axes
axes = np.array([[radii[0],0.0,0.0],
[0.0,radii[1],0.0],
[0.0,0.0,radii[2]]])
# rotate accordingly
for i in range(len(axes)):
axes[i] = np.dot(axes[i], rotation)
# plot axes
for p in axes:
X3 = np.linspace(-p[0], p[0], 100) + center[0]
Y3 = np.linspace(-p[1], p[1], 100) + center[1]
Z3 = np.linspace(-p[2], p[2], 100) + center[2]
ax.plot(X3, Y3, Z3, color=cageColor)
# plot ellipsoid
ax.plot_wireframe(x, y, z, rstride=4, cstride=4, color=cageColor, alpha=cageAlpha)
if label is not None:
ax.text(center[0],
center[1],
center[2],
label,
color=[0,0,0],
weight='bold'
)
if make_ax:
plt.show()
plt.close(fig)
del fig
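    # Illustrative sketch (not part of the original source): feeding the plotting
    # helper above (``tool`` is only a stand-in name for an instance of this class).
    #
    #   import numpy as np
    #   center = np.zeros(3)
    #   radii = np.array([3.0, 2.0, 1.0])
    #   rotation = np.eye(3)                    # axis-aligned, no rotation
    #   # tool.plotEllipsoid(center, radii, rotation, plotAxes=True, label='demo')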
def plotEllipse(self, center, radii, rotation, ax=None, plotAxes=False, cageColor='b', cageAlpha=0.2, label=None, linewidth=-1):
"""plot an ellipse"""
make_ax = ax == None
if make_ax:
fig = plt.figure()
ax = fig.add_subplot(111)
u = np.linspace(0.0, 2.0 * np.pi, 100)
# cartesian coordinates that correspond to the spherical angles:
x = radii[0] * np.cos(u)
y = radii[1] * np.sin(u)
# rotate accordingly
for i in range(len(x)):
[x[i],y[i]] = np.dot([x[i],y[i]], rotation) + center
if plotAxes:
# make some purdy axes
axes = np.array([[radii[0],0.0],[0.0,radii[1]]])
# rotate accordingly
for i in range(len(axes)):
axes[i] = np.dot(axes[i], rotation)
# plot axes
for p in axes:
X3 = np.linspace(-p[0], p[0], 100) + center[0]
Y3 = np.linspace(-p[1], p[1], 100) + center[1]
ax.plot(X3, Y3, color=cageColor)
        # plot the ellipse
if linewidth == -1:
ax.plot(x, y, color=cageColor, alpha=cageAlpha)
else:
            ax.plot(x, y, color=cageColor, alpha=cageAlpha, linewidth=linewidth, zorder=10)
if label is not None:
ax.text(center[0],
center[1],
label,
color=[0,0,0],
weight='bold'
)
if make_ax:
plt.show()
plt.close(fig)
del fig
###############################################################################
###############################################################################
###############################################################################
###############################################################################
| gpl-3.0 |
toastedcornflakes/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
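# A minimal sketch (not part of the original test module), illustrating the
# pruning behaviour ARDRegression is designed for: coefficients of irrelevant
# features are expected to be driven towards zero.
#
#   import numpy as np
#   from sklearn.linear_model import ARDRegression
#   rng = np.random.RandomState(0)
#   X = np.column_stack([np.arange(10.0), rng.randn(10)])  # column 0 is relevant
#   y = np.arange(10.0)
#   clf = ARDRegression(compute_score=True).fit(X, y)
#   clf.coef_        # expected: first entry close to 1, second close to 0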
| bsd-3-clause |
MJuddBooth/pandas | pandas/core/frame.py | 1 | 295066 | # pylint: disable=E1101
# pylint: disable=W0212,W0703,W0622
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
import collections
from collections import OrderedDict
import functools
import itertools
import sys
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas._libs import lib, algos as libalgos
from pandas.util._decorators import (Appender, Substitution,
rewrite_axis_style_signature,
deprecate_kwarg)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
from pandas import compat
from pandas.compat import (range, map, zip, lmap, lzip, StringIO, u,
PY36, raise_with_traceback, Iterator,
string_and_binary_types)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_dict_like,
is_datetime64tz_dtype,
is_object_dtype,
is_extension_type,
is_extension_array_dtype,
is_datetime64_any_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
infer_dtype_from_object,
ensure_float64,
ensure_int64,
ensure_platform_int,
is_list_like,
is_nested_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCMultiIndex
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms
from pandas.core import common as com
from pandas.core import nanops
from pandas.core import ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimelike import (
DatetimeLikeArrayMixin as DatetimeLikeArray
)
from pandas.core.config import get_option
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, ensure_index,
ensure_index_from_sequences)
from pandas.core.indexes import base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
masked_rec_array_to_mgr, get_names_from_index, to_arrays,
reorder_arrays, init_ndarray, init_dict,
arrays_to_mgr, sanitize_index)
from pandas.core.series import Series
from pandas.io.formats import console
from pandas.io.formats import format as fmt
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
versionadded_to_excel='',
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
"""
Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
DataFrame.from_items : From sequence of (key, value) pairs
read_csv, pandas.read_table, pandas.read_clipboard.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['get_value', 'set_value', 'from_csv', 'from_items'])
_accessors = set()
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
# ----------------------------------------------------------------------
# Constructors
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
mgr = init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
        # For data that is list-like, or an Iterable (will consume into a list)
elif (isinstance(data, compat.Iterable)
and not isinstance(data, string_and_binary_types)):
if not isinstance(data, compat.Sequence):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: {e}'.format(e=e))
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array((len(index), len(columns)),
data, dtype=dtype)
mgr = init_ndarray(values, index, columns,
dtype=values.dtype, copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
# ----------------------------------------------------------------------
@property
def axes(self):
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self):
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._data.any_extension_types:
return len({block.dtype for block in self._data.blocks}) == 1
else:
return not self._data.is_mixed_type
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
        In case of a non-interactive session, no boundaries apply.
        `ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if (get_option('display.width') is not None or
console.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
        if max_rows is not None:  # max_rows set -> truncate before rendering
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split('\n'))
return repr_width < width
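    # Illustrative sketch (not part of the original source): the checks above are
    # driven entirely by pandas display options, e.g.
    #
    #   import pandas as pd
    #   pd.get_option("display.max_rows")
    #   pd.get_option("display.max_columns")
    #   pd.set_option("display.width", None)    # None lets pandas auto-detect the width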
def _info_repr(self):
"""
True if the repr should show the info view.
"""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'<', 1)
val = val.replace('>', r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names')
@Substitution(shared_params=fmt.common_docstring,
returns=fmt.return_docstring)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
line_width=line_width)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
# ----------------------------------------------------------------------
@property
def style(self):
"""
Property returning a Styler object containing methods for
        building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
def iteritems(self):
r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
See Also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
items = iteritems
def __len__(self):
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
def dot(self, other):
"""
        Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
        values of another Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
            If other is a Series, return the matrix product between self and
            other as a Series. If other is a DataFrame or a numpy.array, return
            the matrix product of self and other as a DataFrame.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
        Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
        The dot method also works if other is a np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, '
'{s} vs {r}'.format(s=lvals.shape,
r=rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: {oth}'.format(oth=type(other)))
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.T.dot(np.transpose(other)).T
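    # Illustrative sketch (not part of the original source): ``ndarray @ DataFrame``
    # ends up in __rmatmul__ above (the ndarray defers to pandas), which reduces it
    # to the transpose identity (other @ self) == (self.T @ other.T).T.
    #
    #   import numpy as np
    #   import pandas as pd
    #   df = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
    #   arr = np.array([[1, 1, 2, 1], [2, 0, 1, 3]])
    #   np.array_equal(np.asarray(arr @ df), arr @ df.values)   # expected True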
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None, columns=None):
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == 'columns':
if columns is not None:
raise ValueError("cannot use columns parameter with "
"orient='columns'")
else: # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(self, dtype=None, copy=False):
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
        ``float16`` and ``float32``, the resulting dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
        With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
result = np.array(self.values, dtype=dtype, copy=copy)
return result
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
dict, list or collections.Mapping
Return a collections.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
into_c = com.standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', [
list(map(com.maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
])))
elif orient.lower().startswith('s'):
return into_c((k, com.maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
columns = self.columns.tolist()
rows = (dict(zip(columns, row))
for row in self.itertuples(index=False, name=None))
return [
into_c((k, com.maybe_box_datetimelike(v))
for k, v in compat.iteritems(row))
for row in rows]
elif orient.lower().startswith('i'):
if not self.index.is_unique:
raise ValueError(
"DataFrame index must be unique for orient='index'."
)
return into_c((t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None))
else:
raise ValueError("orient '{o}' not understood".format(o=orient))
def to_gbq(self, destination_table, project_id=None, chunksize=None,
reauth=False, if_exists='fail', auth_local_webserver=False,
table_schema=None, location=None, progress_bar=True,
credentials=None, verbose=None, private_key=None):
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
            List of BigQuery table fields to which the DataFrame
            columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
verbose : bool, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
            authentication (e.g. Jupyter/IPython notebook on a remote host).
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
return gbq.to_gbq(
self, destination_table, project_id=project_id,
chunksize=chunksize, reauth=reauth, if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema, location=location,
progress_bar=progress_bar, credentials=credentials,
verbose=verbose, private_key=private_key)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)]
for field in index]
result_index = ensure_index_from_sequences(index_data,
names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
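    # Illustrative sketch (not part of the original source): two common inputs to
    # from_records -- a list of tuples with explicit column names, and a
    # structured ndarray whose field names become the columns.
    #
    #   import numpy as np
    #   import pandas as pd
    #   pd.DataFrame.from_records([(1, 'a'), (2, 'b')], columns=['x', 'y'])
    #   rec = np.array([(1, 'a'), (2, 'b')], dtype=[('x', 'i8'), ('y', 'U1')])
    #   pd.DataFrame.from_records(rec, index='x')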
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
.. deprecated:: 0.23.0
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = "<S{}".format(df.index.str.len().max())
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if convert_datetime64 is not None:
warnings.warn("The 'convert_datetime64' parameter is "
"deprecated and will be removed in a future "
"version",
FutureWarning, stacklevel=2)
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns, whichever is applicable.
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype,
compat.string_types)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = ("Invalid dtype {dtype} specified for "
"{element} {name}").format(dtype=dtype_mapping,
element=element, name=name)
raise ValueError(msg)
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Construct a DataFrame from a list of tuples.
.. deprecated:: 0.23.0
`from_items` is deprecated and will be removed in a future version.
Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
instead.
:meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
may be used to preserve the key order.
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
DataFrame
"""
warnings.warn("from_items is deprecated. Please use "
"DataFrame.from_dict(dict(items), ...) instead. "
"DataFrame.from_dict(OrderedDict(items)) may be used to "
"preserve the key order.",
FutureWarning, stacklevel=2)
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = ensure_index(keys)
arrays = values
# GH 17312
# Provide more informative error msg when scalar values passed
try:
return cls._from_arrays(arrays, columns, None)
except ValueError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = ensure_index(keys)
# GH 17312
# Provide more informative error msg when scalar values passed
try:
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
except TypeError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
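    # Illustrative sketch (not part of the original source): the replacement that
    # the deprecation warning above recommends.
    #
    #   from collections import OrderedDict
    #   import pandas as pd
    #   items = [('A', [1, 2, 3]), ('B', [4, 5, 6])]
    #   pd.DataFrame.from_dict(OrderedDict(items))   # preserves the key order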
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""
Read CSV file.
.. deprecated:: 0.21.0
Use :func:`read_csv` instead.
It is preferable to use the more powerful :func:`read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
Write MultiIndex columns as a list of tuples (if True) or in the
new, expanded format (if False).
infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
Returns
-------
DataFrame
See Also
--------
read_csv
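Examples
--------
An illustrative round trip; the file name is arbitrary and the last
two calls are equivalent:
>>> df = pd.DataFrame({'A': [1, 2]})
>>> df.to_csv('data.csv')  # doctest: +SKIP
>>> pd.DataFrame.from_csv('data.csv')  # doctest: +SKIP
>>> pd.read_csv('data.csv', index_col=0,
...             parse_dates=True)  # doctest: +SKIP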
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_csv
return read_csv(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame.
Implement the sparse version of the DataFrame, meaning that any data
matching a specific value is omitted in the representation.
The sparse DataFrame allows for more efficient storage.
Parameters
----------
fill_value : float, default None
The specific value that should be omitted in the representation.
kind : {'block', 'integer'}, default 'block'
The kind of the SparseIndex tracking where data is not equal to
the fill value:
- 'block' tracks only the locations and sizes of blocks of data.
- 'integer' keeps an array with all the locations of the data.
In most cases 'block' is recommended, since it's more memory
efficient.
Returns
-------
SparseDataFrame
The sparse representation of the DataFrame.
See Also
--------
DataFrame.to_dense :
Converts the DataFrame back to its dense form.
Examples
--------
>>> df = pd.DataFrame([(np.nan, np.nan),
... (1., np.nan),
... (np.nan, 1.)])
>>> df
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
>>> sdf = df.to_sparse()
>>> sdf
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(sdf)
<class 'pandas.core.sparse.frame.SparseDataFrame'>
"""
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
.. deprecated:: 0.20.0
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later.
Returns
-------
Panel
"""
raise NotImplementedError("Panel is being removed in pandas 0.25.0.")
@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, version=114,
convert_strl=None):
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
fname : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported.
byteorder : str
Can be ">", "<", "little", or "big". Default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
version : {114, 117}, default 114
Version to use in the output dta file. Version 114 can be read
by Stata 10 and later. Version 117 can be read by Stata 13
or later. Version 114 limits string variables to 244 characters or
fewer while 117 allows strings with lengths up to 2,000,000
characters.
.. versionadded:: 0.23.0
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
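Writing with the newer 117 format and StrL storage for a string
column (file name illustrative):
>>> df.to_stata('animals117.dta', version=117,
...             convert_strl=['animal'])  # doctest: +SKIP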
"""
kwargs = {}
if version not in (114, 117):
raise ValueError('Only formats 114 and 117 supported.')
if version == 114:
if convert_strl is not None:
raise ValueError('strl support is only available when using '
'format 117')
from pandas.io.stata import StataWriter as statawriter
else:
from pandas.io.stata import StataWriter117 as statawriter
kwargs['convert_strl'] = convert_strl
writer = statawriter(fname, self, convert_dates=convert_dates,
byteorder=byteorder, time_stamp=time_stamp,
data_label=data_label, write_index=write_index,
variable_labels=variable_labels, **kwargs)
writer.write_file()
def to_feather(self, fname):
"""
Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
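Examples
--------
A minimal sketch; writing feather requires a feather backend
(e.g. pyarrow) and the file name is arbitrary:
>>> df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
>>> df.to_feather('frame.feather')  # doctest: +SKIP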
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
def to_parquet(self, fname, engine='auto', compression='snappy',
index=None, partition_cols=None, **kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
fname : str
File path or Root Directory path. Will be used as Root Directory
path while writing a partitioned dataset.
.. versionchanged:: 0.24.0
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file. If ``None``,
the behavior depends on the chosen engine.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
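A partitioned dataset writes one directory per value of the partition
columns (output path illustrative):
>>> df.to_parquet('df_partitioned',
...               partition_cols=['col1'])  # doctest: +SKIP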
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, index=index,
partition_cols=partition_cols, **kwargs)
@Substitution(header='Whether to print column labels, default True')
@Substitution(shared_params=fmt.common_docstring,
returns=fmt.return_docstring)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, max_rows=None,
max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False,
border=None, table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
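Examples
--------
A minimal sketch showing that, with ``buf=None``, the rendered markup
is returned as a string:
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> html = df.to_html()
>>> html.startswith('<table')
True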
"""
if (justify is not None and
justify not in fmt._VALID_JUSTIFY_PARAMETERS):
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal, table_id=table_id,
render_links=render_links)
# TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
# ----------------------------------------------------------------------
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always shows memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based on column dtype and number of rows, assuming values
consume the same amount of memory for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
int_col 5 non-null int64
text_col 5 non-null object
float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames, and helps fine-tune memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index._summary())
if len(self.columns) == 0:
lines.append('Empty {name}'.format(name=type(self).__name__))
fmt.buffer_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None
tmpl = "{count}{dtype}"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
'Columns must equal counts '
'({cols:d} != {counts:d})'.format(
cols=len(cols), counts=len(counts)))
tmpl = "{count} non-null {dtype}"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl.format(count=count,
dtype=dtype))
def _non_verbose_repr():
lines.append(self.columns._summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
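# e.g. _sizeof_fmt(168059, '+') -> '164.1+ KB' (illustrative values)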
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return ("{num:3.1f}{size_q} "
"{x}".format(num=num, size_q=size_qualifier, x=x))
num /= 1024.0
return "{num:3.1f}{size_q} {pb}".format(num=num,
size_q=size_qualifier,
pb='PB')
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not nesc None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k
in sorted(compat.iteritems(counts))]
lines.append('dtypes: {types}'.format(types=', '.join(dtypes)))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: {mem}\n".format(
mem=_sizeof_fmt(mem_usage, size_qualifier)))
fmt.buffer_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in the returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 (1+0j) 1 True
1 1 1.0 (1+0j) 1 True
2 1 1.0 (1+0j) 1 True
3 1 1.0 (1+0j) 1 True
4 1 1.0 (1+0j) 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
def transpose(self, *args, **kwargs):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = com._unpickle_array(cols)
index = com._unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
# old unpickling
(vals, idx, cols), object_state = state
index = com._unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=com._unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
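Examples
--------
The recommended replacement is the ``.at`` (label) or ``.iat``
(position) accessor; a small illustrative frame:
>>> df = pd.DataFrame({'A': [1, 2]}, index=['x', 'y'])
>>> df.at['y', 'A']
2
>>> df.iat[1, 0]
2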
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return com.maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
Returns
-------
DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object.
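Examples
--------
The recommended replacement is assignment through ``.at`` or ``.iat``
(illustrative):
>>> df = pd.DataFrame({'A': [1, 2]}, index=['x', 'y'])
>>> df.at['y', 'A'] = 10
>>> df
    A
x   1
y  10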
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
_set_value.__doc__ = set_value.__doc__
def _ixs(self, i, axis=0):
"""
Parameters
----------
i : int, slice, or sequence of integers
axis : int
Notes
-----
If a slice is passed, the resulting data will be a view.
"""
# irow
if axis == 0:
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
# shortcut if the key is in columns
try:
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
except (TypeError, ValueError):
# The TypeError correctly catches non hashable "key" (e.g. list)
# The ValueError can be removed once GH #21729 is fixed
pass
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self._getitem_frame(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._convert_to_indexer(key, axis=1,
raise_missing=True)
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
data = data[key]
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take(indexer, axis=0)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == '':
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
.. versionadded:: 0.18.0
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.query('A > B')
A B
4 5 2
The previous expression is equivalent to
>>> df[df.A > df.B]
A B
4 5 2
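Variables from the enclosing scope can be referenced with the ``@``
prefix (``limit`` here is an ordinary Python variable):
>>> limit = 3
>>> df.query('A > @limit')
   A  B
3  4  4
4  5  2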
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(infer_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=(include & exclude)))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(idx, dtype):
return idx, functools.partial(issubclass, dtype.type)
for idx, f in itertools.starmap(is_dtype_instance_mapper,
enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
include_these.iloc[idx] = any(map(f, include))
if exclude:
exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[_get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
"""
Provide boxed values for a column.
"""
klass = self._constructor_sliced
return klass(values, index=self.index, name=items, fastpath=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError(
'Array conditional must be same shape as self'
)
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
'Must pass DataFrame or 2-d ndarray with boolean values only'
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, we can create one from the
passed value.
"""
# GH5632, make sure the value is convertible to a Series
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must satisfy 0 <= loc <= len(columns).
column : string, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
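Examples
--------
A short illustrative example; the new column lands at position ``loc``:
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> df.insert(1, 'C', [5, 6])
>>> df
   A  C  B
0  1  5  3
1  2  6  4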
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
For Python 3.6 and above, later items in '\*\*kwargs' may refer to
newly created or modified columns in 'df'; items are computed and
assigned into 'df' in order. For Python 3.5 and below, the order of
keyword arguments is not specified, so you cannot refer to newly created
or modified columns. All items are computed first, and then assigned
in alphabetical order.
.. versionchanged :: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
In Python 3.6+, you can create multiple columns within the same assign
where one of the columns depends on another one defined within the same
assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
# >= 3.6 preserve order of kwargs
if PY36:
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
else:
# <= 3.5: do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com.apply_if_callable(v, data)
# <= 3.5: sort so assignment happens in alphabetical order
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index, copy=False)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes, so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(
value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_type(value) or is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
return {item: Series(self._data.iget(idx), index=self.index, name=item)
for idx, item in enumerate(self.columns)}
def lookup(self, row_labels, col_labels):
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Returns
-------
numpy.ndarray
The found values.
Notes
-----
Akin to::
result = [df.get_value(row, col)
for row, col in zip(row_labels, col_labels)]
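Examples
--------
A small illustrative lookup of one value per row:
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
>>> df.lookup(['x', 'y'], ['B', 'A'])
array([3, 2])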
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
fill_value=None, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature('labels', [('method', None),
('copy', True),
('level', None),
('fill_value', np.nan),
('limit', None),
('tolerance', None)])
def reindex(self, *args, **kwargs):
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('labels', None)
return super(DataFrame, self).reindex(**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
def drop(self, labels=None, axis=0, index=None, columns=None,
level=None, inplace=False, errors='raise'):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
.. versionadded:: 0.21.0
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame
DataFrame without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super(DataFrame, self).drop(labels=labels, axis=axis,
index=index, columns=columns,
level=level, inplace=inplace,
errors=errors)
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False),
('level', None),
('errors', 'ignore')])
def rename(self, *args, **kwargs):
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : int or str
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame
DataFrame with the renamed axis labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super(DataFrame, self).rename(**kwargs)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad'):
return super(DataFrame, self).replace(to_replace=to_replace,
value=value, inplace=inplace,
limit=limit, regex=regex,
method=method)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis, fill_value=fill_value)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
err_msg = ('The parameter "keys" may be a column key, one-dimensional '
'array, or a list containing only valid column keys and '
'one-dimensional arrays.')
missing = []
for col in keys:
if isinstance(col, (ABCIndexClass, ABCSeries, np.ndarray,
list, Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, 'ndim', 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError:
raise TypeError(err_msg + ' Received column of '
'type {}'.format(type(col)))
else:
if not found:
missing.append(col)
if missing:
raise KeyError('None of {} are in the columns'.format(missing))
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, ABCMultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
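        # Build the arrays/names that will form the new index.  Each entry in
        # ``keys`` is either an array-like (MultiIndex, Index, Series, ndarray,
        # list or iterator) whose values are used directly, or a column label
        # whose values are pulled from the frame (and scheduled for removal
        # when ``drop=True``).  For example, a hypothetical call like
        # ``df.set_index(['year', pd.Series([1, 2, 3])])`` would contribute one
        # column-backed array and one Series-backed array here.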
for col in keys:
if isinstance(col, ABCMultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (ABCIndexClass, ABCSeries)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError('Length mismatch: Expected {len_self} rows, '
'received array of length {len_col}'.format(
len_self=len(self),
len_col=len(arrays[-1])
))
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError('Index has duplicate keys: {dup}'.format(
dup=duplicates))
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
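        # Helper: pull the values out of an index (level) so they can be
        # re-inserted as a regular column.  When ``labels`` (the level codes)
        # are supplied, ``-1`` entries mark missing positions and are replaced
        # with NaN; datetime-like arrays are temporarily unboxed so
        # maybe_upcast_putmask can work on the underlying ndarray and are then
        # re-boxed with their original dtype.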
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
# TODO(https://github.com/pandas-dev/pandas/issues/24206)
# Push this into maybe_upcast_putmask?
# We can't pass EAs there right now. Looks a bit
# complicated.
# So we unbox the ndarray_values, op, re-box.
values_type = type(values)
values_dtype = values.dtype
if issubclass(values_type, DatetimeLikeArray):
values = values._data
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
if issubclass(values_type, DatetimeLikeArray):
values = values_type(values, dtype=values_dtype)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.codes)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
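            # Insert the reset levels back as columns.  Iterating in reverse
            # and always inserting at position 0 keeps the final column order
            # aligned with the original level order.  For MultiIndex columns,
            # the new column name is padded with ``col_fill`` around the
            # requested ``col_level`` so it has the right number of levels.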
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(DataFrame, self).isna()
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(DataFrame, self).isnull()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(DataFrame, self).notna()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(DataFrame, self).notnull()
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing values.
.. deprecated:: 0.23.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
# GH20987
msg = ("supplying multiple axes to axis is deprecated and "
"will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
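            # Strategy: count the non-NA values along the opposite axis and
            # turn that count into a boolean mask -- ``thresh`` keeps labels
            # with at least that many non-NA values, ``how='any'`` keeps only
            # fully populated labels, and ``how='all'`` keeps labels with at
            # least one non-NA value.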
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns. Indexes, including time indexes
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
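        # The inplace path takes the positions of the non-duplicated rows
        # directly through the block manager and updates self; otherwise a
        # boolean-mask selection returns a new DataFrame.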
if inplace:
inds, = (-duplicated)._ndarray_values.nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series(dtype=bool)
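        # Each column in ``subset`` is factorized into integer labels, the
        # per-column labels are combined into a single group id per row via
        # get_group_index, and duplicated_int64 flags the repeated ids.  A
        # scalar (or tuple-valued column key) ``subset`` is normalised to a
        # 1-tuple below so the same code path applies.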
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
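        # Multiple sort keys use a stable lexicographic sort (lexsort_indexer)
        # over the label/level values; a single key goes through nargsort,
        # which also handles NaN placement via ``na_position``.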
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = [self._get_label_or_level_values(x, axis=axis)
for x in by]
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_index.__doc__)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, "
"please use .sort_values(by=...)",
FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
if level is not None:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer(labels._get_codes_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
            # Check monotonicity before sorting the index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def nlargest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
            - ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
            - ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
        three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 11300 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
        To order by the smallest values in column "population" and then
        "GDP", we can specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Nauru 11300 182 NR
Anguilla 11300 311 AI
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
DataFrame
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order. May not drop or
duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
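        # When ops.should_series_dispatch decides the operation should be done
        # column-by-column (e.g. for mixed dtypes), each column is combined as
        # a Series op; otherwise the fast path applies _arith_op directly to
        # the 2-D values and rebuilds the frame from the result.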
if ops.should_series_dispatch(this, other, func):
# iterate over columns
return ops.dispatch_to_series(this, other, _arith_op)
else:
result = _arith_op(this.values, other.values)
return self._constructor(result,
index=new_index, columns=new_columns,
copy=False)
def _combine_match_index(self, other, func, level=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
assert left.index.equals(right.index)
if left._is_mixed_type or right._is_mixed_type:
# operate column-wise; avoid costly object-casting in `.values`
return ops.dispatch_to_series(left, right, func)
else:
# fastpath --> operate directly on values
with np.errstate(all="ignore"):
new_data = func(left.values.T, right.values).T
return self._constructor(new_data,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None):
assert isinstance(other, Series)
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
assert left.columns.equals(right.index)
return ops.dispatch_to_series(left, right, func, axis="columns")
def _combine_const(self, other, func):
assert lib.is_scalar(other) or np.ndim(other) == 0
return ops.dispatch_to_series(self, other, func)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
            scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
        is preserved.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
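        # Per-column combine over the aligned union of columns: optionally
        # skip columns that are entirely NA in ``other`` (overwrite=False),
        # fill NAs with ``fill_value`` on both sides, promote both operands to
        # a common dtype, apply ``func`` and try to downcast the result back
        # to the original dtype.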
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
            # don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If self DataFrame does not have col in other DataFrame,
                # try to promote the series (which is all NaN) to other_dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)
def combine_first(self, other):
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
        does not exist in `other`.
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view('i8')
return arr
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
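        # combine_first is expressed in terms of combine: the combiner keeps
        # values from ``self`` (x) where they are non-NA and falls back to
        # ``other`` (y) elsewhere, with overwrite=False so columns of ``self``
        # that are missing from ``other`` are left untouched.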
return self.combine(other, combiner, overwrite=False)
@deprecate_kwarg(old_arg_name='raise_conflict', new_arg_name='errors',
mapping={False: 'ignore', True: 'raise'})
def update(self, other, join='left', overwrite=True, filter_func=None,
errors='ignore'):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
        The DataFrame's length does not increase as a result of the update;
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
        For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
        If `other` contains NaNs, the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ['ignore', 'raise']:
raise ValueError("The parameter errors must be either "
"'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
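        # For each column, build a boolean mask of positions whose existing
        # values should be kept: positions rejected by ``filter_func``, NA
        # positions in ``other`` (overwrite=True), or non-NA positions in
        # ``self`` (overwrite=False).  expressions.where then chooses between
        # the old and the new values.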
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if errors == 'raise':
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
            # don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
_shared_docs['pivot'] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
    aggregation; multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : string or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column to use to make new frame's columns.
values : string, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
.. versionchanged :: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
        values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution('')
@Appender(_shared_docs['pivot'])
def pivot(self, index=None, columns=None, values=None):
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
        it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
        it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
Returns
-------
DataFrame
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution('')
@Appender(_shared_docs['pivot_table'])
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
from pandas.core.reshape.pivot import pivot_table
return pivot_table(self, values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value,
margins=margins, dropna=dropna,
margins_name=margins_name)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
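        # A list/tuple of levels is handled by stack_multiple, which stacks
        # the requested levels one after another; a single level goes through
        # the plain stack routine.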
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See Also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.melt import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
.. versionadded:: 0.16.1.
Returns
-------
DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self,
key, # type: Union[str, List[str]]
ndim, # type: int
subset=None # type: Union[Series, DataFrame, None]
):
# type: (...) -> Union[Series, DataFrame]
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent("""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d,
axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.EWM : Perform operation over exponential weighted
window.
""")
_agg_examples_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
""")
@Substitution(see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs)
@Appender(_shared_docs['aggregate'])
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result = None
try:
result, how = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = (super(DataFrame, self.T)
._aggregate(arg, *args, **kwargs))
result = result.T if result is not None else result
return result, how
return super(DataFrame, self)._aggregate(arg, *args, **kwargs)
agg = aggregate
@Appender(_shared_docs['transform'] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
if axis == 1:
return super(DataFrame, self.T).transform(func, *args, **kwargs).T
return super(DataFrame, self).transform(func, *args, **kwargs)
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
broadcast : bool, optional
Only relevant for aggregation functions:
* ``False`` or ``None`` : returns a Series whose length is the
length of the index or the number of columns (based on the
`axis` parameter)
* ``True`` : results will be broadcast to the original shape
of the frame, the original index and columns will be retained.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
raw : bool, default False
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
reduce : bool or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
`apply` will use `reduce` to determine whether the result
should be a Series or a DataFrame. If ``reduce=None`` (the
default), `apply`'s return value will be guessed by calling
`func` on an empty Series
(note: while guessing, exceptions raised by `func` will be
ignored).
If ``reduce=True`` a Series will always be returned, and if
``reduce=False`` a DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by ``result_type='reduce'``.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Notes
-----
In the current implementation apply calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing result_type='expand' will expand list-like results
to columns of a DataFrame
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
broadcast=broadcast,
raw=raw,
reduce=reduce,
result_type=result_type,
args=args,
kwds=kwds)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a DataFrame elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Notes
-----
In the current implementation applymap calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False,
verify_integrity=False, sort=None):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
silence the warning and sort. Explicitly pass ``sort=False`` to
silence the warning and not sort.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame, Series
or Panel objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith
Series.corr
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
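A plain Pearson correlation on invented data:
>>> df2 = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 2, 1]})
>>> df2.corr()
a b
a 1.0 -1.0
b -1.0 1.0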
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(ensure_float64(mat),
minp=min_periods)
elif method == 'kendall' or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError("method must be either 'pearson', "
"'spearman', or 'kendall', '{method}' "
"was supplied".format(method=method))
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.EWM.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimated covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False, method='pearson'):
"""
Compute pairwise correlation between rows or columns of DataFrame
with rows or columns of Series or DataFrame. DataFrames are first
aligned along both axes before computing the correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr
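Examples
--------
A minimal sketch with invented data (one perfectly correlated and one
anti-correlated column):
>>> df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1]})
>>> df2 = pd.DataFrame({'a': [2, 4, 6, 8], 'b': [1, 2, 3, 4]})
>>> df1.corrwith(df2)
a 1.0
b -1.0
dtype: float64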
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method),
axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
if axis == 1:
left = left.T
right = right.T
if method == 'pearson':
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ['kendall', 'spearman'] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = Series(map(c,
zip(left.values.T, right.values.T)),
index=left.columns)
else:
raise ValueError("Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable".
format(method=method))
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = (this._get_axis(raxis).
union(other._get_axis(raxis)))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff),
index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each **row**.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._data.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical "
"{ax}.".format(ax=self._get_axis_name(axis)))
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notna(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
level_codes = ensure_int64(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index),
axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if axis is None and filter_type == 'bool':
labels = None
constructor = None
else:
# TODO: Make other agg func handle axis=None properly
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
constructor = self._constructor
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
# exclude timedelta/datetime unless we are uniform types
if (axis == 1 and self._is_datelike_mixed_type
and (not self._is_homogeneous_type
and not is_datetime64tz_dtype(self.dtypes[0]))):
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
if (filter_type == 'bool' and is_object_dtype(values) and
axis is None):
# work around https://github.com/numpy/numpy/issues/10489
# TODO: combine with hasattr(result, 'dtype') further down
# hard since we don't have `values` down there.
result = np.bool_(result)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
from pandas.core.apply import frame_apply
opa = frame_apply(self,
func=f,
result_type='expand',
ignore_failures=True)
result = opa.get_result()
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except Exception:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError(
"Handling exception with filter_type {f} not"
"implemented.".format(f=filter_type))
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
# GH 25101, # GH 24434
data = self._get_bool_data() if axis == 0 else self
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type {f}"
"not supported.".format(f=filter_type))
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
if constructor is not None:
result = Series(result, index=labels)
return result
def nunique(self, axis=0, dropna=True):
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
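Examples
--------
A small illustration (data invented for this sketch):
>>> df = pd.DataFrame({'a': [3, 1, 2], 'b': [2, 3, 1]})
>>> df.idxmax()
a 0
b 1
dtype: int64
>>> df.idxmax(axis=1)
0 a
1 b
2 a
dtype: object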
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False, dropna=True):
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the mode of wings
are both 0 and 2. The second row of species and legs contains ``NaN``,
because they have only one mode, but the DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
Setting ``dropna=False``, ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'} (default 0)
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 0.18.0
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
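Examples
--------
A small sketch (periods invented for the illustration):
>>> idx = pd.period_range('2018-01', periods=2, freq='M')
>>> df = pd.DataFrame({'y': [1, 2]}, index=idx)
>>> df = df.to_timestamp(how='start')
>>> df.index.dtype
dtype('<M8[ns]')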
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, default
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with PeriodIndex
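Examples
--------
A small sketch (dates invented for the illustration):
>>> idx = pd.date_range('2018-01-01', periods=3, freq='M')
>>> df = pd.DataFrame({'y': [1, 2, 3]}, index=idx)
>>> df.to_period('M').index
PeriodIndex(['2018-01', '2018-02', '2018-03'], dtype='period[M]', freq='M')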
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0},
docs={
'index': 'The index (row labels) of the DataFrame.',
'columns': 'The column labels of the DataFrame.'})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return u'{s}'.format(s=s)[:space].ljust(space)
| bsd-3-clause |
gef756/statsmodels | statsmodels/sandbox/examples/bayesprior.py | 33 | 7003 | #
# This script examines the predictive prior densities of two local level
# models given the same priors for parameters that appear to be the same.
# Reference: Del Negro and Schorfheide.
try:
import pymc
pymc_installed = 1
except ImportError:
print("pymc not imported")
pymc_installed = 0
from scipy.stats import gamma, beta, invgamma
import numpy as np
from matplotlib import pyplot as plt
from scipy import integrate, stats  # integrate is needed by igamma_gen._entropy
from scipy.stats import rv_continuous
from scipy.special import gammaln, gammaincinv, gamma, gammainc
from numpy import log,exp
#np.random.seed(12345)
class igamma_gen(rv_continuous):
def _pdf(self, x, a, b):
return exp(self._logpdf(x,a,b))
def _logpdf(self, x, a, b):
return a*log(b) - gammaln(a) -(a+1)*log(x) - b/x
def _cdf(self, x, a, b):
return 1.0-gammainc(a,b/x) # why is this different than the wiki?
def _ppf(self, q, a, b):
return b/gammaincinv(a,1-q)
#NOTE: should be correct, work through invgamma example and 2 param inv gamma
#CDF
def _munp(self, n, a, b):
args = (a,b)
return super(igamma_gen, self)._munp(n, *args)
#TODO: is this robust for differential entropy in this case? closed form or
#shortcuts in special?
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return val*log(val)
entr = -integrate.quad(integ, self.a, self.b)[0]
if not np.isnan(entr):
return entr
else:
raise ValueError("Problem with integration. Returned nan.")
igamma = igamma_gen(a=0.0, name='invgamma', longname="An inverted gamma",
shapes = 'a,b', extradoc="""
Inverted gamma distribution
invgamma.pdf(x,a,b) = b**a*x**(-a-1)/gamma(a) * exp(-b/x)
for x > 0, a > 0, b>0.
"""
)
#NOTE: the above is unnecessary. B takes the same role as the scale parameter
# in inverted gamma
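# Quick sanity check (added illustration, not in the original script): with b
# passed as scipy's ``scale``, the two parameterizations should agree.
_x_chk = np.linspace(0.1, 5.0, 50)
assert np.allclose(igamma.pdf(_x_chk, 1., 4.), stats.invgamma.pdf(_x_chk, 1., scale=4.))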
palpha = np.random.gamma(400.,.005, size=10000)
print("First moment: %s\nSecond moment: %s" % (palpha.mean(),palpha.std()))
palpha = palpha[0]
prho = np.random.beta(49.5,49.5, size=100000)
print("Beta Distribution")
print("First moment: %s\nSecond moment: %s" % (prho.mean(),prho.std()))
prho = prho[0]
psigma = igamma.rvs(1.,4.**2/2, size=100000)
print("Inverse Gamma Distribution")
print("First moment: %s\nSecond moment: %s" % (psigma.mean(),psigma.std()))
# First do the univariate case
# y_t = theta_t + epsilon_t
# epsilon ~ N(0,1)
# Where theta ~ N(mu,lambda**2)
# or the model
# y_t = theta2_t + theta1_t * y_t-1 + epsilon_t
# Prior 1:
# theta1 ~ uniform(0,1)
# theta2|theta1 ~ N(mu,lambda**2)
# Prior 2:
# theta1 ~ U(0,1)
# theta2|theta1 ~ N(mu(1-theta1),lambda**2(1-theta1)**2)
draws = 400
# prior beliefs, from JME paper
mu_, lambda_ = 1.,2.
# Model 1
y1y2 = np.zeros((draws,2))
for draw in range(draws):
theta = np.random.normal(mu_,lambda_**2)
y1 = theta + np.random.normal()
y2 = theta + np.random.normal()
y1y2[draw] = y1,y2
# log marginal distribution
lnp1p2_mod1 = stats.norm.pdf(y1,loc=mu_, scale=lambda_**2+1)*\
stats.norm.pdf(y2,mu_,scale=lambda_**2+1)
# Model 2
pmu_pairsp1 = np.zeros((draws,2))
y1y2pairsp1 = np.zeros((draws,2))
# prior 1
for draw in range(draws):
theta1 = np.random.uniform(0,1)
theta2 = np.random.normal(mu_, lambda_**2)
# mu = theta2/(1-theta1)
#don't do this to maintain independence theta2 is the _location_
# y1 = np.random.normal(mu_, lambda_**2)
y1 = theta2
# pmu_pairsp1[draw] = mu, theta1
pmu_pairsp1[draw] = theta2, theta1 # mean, autocorr
y2 = theta2 + theta1 * y1 + np.random.normal()
y1y2pairsp1[draw] = y1,y2
# for a = 0, b = 1 - epsilon = .99999
# mean of u is .5*.99999
# variance is 1./12 * .99999**2
# Model 2
pmu_pairsp2 = np.zeros((draws,2))
y1y2pairsp2 = np.zeros((draws,2))
# prior 2
theta12_2 = []
for draw in range(draws):
# y1 = np.random.uniform(-4,6)
theta1 = np.random.uniform(0,1)
theta2 = np.random.normal(mu_*(1-theta1), lambda_**2*(1-theta1)**2)
theta12_2.append([theta1,theta2])
mu = theta2/(1-theta1)
y1 = np.random.normal(mu_,lambda_**2)
y2 = theta2 + theta1 * y1 + np.random.normal()
pmu_pairsp2[draw] = mu, theta1
y1y2pairsp2[draw] = y1,y2
fig = plt.figure()
fsp = fig.add_subplot(221)
fsp.scatter(pmu_pairsp1[:,0], pmu_pairsp1[:,1], color='b', facecolor='none')
fsp.set_ylabel('Autocorrelation (Y)')
fsp.set_xlabel('Mean (Y)')
fsp.set_title('Model 2 (P1)')
fsp.axis([-20,20,0,1])
fsp = fig.add_subplot(222)
fsp.scatter(pmu_pairsp2[:,0],pmu_pairsp2[:,1], color='b', facecolor='none')
fsp.set_title('Model 2 (P2)')
fsp.set_ylabel('Autocorrelation (Y)')
fsp.set_xlabel('Mean (Y)')
fsp.set_title('Model 2 (P2)')
fsp.axis([-20,20,0,1])
fsp = fig.add_subplot(223)
fsp.scatter(y1y2pairsp1[:,0], y1y2pairsp1[:,1], color='b', marker='o',
facecolor='none')
fsp.scatter(y1y2[:,0], y1y2[:,1], color ='g', marker='+')
fsp.set_title('Model 1 vs. Model 2 (P1)')
fsp.set_ylabel('Y(2)')
fsp.set_xlabel('Y(1)')
fsp.axis([-20,20,-20,20])
fsp = fig.add_subplot(224)
fsp.scatter(y1y2pairsp2[:,0], y1y2pairsp2[:,1], color='b', marker='o')
fsp.scatter(y1y2[:,0], y1y2[:,1], color='g', marker='+')
fsp.set_title('Model 1 vs. Model 2 (P2)')
fsp.set_ylabel('Y(2)')
fsp.set_xlabel('Y(1)')
fsp.axis([-20,20,-20,20])
#plt.show()
#TODO: this doesn't look the same as the working paper?
#NOTE: but it matches the language? I think mine is right!
# Contour plots.
# on the basis of observed data. ie., the mgrid
#np.mgrid[6:-4:10j,-4:6:10j]
# Example 2:
# 2 NK Phillips Curves
# Structural form
# M1: y_t = 1/alpha *E_t[y_t+1] + mu_t
# mu_t = p1 * mu_t-1 + epsilon_t
# epsilon_t ~ N(0,sigma2)
# Reduced form Law of Motion
# M1: y_t = p1*y_t-1 + 1/(1-p1/alpha)*epsilon_t
# specify prior for M1
# for i = 1,2
# theta_i = [alpha
# p_i
# sigma]
# truncate effective priors by the determinancy region
# for determinancy we need alpha > 1
# p in [0,1)
# palpha ~ Gamma(2.00,.10)
# mean = 2.00
# std = .1 which implies k = 400, theta = .005
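# (Added note on the derivation: for Gamma(k, theta) in shape/scale form,
# mean = k*theta and std = sqrt(k)*theta, so k = (mean/std)**2 = (2.00/.1)**2
# = 400 and theta = std**2/mean = .1**2/2.00 = .005.)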
palpha = np.random.gamma(400,.005)
# pi ~ Beta(.5,.05)
pi = np.random.beta(49.5, 49.5)
# psigma ~ InvGamma(1.00,4.00)
#def invgamma(a,b):
# return np.sqrt(b*a**2/np.sum(np.random.random(b,1)**2, axis=1))
#NOTE: Use inverse gamma distribution igamma
psigma = igamma.rvs(1.,4.0, size=1000000) #TODO: parameterization is not correct vs.
# Del Negro and Schorfheide
if pymc_installed:
psigma2 = pymc.rinverse_gamma(1.,4.0, size=1000000)
else:
psigma2 = stats.invgamma.rvs(1., scale=4.0, size=1000000)
nsims = 500
y = np.zeros((nsims))
#for i in range(1,nsims):
# y[i] = .9*y[i-1] + 1/(1-p1/alpha) + np.random.normal()
#Are these supposed to be sampled jointly?
# InvGamma(sigma|v,s) propto sigma**(-v-1)*e**(-vs**2/2*sigma**2)
#igamma =
# M2: y_t = 1/alpha * E_t[y_t+1] + p2*y_t-1 + mu_t
# mu_t ~ epsilon_t
# epsilon_t ~ n(0,sigma2)
# Reduced form Law of Motion
# y_t = 1/2 (alpha-sqrt(alpha**2-4*p2*alpha)) * y_t-1 + 2*alpha/(alpha + \
# sqrt(alpha**2 - 4*p2*alpha)) * epsilon_t
| bsd-3-clause |
loli/semisupervisedforests | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
Og192/Python | sklearnLearning/statisticalAndSupervisedLearning/adaboost.py | 2 | 2852 | print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(real_test_predict, y_test))
    discrete_test_errors.append(
        1. - accuracy_score(discrete_test_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
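# (added note: len() of a fitted AdaBoost ensemble equals len(clf.estimators_), i.e. the number
#  of boosting rounds actually performed, so the slices above trim the fixed-size arrays down to
#  the trees that were really built)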
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show() | gpl-2.0 |
kknox/clBLAS | src/scripts/perf/plotPerformance.py | 16 | 12286 | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
# to use this script, you will need to download and install the 32-BIT VERSION of:
# - Python 2.7 x86 (32-bit) - http://www.python.org/download/releases/2.7.1
#
# you will also need the 32-BIT VERSIONS of the following packages as not all the packages are available in 64bit at the time of this writing
# The ActiveState python distribution is recommended for windows
# (make sure to get the python 2.7-compatible packages):
# - NumPy 1.5.1 (32-bit, 64-bit unofficial, supports Python 2.4 - 2.7 and 3.1 - 3.2.) - http://sourceforge.net/projects/numpy/files/NumPy/
# - matplotlib 1.0.1 (32-bit & 64-bit, supports Python 2.4 - 2.7) - http://sourceforge.net/projects/matplotlib/files/matplotlib/
#
# For ActiveState Python, all that one should need to type is 'pypm install matplotlib'
import datetime
import sys
import argparse
import subprocess
import itertools
import os
import matplotlib
import pylab
from matplotlib.backends.backend_pdf import PdfPages
from blasPerformanceTesting import *
def plotGraph(dataForAllPlots, title, plottype, plotkwargs, xaxislabel, yaxislabel):
"""
display a pretty graph
"""
colors = ['k','y','m','c','r','b','g']
#plottype = 'plot'
for thisPlot in dataForAllPlots:
getattr(pylab, plottype)(thisPlot.xdata, thisPlot.ydata,
'{}.-'.format(colors.pop()),
label=thisPlot.label, **plotkwargs)
if len(dataForAllPlots) > 1:
pylab.legend(loc='best')
pylab.title(title)
pylab.xlabel(xaxislabel)
pylab.ylabel(yaxislabel)
pylab.grid(True)
if args.outputFilename == None:
# if no pdf output is requested, spit the graph to the screen . . .
pylab.show()
else:
pylab.savefig(args.outputFilename,dpi=(1024/8))
# . . . otherwise, gimme gimme pdf
#pdf = PdfPages(args.outputFilename)
#pdf.savefig()
#pdf.close()
######## plotFromDataFile() Function to plot from data file begins ########
def plotFromDataFile():
data = []
"""
read in table(s) from file(s)
"""
for thisFile in args.datafile:
if not os.path.isfile(thisFile):
print 'No file with the name \'{}\' exists. Please indicate another filename.'.format(thisFile)
quit()
results = open(thisFile, 'r')
results_contents = results.read()
results_contents = results_contents.rstrip().split('\n')
firstRow = results_contents.pop(0)
print firstRow
print blas_table_header()
print firstRow.rstrip()==blas_table_header()
if firstRow.rstrip() != blas_table_header():
print 'ERROR: input file \'{}\' does not match expected format.'.format(thisFile)
quit()
for row in results_contents:
row = row.split(',')
row = TableRow(BlasTestCombination(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14], row[15], row[16], row[17][1:], row[17][0], row[18], row[19], row[20]), row[21])
data.append(BlasGraphPoint(row.parameters.sizem, row.parameters.sizen, row.parameters.sizek, row.parameters.lda, row.parameters.ldb, row.parameters.ldc, row.parameters.offa , row.parameters.offb , row.parameters.offc , row.parameters.device, row.parameters.order, row.parameters.transa, row.parameters.transb, row.parameters.precision + row.parameters.function, row.parameters.library, row.parameters.label, row.gflops))
"""
data sanity check
"""
# if multiple plotvalues have > 1 value among the data rows, the user must specify which to plot
multiplePlotValues = []
for option in plotvalues:
values = []
for point in data:
values.append(getattr(point, option))
multiplePlotValues.append(len(set(values)) > 1)
if multiplePlotValues.count(True) > 1 and args.plot == None:
print 'ERROR: more than one parameter of {} has multiple values. Please specify which parameter to plot with --plot'.format(plotvalues)
quit()
# if args.graphxaxis is not 'problemsize', the user should know that the results might be strange
#if args.graphxaxis != 'problemsize':
# xaxisvalueSet = []
# for option in xaxisvalues:
# if option != 'problemsize':
# values = []
# for point in data:
# values.append(getattr(point, option))
# xaxisvalueSet.append(len(set(values)) > 1)
# if xaxisvalueSet.count(True) > 1:
# print 'WARNING: more than one parameter of {} is varied. unexpected results may occur. please double check your graphs for accuracy.'.format(xaxisvalues)
# multiple rows should not have the same input values
#pointInputs = []
#for point in data:
# pointInputs.append(point.__str__().split(';')[0])
#if len(set(pointInputs)) != len(data):
# print 'ERROR: imported table has duplicate rows with identical input parameters'
# quit()
"""
figure out if we have multiple plots on this graph (and what they should be)
"""
if args.plot != None:
multiplePlots = args.plot
elif multiplePlotValues.count(True) == 1 and plotvalues[multiplePlotValues.index(True)] != 'sizek':
# we don't ever want to default to sizek, because it's probably going to vary for most plots
# we'll require the user to explicitly request multiple plots on sizek if necessary
multiplePlots = plotvalues[multiplePlotValues.index(True)]
else:
# default to device if none of the options to plot have multiple values
multiplePlots = 'device'
"""
assemble data for the graphs
"""
data.sort(key=lambda row: int(getattr(row, args.graphxaxis)))
# choose scale for x axis
if args.xaxisscale == None:
# user didn't specify. autodetect
        if int(getattr(data[len(data)-1], args.graphxaxis)) > 10000: # bigger numbers on x-axis
            args.xaxisscale = 'log10'
        elif int(getattr(data[len(data)-1], args.graphxaxis)) > 2000: # big numbers on x-axis
            args.xaxisscale = 'log2'
        else: # small numbers on x-axis
            args.xaxisscale = 'linear'
if args.xaxisscale == 'linear':
plotkwargs = {}
plottype = 'plot'
elif args.xaxisscale == 'log2':
plottype = 'semilogx'
plotkwargs = {'basex':2}
elif args.xaxisscale == 'log10':
plottype = 'semilogx'
plotkwargs = {'basex':10}
else:
print 'ERROR: invalid value for x-axis scale'
quit()
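    # (added note: the plottype/plotkwargs chosen above are applied further down as
    #  getattr(pylab, plottype)(xdata, ydata, ..., **plotkwargs), e.g.
    #  pylab.semilogx(xdata, ydata, basex=2) for the 'log2' case)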
plots = set(getattr(row, multiplePlots) for row in data)
class DataForOnePlot:
def __init__(self, inlabel, inxdata, inydata):
self.label = inlabel
self.xdata = inxdata
self.ydata = inydata
dataForAllPlots = []
for plot in plots:
dataForThisPlot = itertools.ifilter( lambda x: getattr(x, multiplePlots) == plot, data)
dataForThisPlot = list(itertools.islice(dataForThisPlot, None))
#if args.graphxaxis == 'problemsize':
# xdata = [int(row.x) * int(row.y) * int(row.z) * int(row.batchsize) for row in dataForThisPlot]
#else:
xdata = [getattr(row, args.graphxaxis) for row in dataForThisPlot]
ydata = [getattr(row, args.graphyaxis) for row in dataForThisPlot]
dataForAllPlots.append(DataForOnePlot(plot,xdata,ydata))
"""
assemble labels for the graph or use the user-specified ones
"""
if args.graphtitle:
# use the user selection
title = args.graphtitle
else:
# autogen a lovely title
title = 'Performance vs. ' + args.graphxaxis.capitalize()
if args.xaxislabel:
# use the user selection
xaxislabel = args.xaxislabel
else:
# autogen a lovely x-axis label
if args.graphxaxis == 'cachesize':
units = '(bytes)'
else:
units = '(datapoints)'
xaxislabel = args.graphxaxis + ' ' + units
if args.yaxislabel:
# use the user selection
yaxislabel = args.yaxislabel
else:
# autogen a lovely y-axis label
if args.graphyaxis == 'gflops':
units = 'GFLOPS'
yaxislabel = 'Performance (' + units + ')'
"""
display a pretty graph
"""
colors = ['k','y','m','c','r','b','g']
for thisPlot in dataForAllPlots:
getattr(pylab, plottype)(thisPlot.xdata, thisPlot.ydata, '{}.-'.format(colors.pop()), label=thisPlot.label, **plotkwargs)
if len(dataForAllPlots) > 1:
pylab.legend(loc='best')
pylab.title(title)
pylab.xlabel(xaxislabel)
pylab.ylabel(yaxislabel)
pylab.grid(True)
if args.outputFilename == None:
# if no pdf output is requested, spit the graph to the screen . . .
pylab.show()
else:
# . . . otherwise, gimme gimme pdf
#pdf = PdfPages(args.outputFilename)
#pdf.savefig()
#pdf.close()
pylab.savefig(args.outputFilename,dpi=(1024/8))
######### plotFromDataFile() Function to plot from data file ends #########
######## "main" program begins #####
"""
define and parse parameters
"""
xaxisvalues = ['sizem','sizen','sizek']
yaxisvalues = ['gflops']
plotvalues = ['lda','ldb','ldc','sizek','device','label','order','transa','transb','function','library']
parser = argparse.ArgumentParser(description='Plot performance of the clblas\
library. clblas.plotPerformance.py reads in data tables from clblas.\
measurePerformance.py and plots their values')
fileOrDb = parser.add_mutually_exclusive_group(required=True)
fileOrDb.add_argument('-d', '--datafile',
dest='datafile', action='append', default=None, required=False,
help='indicate a file to use as input. must be in the format output by\
clblas.measurePerformance.py. may be used multiple times to indicate\
multiple input files. e.g., -d cypressOutput.txt -d caymanOutput.txt')
parser.add_argument('-x', '--x_axis',
dest='graphxaxis', default=None, choices=xaxisvalues, required=True,
    help='indicate which value will be represented on the x axis')
parser.add_argument('-y', '--y_axis',
dest='graphyaxis', default='gflops', choices=yaxisvalues,
help='indicate which value will be represented on the y axis')
parser.add_argument('--plot',
dest='plot', default=None, choices=plotvalues,
help='indicate which of {} should be used to differentiate multiple plots.\
this will be chosen automatically if not specified'.format(plotvalues))
parser.add_argument('--title',
dest='graphtitle', default=None,
help='the desired title for the graph generated by this execution. if\
GRAPHTITLE contains any spaces, it must be entered in \"double quotes\".\
if this option is not specified, the title will be autogenerated')
parser.add_argument('--x_axis_label',
dest='xaxislabel', default=None,
help='the desired label for the graph\'s x-axis. if XAXISLABEL contains\
any spaces, it must be entered in \"double quotes\". if this option\
is not specified, the x-axis label will be autogenerated')
parser.add_argument('--x_axis_scale',
dest='xaxisscale', default=None, choices=['linear','log2','log10'],
help='the desired scale for the graph\'s x-axis. if nothing is specified,\
it will be selected automatically')
parser.add_argument('--y_axis_label',
dest='yaxislabel', default=None,
help='the desired label for the graph\'s y-axis. if YAXISLABEL contains any\
spaces, it must be entered in \"double quotes\". if this option is not\
specified, the y-axis label will be autogenerated')
parser.add_argument('--outputfile',
dest='outputFilename', default=None,
help='name of the file to output graphs. Supported formats: emf, eps, pdf, png, ps, raw, rgba, svg, svgz.')
args = parser.parse_args()
if args.datafile != None:
plotFromDataFile()
else:
print "Atleast specify if you want to use text files or database for plotting graphs. Use -h or --help option for more details"
quit()
| apache-2.0 |
bikong2/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
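    # (added note: the coverage grids mark ocean/no-data cells with the sentinel value -9999,
    #  which is why land pixels are selected below with `land_reference > -9999` and masked
    #  back out with `Z[land_reference == -9999] = -9999`)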
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
samthor/intellij-community | python/helpers/pydev/pydevd.py | 9 | 99805 | #IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe
from __future__ import nested_scopes # Jython 2.1 support
import pydev_monkey_qt
from pydevd_utils import save_main_module
import pydevd_utils
pydev_monkey_qt.patch_qt()
import traceback
from pydevd_frame_utils import add_exception_to_frame
import pydev_imports
from pydevd_breakpoints import * #@UnusedWildImport
import fix_getpass
from pydevd_comm import CMD_CHANGE_VARIABLE, \
CMD_EVALUATE_EXPRESSION, \
CMD_EXEC_EXPRESSION, \
CMD_GET_COMPLETIONS, \
CMD_GET_FRAME, \
CMD_GET_VARIABLE, \
CMD_GET_ARRAY, \
CMD_LIST_THREADS, \
CMD_REMOVE_BREAK, \
CMD_RUN, \
CMD_SET_BREAK, \
CMD_SET_NEXT_STATEMENT,\
CMD_STEP_INTO, \
CMD_STEP_OVER, \
CMD_STEP_RETURN, \
CMD_STEP_INTO_MY_CODE, \
CMD_THREAD_KILL, \
CMD_THREAD_RUN, \
CMD_THREAD_SUSPEND, \
CMD_RUN_TO_LINE, \
CMD_RELOAD_CODE, \
CMD_VERSION, \
CMD_CONSOLE_EXEC, \
CMD_ADD_EXCEPTION_BREAK, \
CMD_REMOVE_EXCEPTION_BREAK, \
CMD_LOAD_SOURCE, \
CMD_ADD_DJANGO_EXCEPTION_BREAK, \
CMD_REMOVE_DJANGO_EXCEPTION_BREAK, \
CMD_SMART_STEP_INTO,\
InternalChangeVariable, \
InternalGetCompletions, \
InternalEvaluateExpression, \
InternalConsoleExec, \
InternalGetFrame, \
InternalGetVariable, \
InternalGetArray, \
InternalTerminateThread, \
InternalRunThread, \
InternalStepThread, \
NetCommandFactory, \
PyDBDaemonThread, \
_queue, \
ReaderThread, \
SetGlobalDebugger, \
WriterThread, \
PydevdFindThreadById, \
PydevdLog, \
StartClient, \
StartServer, \
InternalSetNextStatementThread, \
ReloadCodeCommand, \
CMD_SET_PY_EXCEPTION, \
CMD_IGNORE_THROWN_EXCEPTION_AT,\
InternalGetBreakpointException, \
InternalSendCurrExceptionTrace,\
InternalSendCurrExceptionTraceProceeded,\
CMD_ENABLE_DONT_TRACE, \
CMD_GET_FILE_CONTENTS,\
CMD_SET_PROPERTY_TRACE, \
CMD_RUN_CUSTOM_OPERATION,\
InternalRunCustomOperation, \
CMD_EVALUATE_CONSOLE_EXPRESSION, \
InternalEvaluateConsoleExpression,\
InternalConsoleGetCompletions
from pydevd_file_utils import NormFileToServer, GetFilenameAndBase
import pydevd_file_utils
import pydevd_vars
import pydevd_vm_type
import pydevd_tracing
import pydevd_io
from pydevd_additional_thread_info import PyDBAdditionalThreadInfo
from pydevd_custom_frames import CustomFramesContainer, CustomFramesContainerInit
import pydevd_dont_trace
import pydevd_traceproperty
from _pydev_imps import _pydev_time as time, _pydev_thread
import _pydev_threading as threading
import os
import atexit
SUPPORT_PLUGINS = not IS_JYTH_LESS25
PluginManager = None
if SUPPORT_PLUGINS:
from pydevd_plugin_utils import PluginManager
if IS_PY3K:
import pkgutil
else:
from _pydev_imps import _pydev_pkgutil_old as pkgutil
threadingEnumerate = threading.enumerate
threadingCurrentThread = threading.currentThread
try:
'dummy'.encode('utf-8') # Added because otherwise Jython 2.2.1 wasn't finding the encoding (if it wasn't loaded in the main thread).
except:
pass
LIB_FILE = 0
PYDEV_FILE = 1
DONT_TRACE = {
# commonly used things from the stdlib that we don't want to trace
'Queue.py':LIB_FILE,
'queue.py':LIB_FILE,
'socket.py':LIB_FILE,
'weakref.py':LIB_FILE,
'_weakrefset.py':LIB_FILE,
'linecache.py':LIB_FILE,
'threading.py':LIB_FILE,
    # third-party libs that we don't want to trace
'_pydev_pluginbase.py':PYDEV_FILE,
'_pydev_pkgutil_old.py':PYDEV_FILE,
'_pydev_uuid_old.py':PYDEV_FILE,
#things from pydev that we don't want to trace
'_pydev_execfile.py':PYDEV_FILE,
'_pydev_jython_execfile.py':PYDEV_FILE,
'_pydev_threading':PYDEV_FILE,
'_pydev_Queue':PYDEV_FILE,
'django_debug.py':PYDEV_FILE,
'jinja2_debug.py':PYDEV_FILE,
'pydev_log.py':PYDEV_FILE,
'pydev_monkey.py':PYDEV_FILE,
'pydev_monkey_qt.py':PYDEV_FILE,
'pydevd.py':PYDEV_FILE,
'pydevd_additional_thread_info.py':PYDEV_FILE,
'pydevd_breakpoints.py':PYDEV_FILE,
'pydevd_comm.py':PYDEV_FILE,
'pydevd_console.py':PYDEV_FILE,
'pydevd_constants.py':PYDEV_FILE,
'pydevd_custom_frames.py':PYDEV_FILE,
'pydevd_dont_trace.py':PYDEV_FILE,
'pydevd_exec.py':PYDEV_FILE,
'pydevd_exec2.py':PYDEV_FILE,
'pydevd_file_utils.py':PYDEV_FILE,
'pydevd_frame.py':PYDEV_FILE,
'pydevd_import_class.py':PYDEV_FILE,
'pydevd_io.py':PYDEV_FILE,
'pydevd_psyco_stub.py':PYDEV_FILE,
'pydevd_referrers.py':PYDEV_FILE,
'pydevd_reload.py':PYDEV_FILE,
'pydevd_resolver.py':PYDEV_FILE,
'pydevd_save_locals.py':PYDEV_FILE,
'pydevd_signature.py':PYDEV_FILE,
'pydevd_stackless.py':PYDEV_FILE,
'pydevd_traceproperty.py':PYDEV_FILE,
'pydevd_tracing.py':PYDEV_FILE,
'pydevd_utils.py':PYDEV_FILE,
'pydevd_vars.py':PYDEV_FILE,
'pydevd_vm_type.py':PYDEV_FILE,
'pydevd_xml.py':PYDEV_FILE,
}
if IS_PY3K:
# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716)
DONT_TRACE['io.py'] = LIB_FILE
# Don't trace common encodings too
DONT_TRACE['cp1252.py'] = LIB_FILE
DONT_TRACE['utf_8.py'] = LIB_FILE
connected = False
bufferStdOutToServer = False
bufferStdErrToServer = False
remote = False
from _pydev_filesystem_encoding import getfilesystemencoding
file_system_encoding = getfilesystemencoding()
# Hack for https://sw-brainwy.rhcloud.com/tracker/PyDev/363 (i.e.: calling isAlive() can throw AssertionError under some circumstances)
# It is required to debug threads started by start_new_thread in Python 3.4
_temp = threading.Thread()
if hasattr(_temp, '_is_stopped'): # Python 3.4 has this
def isThreadAlive(t):
try:
return not t._is_stopped
except:
return t.isAlive()
elif hasattr(_temp, '_Thread__stopped'): # Python 2.7 has this
def isThreadAlive(t):
try:
return not t._Thread__stopped
except:
return t.isAlive()
else: # Haven't checked all other versions, so, let's use the regular isAlive call in this case.
def isThreadAlive(t):
return t.isAlive()
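# (added note: the shims above read the interpreter-private "stopped" flag directly and only
#  fall back to isAlive(), so the liveness check keeps working even when isAlive() raises an
#  AssertionError during interpreter shutdown, as mentioned in the comment above)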
del _temp
#=======================================================================================================================
# PyDBCommandThread
#=======================================================================================================================
class PyDBCommandThread(PyDBDaemonThread):
def __init__(self, pyDb):
PyDBDaemonThread.__init__(self)
self._py_db_command_thread_event = pyDb._py_db_command_thread_event
self.pyDb = pyDb
self.setName('pydevd.CommandThread')
def OnRun(self):
for i in xrange(1, 10):
            time.sleep(0.5)  # this one will only start later on (because otherwise we may not have any non-daemon threads)
if self.killReceived:
return
if self.dontTraceMe:
self.pyDb.SetTrace(None) # no debugging on this thread
try:
while not self.killReceived:
try:
self.pyDb.processInternalCommands()
except:
PydevdLog(0, 'Finishing debug communication...(2)')
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(0.5)
except:
pydev_log.debug(sys.exc_info()[0])
#only got this error in interpreter shutdown
#PydevdLog(0, 'Finishing debug communication...(3)')
def killAllPydevThreads():
threads = DictKeys(PyDBDaemonThread.created_pydb_daemon_threads)
for t in threads:
if hasattr(t, 'doKillPydevThread'):
t.doKillPydevThread()
#=======================================================================================================================
# CheckOutputThread
# Non-daemonic thread guarantees that all data is written even if the program has finished
#=======================================================================================================================
class CheckOutputThread(PyDBDaemonThread):
def __init__(self, pyDb):
PyDBDaemonThread.__init__(self)
self.pyDb = pyDb
self.setName('pydevd.CheckAliveThread')
self.daemon = False
pyDb.output_checker = self
def OnRun(self):
if self.dontTraceMe:
disable_tracing = True
if pydevd_vm_type.GetVmType() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
# don't run untraced threads if we're in jython 2.2.1 or lower
# jython bug: if we start a thread and another thread changes the tracing facility
# it affects other threads (it's not set only for the thread but globally)
# Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
disable_tracing = False
if disable_tracing:
pydevd_tracing.SetTrace(None) # no debugging on this thread
while not self.killReceived:
time.sleep(0.3)
if not self.pyDb.haveAliveThreads() and self.pyDb.writer.empty() \
and not has_data_to_redirect():
try:
pydev_log.debug("No alive threads, finishing debug session")
self.pyDb.FinishDebuggingSession()
killAllPydevThreads()
except:
traceback.print_exc()
self.killReceived = True
self.pyDb.checkOutputRedirect()
def doKillPydevThread(self):
self.killReceived = True
#=======================================================================================================================
# PyDB
#=======================================================================================================================
class PyDB:
""" Main debugging class
Lots of stuff going on here:
PyDB starts two threads on startup that connect to remote debugger (RDB)
The threads continuously read & write commands to RDB.
PyDB communicates with these threads through command queues.
Every RDB command is processed by calling processNetCommand.
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue
Some commands need to be executed on the right thread (suspend/resume & friends)
These are placed on the internal command queue.
"""
def __init__(self):
SetGlobalDebugger(self)
pydevd_tracing.ReplaceSysSetTraceFunc()
self.reader = None
self.writer = None
self.output_checker = None
self.quitting = None
self.cmdFactory = NetCommandFactory()
        self._cmd_queue = {}  # the hash of Queues. Key is thread id, value is the internal command queue for that thread
self.breakpoints = {}
self.file_to_id_to_line_breakpoint = {}
self.file_to_id_to_plugin_breakpoint = {}
# Note: breakpoints dict should not be mutated: a copy should be created
# and later it should be assigned back (to prevent concurrency issues).
self.break_on_uncaught_exceptions = {}
self.break_on_caught_exceptions = {}
self.readyToRun = False
self._main_lock = _pydev_thread.allocate_lock()
self._lock_running_thread_ids = _pydev_thread.allocate_lock()
self._py_db_command_thread_event = threading.Event()
CustomFramesContainer._py_db_command_thread_event = self._py_db_command_thread_event
self._finishDebuggingSession = False
self._terminationEventSent = False
self.signature_factory = None
self.SetTrace = pydevd_tracing.SetTrace
self.break_on_exceptions_thrown_in_same_context = False
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
self.project_roots = None
# Suspend debugger even if breakpoint condition raises an exception
SUSPEND_ON_BREAKPOINT_EXCEPTION = True
self.suspend_on_breakpoint_exception = SUSPEND_ON_BREAKPOINT_EXCEPTION
# By default user can step into properties getter/setter/deleter methods
self.disable_property_trace = False
self.disable_property_getter_trace = False
self.disable_property_setter_trace = False
self.disable_property_deleter_trace = False
        #this is a dict of thread ids pointing to the corresponding threads. Whenever a command is passed to the java end that
#acknowledges that a thread was created, the thread id should be passed here -- and if at some time we do not
#find that thread alive anymore, we must remove it from this list and make the java side know that the thread
#was killed.
self._running_thread_ids = {}
self._set_breakpoints_with_id = False
# This attribute holds the file-> lines which have an @IgnoreException.
self.filename_to_lines_where_exceptions_are_ignored = {}
#working with plugins (lazily initialized)
self.plugin = None
self.has_plugin_line_breaks = False
self.has_plugin_exception_breaks = False
# matplotlib support in debugger and debug console
self.mpl_in_use = False
self.mpl_hooks_in_debug_console = False
self.mpl_modules_for_patching = {}
self._filename_to_not_in_scope = {}
def get_plugin_lazy_init(self):
if self.plugin is None and SUPPORT_PLUGINS:
self.plugin = PluginManager(self)
return self.plugin
def not_in_scope(self, filename):
return pydevd_utils.is_in_project_roots(filename)
def first_appearance_in_scope(self, trace):
if trace is None or self.not_in_scope(trace.tb_frame.f_code.co_filename):
return False
else:
trace = trace.tb_next
while trace is not None:
frame = trace.tb_frame
if not self.not_in_scope(frame.f_code.co_filename):
return False
trace = trace.tb_next
return True
def haveAliveThreads(self):
for t in threadingEnumerate():
if getattr(t, 'is_pydev_daemon_thread', False):
#Important: Jython 2.5rc4 has a bug where a thread created with thread.start_new_thread won't be
#set as a daemon thread, so, we also have to check for the 'is_pydev_daemon_thread' flag.
#See: https://github.com/fabioz/PyDev.Debugger/issues/11
continue
if isinstance(t, PyDBDaemonThread):
pydev_log.error_once(
'Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
if isThreadAlive(t):
if not t.isDaemon() or hasattr(t, "__pydevd_main_thread"):
return True
return False
def FinishDebuggingSession(self):
self._finishDebuggingSession = True
def acquire(self):
if PyDBUseLocks:
self.lock.acquire()
return True
def release(self):
if PyDBUseLocks:
self.lock.release()
return True
def initializeNetwork(self, sock):
try:
sock.settimeout(None) # infinite, no timeouts from now on - jython does not have it
except:
pass
self.writer = WriterThread(sock)
self.reader = ReaderThread(sock)
self.writer.start()
self.reader.start()
time.sleep(0.1) # give threads time to start
def connect(self, host, port):
if host:
s = StartClient(host, port)
else:
s = StartServer(port)
self.initializeNetwork(s)
def getInternalQueue(self, thread_id):
""" returns internal command queue for a given thread.
if new queue is created, notify the RDB about it """
if thread_id.startswith('__frame__'):
thread_id = thread_id[thread_id.rfind('|') + 1:]
try:
return self._cmd_queue[thread_id]
except KeyError:
return self._cmd_queue.setdefault(thread_id, _queue.Queue()) #@UndefinedVariable
def postInternalCommand(self, int_cmd, thread_id):
""" if thread_id is *, post to all """
if thread_id == "*":
threads = threadingEnumerate()
for t in threads:
thread_id = GetThreadId(t)
queue = self.getInternalQueue(thread_id)
queue.put(int_cmd)
else:
queue = self.getInternalQueue(thread_id)
queue.put(int_cmd)
def checkOutputRedirect(self):
global bufferStdOutToServer
global bufferStdErrToServer
if bufferStdOutToServer:
initStdoutRedirect()
self.checkOutput(sys.stdoutBuf, 1) #@UndefinedVariable
if bufferStdErrToServer:
initStderrRedirect()
self.checkOutput(sys.stderrBuf, 2) #@UndefinedVariable
def checkOutput(self, out, outCtx):
'''Checks the output to see if we have to send some buffered output to the debug server
@param out: sys.stdout or sys.stderr
@param outCtx: the context indicating: 1=stdout and 2=stderr (to know the colors to write it)
'''
try:
v = out.getvalue()
if v:
self.cmdFactory.makeIoMessage(v, outCtx, self)
except:
traceback.print_exc()
def init_matplotlib_in_debug_console(self):
# import hook and patches for matplotlib support in debug console
from pydev_import_hook import import_hook_manager
for module in DictKeys(self.mpl_modules_for_patching):
import_hook_manager.add_module_name(module, DictPop(self.mpl_modules_for_patching, module))
def init_matplotlib_support(self):
# prepare debugger for integration with matplotlib GUI event loop
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot, do_enable_gui
# enable_gui_function in activate_matplotlib should be called in main thread. Unlike integrated console,
# in the debug console we have no interpreter instance with exec_queue, but we run this code in the main
# thread and can call it directly.
class _MatplotlibHelper:
_return_control_osc = False
def return_control():
# Some of the input hooks (e.g. Qt4Agg) check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
_MatplotlibHelper._return_control_osc = not _MatplotlibHelper._return_control_osc
return _MatplotlibHelper._return_control_osc
from pydev_ipython.inputhook import set_return_control_callback
set_return_control_callback(return_control)
self.mpl_modules_for_patching = {"matplotlib": lambda: activate_matplotlib(do_enable_gui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab }
def processInternalCommands(self):
'''This function processes internal commands
'''
self._main_lock.acquire()
try:
self.checkOutputRedirect()
curr_thread_id = GetThreadId(threadingCurrentThread())
program_threads_alive = {}
all_threads = threadingEnumerate()
program_threads_dead = []
self._lock_running_thread_ids.acquire()
try:
for t in all_threads:
thread_id = GetThreadId(t)
if getattr(t, 'is_pydev_daemon_thread', False):
pass # I.e.: skip the DummyThreads created from pydev daemon threads
elif isinstance(t, PyDBDaemonThread):
pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
elif isThreadAlive(t):
program_threads_alive[thread_id] = t
if not DictContains(self._running_thread_ids, thread_id):
if not hasattr(t, 'additionalInfo'):
# see http://sourceforge.net/tracker/index.php?func=detail&aid=1955428&group_id=85796&atid=577329
# Let's create the additional info right away!
t.additionalInfo = PyDBAdditionalThreadInfo()
self._running_thread_ids[thread_id] = t
self.writer.addCommand(self.cmdFactory.makeThreadCreatedMessage(t))
queue = self.getInternalQueue(thread_id)
cmdsToReadd = [] # some commands must be processed by the thread itself... if that's the case,
# we will re-add the commands to the queue after executing.
try:
while True:
int_cmd = queue.get(False)
if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
# add import hooks for matplotlib patches if only debug console was started
try:
self.init_matplotlib_in_debug_console()
self.mpl_in_use = True
except:
PydevdLog(2, "Matplotlib support in debug console failed", traceback.format_exc())
self.mpl_hooks_in_debug_console = True
if int_cmd.canBeExecutedBy(curr_thread_id):
PydevdLog(2, "processing internal command ", str(int_cmd))
int_cmd.doIt(self)
else:
PydevdLog(2, "NOT processing internal command ", str(int_cmd))
cmdsToReadd.append(int_cmd)
except _queue.Empty: #@UndefinedVariable
for int_cmd in cmdsToReadd:
queue.put(int_cmd)
# this is how we exit
thread_ids = list(self._running_thread_ids.keys())
for tId in thread_ids:
if not DictContains(program_threads_alive, tId):
program_threads_dead.append(tId)
finally:
self._lock_running_thread_ids.release()
for tId in program_threads_dead:
try:
self.processThreadNotAlive(tId)
except:
sys.stderr.write('Error iterating through %s (%s) - %s\n' % (
program_threads_alive, program_threads_alive.__class__, dir(program_threads_alive)))
raise
if len(program_threads_alive) == 0:
self.FinishDebuggingSession()
for t in all_threads:
if hasattr(t, 'doKillPydevThread'):
t.doKillPydevThread()
finally:
self._main_lock.release()
def setTracingForUntracedContexts(self, ignore_frame=None, overwrite_prev_trace=False):
# Enable the tracing for existing threads (because there may be frames being executed that
# are currently untraced).
threads = threadingEnumerate()
try:
for t in threads:
# TODO: optimize so that we only actually add that tracing if it's in
# the new breakpoint context.
additionalInfo = None
try:
additionalInfo = t.additionalInfo
except AttributeError:
pass # that's ok, no info currently set
if additionalInfo is not None:
for frame in additionalInfo.IterFrames():
if frame is not ignore_frame:
self.SetTraceForFrameAndParents(frame, overwrite_prev_trace=overwrite_prev_trace)
finally:
frame = None
t = None
threads = None
additionalInfo = None
def consolidate_breakpoints(self, file, id_to_breakpoint, breakpoints):
break_dict = {}
for breakpoint_id, pybreakpoint in DictIterItems(id_to_breakpoint):
break_dict[pybreakpoint.line] = pybreakpoint
breakpoints[file] = break_dict
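        # (added note: the id -> breakpoint mapping kept for add/remove bookkeeping is flattened
        #  here into a line -> breakpoint dict per file, which is the shape the tracing code uses
        #  to look breakpoints up by line)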
def add_break_on_exception(
self,
exception,
notify_always,
notify_on_terminate,
notify_on_first_raise_only,
ignore_libraries=False
):
try:
eb = ExceptionBreakpoint(
exception,
notify_always,
notify_on_terminate,
notify_on_first_raise_only,
ignore_libraries
)
except ImportError:
pydev_log.error("Error unable to add break on exception for: %s (exception could not be imported)\n" % (exception,))
return None
if eb.notify_on_terminate:
cp = self.break_on_uncaught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.error("Exceptions to hook on terminate: %s\n" % (cp,))
self.break_on_uncaught_exceptions = cp
if eb.notify_always:
cp = self.break_on_caught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.error("Exceptions to hook always: %s\n" % (cp,))
self.break_on_caught_exceptions = cp
return eb
def update_after_exceptions_added(self, added):
updated_on_caught = False
updated_on_uncaught = False
for eb in added:
if not updated_on_uncaught and eb.notify_on_terminate:
updated_on_uncaught = True
update_exception_hook(self)
if not updated_on_caught and eb.notify_always:
updated_on_caught = True
self.setTracingForUntracedContexts()
def processNetCommand(self, cmd_id, seq, text):
'''Processes a command received from the Java side
@param cmd_id: the id of the command
@param seq: the sequence of the command
@param text: the text received in the command
        @note: this method is run as a big switch... after doing some tests, it's not clear whether changing it to
        a dict of id --> function call would give better performance. A simple test with xrange(10000000) showed
        that the gain from fast access to what should be executed is lost because of the extra function call, so
        that with around 10 elements in the switch the if..elif chain is better -- but growing the number of choices
        makes the dispatch solution look better -- so, if this gets more than 20-25 choices at some point,
        it may be worth refactoring it (or at least reordering the ifs so that the most used ones come first,
        which will probably give better performance).
'''
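        # (added illustration, not part of the implementation: the dict-dispatch alternative
        #  discussed in the docstring above would look roughly like the following, where the
        #  handler names are hypothetical --
        #      handlers = {CMD_RUN: self._on_run, CMD_LIST_THREADS: self._on_list_threads}
        #      handler = handlers.get(cmd_id)
        #      cmd = handler(seq, text) if handler is not None else None)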
#print(ID_TO_MEANING[str(cmd_id)], repr(text))
self._main_lock.acquire()
try:
try:
cmd = None
if cmd_id == CMD_RUN:
self.readyToRun = True
elif cmd_id == CMD_VERSION:
# response is version number
# ide_os should be 'WINDOWS' or 'UNIX'.
ide_os = 'WINDOWS'
# Breakpoints can be grouped by 'LINE' or by 'ID'.
breakpoints_by = 'LINE'
splitted = text.split('\t')
if len(splitted) == 1:
_local_version = splitted
elif len(splitted) == 2:
_local_version, ide_os = splitted
elif len(splitted) == 3:
_local_version, ide_os, breakpoints_by = splitted
if breakpoints_by == 'ID':
self._set_breakpoints_with_id = True
else:
self._set_breakpoints_with_id = False
pydevd_file_utils.set_ide_os(ide_os)
cmd = self.cmdFactory.makeVersionMessage(seq)
elif cmd_id == CMD_LIST_THREADS:
# response is a list of threads
cmd = self.cmdFactory.makeListThreadsMessage(seq)
elif cmd_id == CMD_THREAD_KILL:
int_cmd = InternalTerminateThread(text)
self.postInternalCommand(int_cmd, text)
elif cmd_id == CMD_THREAD_SUSPEND:
# Yes, thread suspend is still done at this point, not through an internal command!
t = PydevdFindThreadById(text)
if t:
additionalInfo = None
try:
additionalInfo = t.additionalInfo
except AttributeError:
pass # that's ok, no info currently set
if additionalInfo is not None:
for frame in additionalInfo.IterFrames():
self.SetTraceForFrameAndParents(frame)
del frame
self.setSuspend(t, CMD_THREAD_SUSPEND)
elif text.startswith('__frame__:'):
sys.stderr.write("Can't suspend tasklet: %s\n" % (text,))
elif cmd_id == CMD_THREAD_RUN:
t = PydevdFindThreadById(text)
if t:
thread_id = GetThreadId(t)
int_cmd = InternalRunThread(thread_id)
self.postInternalCommand(int_cmd, thread_id)
elif text.startswith('__frame__:'):
sys.stderr.write("Can't make tasklet run: %s\n" % (text,))
elif cmd_id == CMD_STEP_INTO or cmd_id == CMD_STEP_OVER or cmd_id == CMD_STEP_RETURN or \
cmd_id == CMD_STEP_INTO_MY_CODE:
# we received some command to make a single step
t = PydevdFindThreadById(text)
if t:
thread_id = GetThreadId(t)
int_cmd = InternalStepThread(thread_id, cmd_id)
self.postInternalCommand(int_cmd, thread_id)
elif text.startswith('__frame__:'):
sys.stderr.write("Can't make tasklet step command: %s\n" % (text,))
elif cmd_id == CMD_RUN_TO_LINE or cmd_id == CMD_SET_NEXT_STATEMENT or cmd_id == CMD_SMART_STEP_INTO:
# we received some command to make a single step
thread_id, line, func_name = text.split('\t', 2)
t = PydevdFindThreadById(thread_id)
if t:
int_cmd = InternalSetNextStatementThread(thread_id, cmd_id, line, func_name)
self.postInternalCommand(int_cmd, thread_id)
elif thread_id.startswith('__frame__:'):
sys.stderr.write("Can't set next statement in tasklet: %s\n" % (thread_id,))
elif cmd_id == CMD_RELOAD_CODE:
# we received some command to make a reload of a module
module_name = text.strip()
thread_id = '*' # Any thread
# Note: not going for the main thread because in this case it'd only do the load
# when we stopped on a breakpoint.
# for tid, t in self._running_thread_ids.items(): #Iterate in copy
# thread_name = t.getName()
#
# print thread_name, GetThreadId(t)
# #Note: if possible, try to reload on the main thread
# if thread_name == 'MainThread':
# thread_id = tid
int_cmd = ReloadCodeCommand(module_name, thread_id)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_CHANGE_VARIABLE:
# the text is: thread\tstackframe\tFRAME|GLOBAL\tattribute_to_change\tvalue_to_change
try:
thread_id, frame_id, scope, attr_and_value = text.split('\t', 3)
tab_index = attr_and_value.rindex('\t')
attr = attr_and_value[0:tab_index].replace('\t', '.')
value = attr_and_value[tab_index + 1:]
int_cmd = InternalChangeVariable(seq, thread_id, frame_id, scope, attr, value)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_VARIABLE:
# we received some command to get a variable
# the text is: thread_id\tframe_id\tFRAME|GLOBAL\tattributes*
try:
thread_id, frame_id, scopeattrs = text.split('\t', 2)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
int_cmd = InternalGetVariable(seq, thread_id, frame_id, scope, attrs)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_ARRAY:
# we received some command to get an array variable
                    # the text is: roffset\tcoffset\trows\tcols\tformat\tthread_id\tframe_id\tFRAME|GLOBAL\tattributes*
try:
roffset, coffset, rows, cols, format, thread_id, frame_id, scopeattrs = text.split('\t', 7)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
int_cmd = InternalGetArray(seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_COMPLETIONS:
                    # we received some command to get completions
                    # the text is: thread_id\tframe_id\tscope\tactivation token
try:
thread_id, frame_id, scope, act_tok = text.split('\t', 3)
int_cmd = InternalGetCompletions(seq, thread_id, frame_id, act_tok)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_FRAME:
thread_id, frame_id, scope = text.split('\t', 2)
int_cmd = InternalGetFrame(seq, thread_id, frame_id)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_SET_BREAK:
# func name: 'None': match anything. Empty: match global, specified: only method context.
# command to add some breakpoint.
# text is file\tline. Add to breakpoints dictionary
if self._set_breakpoints_with_id:
breakpoint_id, type, file, line, func_name, condition, expression = text.split('\t', 6)
breakpoint_id = int(breakpoint_id)
line = int(line)
# We must restore new lines and tabs as done in
# AbstractDebugTarget.breakpointAdded
condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n').\
replace("@_@TAB_CHAR@_@", '\t').strip()
expression = expression.replace("@_@NEW_LINE_CHAR@_@", '\n').\
replace("@_@TAB_CHAR@_@", '\t').strip()
else:
#Note: this else should be removed after PyCharm migrates to setting
#breakpoints by id (and ideally also provides func_name).
type, file, line, func_name, condition, expression = text.split('\t', 5)
# If we don't have an id given for each breakpoint, consider
# the id to be the line.
breakpoint_id = line = int(line)
condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n'). \
replace("@_@TAB_CHAR@_@", '\t').strip()
expression = expression.replace("@_@NEW_LINE_CHAR@_@", '\n'). \
replace("@_@TAB_CHAR@_@", '\t').strip()
if not IS_PY3K: # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
file = file.encode(file_system_encoding)
file = NormFileToServer(file)
if not pydevd_file_utils.exists(file):
sys.stderr.write('pydev debugger: warning: trying to add breakpoint'\
' to file that does not exist: %s (will have no effect)\n' % (file,))
sys.stderr.flush()
if len(condition) <= 0 or condition is None or condition == "None":
condition = None
if len(expression) <= 0 or expression is None or expression == "None":
expression = None
supported_type = False
if type == 'python-line':
breakpoint = LineBreakpoint(line, condition, func_name, expression)
breakpoints = self.breakpoints
file_to_id_to_breakpoint = self.file_to_id_to_line_breakpoint
supported_type = True
else:
result = None
plugin = self.get_plugin_lazy_init()
if plugin is not None:
result = plugin.add_breakpoint('add_line_breakpoint', self, type, file, line, condition, expression, func_name)
if result is not None:
supported_type = True
breakpoint, breakpoints = result
file_to_id_to_breakpoint = self.file_to_id_to_plugin_breakpoint
else:
supported_type = False
if not supported_type:
raise NameError(type)
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.debug('Added breakpoint:%s - line:%s - func_name:%s\n' % (file, line, func_name.encode('utf-8')))
sys.stderr.flush()
if DictContains(file_to_id_to_breakpoint, file):
id_to_pybreakpoint = file_to_id_to_breakpoint[file]
else:
id_to_pybreakpoint = file_to_id_to_breakpoint[file] = {}
id_to_pybreakpoint[breakpoint_id] = breakpoint
self.consolidate_breakpoints(file, id_to_pybreakpoint, breakpoints)
if self.plugin is not None:
self.has_plugin_line_breaks = self.plugin.has_line_breaks()
self.setTracingForUntracedContexts(overwrite_prev_trace=True)
elif cmd_id == CMD_REMOVE_BREAK:
#command to remove some breakpoint
#text is type\file\tid. Remove from breakpoints dictionary
breakpoint_type, file, breakpoint_id = text.split('\t', 2)
if not IS_PY3K: # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
file = file.encode(file_system_encoding)
file = NormFileToServer(file)
try:
breakpoint_id = int(breakpoint_id)
except ValueError:
pydev_log.error('Error removing breakpoint. Expected breakpoint_id to be an int. Found: %s' % (breakpoint_id,))
else:
file_to_id_to_breakpoint = None
if breakpoint_type == 'python-line':
breakpoints = self.breakpoints
file_to_id_to_breakpoint = self.file_to_id_to_line_breakpoint
elif self.get_plugin_lazy_init() is not None:
result = self.plugin.get_breakpoints(self, breakpoint_type)
if result is not None:
file_to_id_to_breakpoint = self.file_to_id_to_plugin_breakpoint
breakpoints = result
if file_to_id_to_breakpoint is None:
pydev_log.error('Error removing breakpoint. Cant handle breakpoint of type %s' % breakpoint_type)
else:
try:
id_to_pybreakpoint = file_to_id_to_breakpoint.get(file, {})
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
existing = id_to_pybreakpoint[breakpoint_id]
sys.stderr.write('Removed breakpoint:%s - line:%s - func_name:%s (id: %s)\n' % (
file, existing.line, existing.func_name.encode('utf-8'), breakpoint_id))
del id_to_pybreakpoint[breakpoint_id]
self.consolidate_breakpoints(file, id_to_pybreakpoint, breakpoints)
if self.plugin is not None:
self.has_plugin_line_breaks = self.plugin.has_line_breaks()
except KeyError:
pydev_log.error("Error removing breakpoint: Breakpoint id not found: %s id: %s. Available ids: %s\n" % (
file, breakpoint_id, DictKeys(id_to_pybreakpoint)))
elif cmd_id == CMD_EVALUATE_EXPRESSION or cmd_id == CMD_EXEC_EXPRESSION:
#command to evaluate the given expression
#text is: thread\tstackframe\tLOCAL\texpression
thread_id, frame_id, scope, expression, trim = text.split('\t', 4)
int_cmd = InternalEvaluateExpression(seq, thread_id, frame_id, expression,
cmd_id == CMD_EXEC_EXPRESSION, int(trim) == 1)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_CONSOLE_EXEC:
#command to exec expression in console, in case expression is only partially valid 'False' is returned
#text is: thread\tstackframe\tLOCAL\texpression
thread_id, frame_id, scope, expression = text.split('\t', 3)
int_cmd = InternalConsoleExec(seq, thread_id, frame_id, expression)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_SET_PY_EXCEPTION:
# Command which receives set of exceptions on which user wants to break the debugger
# text is: break_on_uncaught;break_on_caught;TypeError;ImportError;zipimport.ZipImportError;
# This API is optional and works 'in bulk' -- it's possible
# to get finer-grained control with CMD_ADD_EXCEPTION_BREAK/CMD_REMOVE_EXCEPTION_BREAK
# which allows setting caught/uncaught per exception.
#
splitted = text.split(';')
self.break_on_uncaught_exceptions = {}
self.break_on_caught_exceptions = {}
added = []
if len(splitted) >= 4:
if splitted[0] == 'true':
break_on_uncaught = True
else:
break_on_uncaught = False
if splitted[1] == 'true':
break_on_caught = True
else:
break_on_caught = False
if splitted[2] == 'true':
self.break_on_exceptions_thrown_in_same_context = True
else:
self.break_on_exceptions_thrown_in_same_context = False
if splitted[3] == 'true':
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
else:
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = False
for exception_type in splitted[4:]:
exception_type = exception_type.strip()
if not exception_type:
continue
exception_breakpoint = self.add_break_on_exception(
exception_type,
notify_always=break_on_caught,
notify_on_terminate=break_on_uncaught,
notify_on_first_raise_only=False,
)
if exception_breakpoint is None:
continue
added.append(exception_breakpoint)
self.update_after_exceptions_added(added)
else:
sys.stderr.write("Error when setting exception list. Received: %s\n" % (text,))
elif cmd_id == CMD_GET_FILE_CONTENTS:
if not IS_PY3K: # In Python 3 the filename is already unicode; on Python 2 it must be encoded with the filesystem encoding.
text = text.encode(file_system_encoding)
if os.path.exists(text):
f = open(text, 'r')
try:
source = f.read()
finally:
f.close()
cmd = self.cmdFactory.makeGetFileContents(seq, source)
elif cmd_id == CMD_SET_PROPERTY_TRACE:
# Command which receives whether to trace property getter/setter/deleter
# text is feature_state(true/false);disable_getter/disable_setter/disable_deleter
if text != "":
splitted = text.split(';')
if len(splitted) >= 4:  # feature_state plus the three getter/setter/deleter flags accessed below
if self.disable_property_trace is False and splitted[0] == 'true':
# Replacing property by custom property only when the debugger starts
pydevd_traceproperty.replace_builtin_property()
self.disable_property_trace = True
# Enable/Disable tracing of the property getter
if splitted[1] == 'true':
self.disable_property_getter_trace = True
else:
self.disable_property_getter_trace = False
# Enable/Disable tracing of the property setter
if splitted[2] == 'true':
self.disable_property_setter_trace = True
else:
self.disable_property_setter_trace = False
# Enable/Disable tracing of the property deleter
if splitted[3] == 'true':
self.disable_property_deleter_trace = True
else:
self.disable_property_deleter_trace = False
else:
# User hasn't configured any settings for property tracing
pass
elif cmd_id == CMD_ADD_EXCEPTION_BREAK:
if text.find('\t') != -1:
exception, notify_always, notify_on_terminate, ignore_libraries = text.split('\t', 3)
else:
exception, notify_always, notify_on_terminate, ignore_libraries = text, 0, 0, 0
if exception.find('-') != -1:
type, exception = exception.split('-')
else:
type = 'python'
if type == 'python':
if int(notify_always) == 1:
pydev_log.warn("Deprecated parameter: 'notify always' policy removed in PyCharm\n")
exception_breakpoint = self.add_break_on_exception(
exception,
notify_always=int(notify_always) > 0,
notify_on_terminate = int(notify_on_terminate) == 1,
notify_on_first_raise_only=int(notify_always) == 2,
ignore_libraries=int(ignore_libraries) > 0
)
if exception_breakpoint is not None:
self.update_after_exceptions_added([exception_breakpoint])
else:
supported_type = False
plugin = self.get_plugin_lazy_init()
if plugin is not None:
supported_type = plugin.add_breakpoint('add_exception_breakpoint', self, type, exception)
if supported_type:
self.has_plugin_exception_breaks = self.plugin.has_exception_breaks()
else:
raise NameError(type)
elif cmd_id == CMD_REMOVE_EXCEPTION_BREAK:
exception = text
if exception.find('-') != -1:
type, exception = exception.split('-')
else:
type = 'python'
if type == 'python':
try:
cp = self.break_on_uncaught_exceptions.copy()
DictPop(cp, exception, None)
self.break_on_uncaught_exceptions = cp
cp = self.break_on_caught_exceptions.copy()
DictPop(cp, exception, None)
self.break_on_caught_exceptions = cp
except:
pydev_log.debug("Error while removing exception %s"%sys.exc_info()[0])
update_exception_hook(self)
else:
supported_type = False
# I.e.: no need to initialize lazy (if we didn't have it in the first place, we can't remove
# anything from it anyways).
plugin = self.plugin
if plugin is not None:
supported_type = plugin.remove_exception_breakpoint(self, type, exception)
if supported_type:
self.has_plugin_exception_breaks = self.plugin.has_exception_breaks()
else:
raise NameError(type)
elif cmd_id == CMD_LOAD_SOURCE:
path = text
try:
f = open(path, 'r')
source = f.read()
self.cmdFactory.makeLoadSourceMessage(seq, source, self)
except:
return self.cmdFactory.makeErrorMessage(seq, pydevd_tracing.GetExceptionTracebackStr())
elif cmd_id == CMD_ADD_DJANGO_EXCEPTION_BREAK:
exception = text
plugin = self.get_plugin_lazy_init()
if plugin is not None:
plugin.add_breakpoint('add_exception_breakpoint', self, 'django', exception)
self.has_plugin_exception_breaks = self.plugin.has_exception_breaks()
elif cmd_id == CMD_REMOVE_DJANGO_EXCEPTION_BREAK:
exception = text
# I.e.: no need to initialize lazy (if we didn't have it in the first place, we can't remove
# anything from it anyways).
plugin = self.plugin
if plugin is not None:
plugin.remove_exception_breakpoint(self, 'django', exception)
self.has_plugin_exception_breaks = self.plugin.has_exception_breaks()
elif cmd_id == CMD_EVALUATE_CONSOLE_EXPRESSION:
# Command which takes care for the debug console communication
if text != "":
thread_id, frame_id, console_command = text.split('\t', 2)
console_command, line = console_command.split('\t')
if console_command == 'EVALUATE':
int_cmd = InternalEvaluateConsoleExpression(seq, thread_id, frame_id, line)
elif console_command == 'GET_COMPLETIONS':
int_cmd = InternalConsoleGetCompletions(seq, thread_id, frame_id, line)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_RUN_CUSTOM_OPERATION:
# Command which runs a custom operation
if text != "":
try:
location, custom = text.split('||', 1)
except:
sys.stderr.write('Custom operation now needs a || separator. Found: %s\n' % (text,))
raise
thread_id, frame_id, scopeattrs = location.split('\t', 2)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
# : style: EXECFILE or EXEC
# : encoded_code_or_file: file to execute or code
# : fname: name of function to be executed in the resulting namespace
style, encoded_code_or_file, fnname = custom.split('\t', 3)
int_cmd = InternalRunCustomOperation(seq, thread_id, frame_id, scope, attrs,
style, encoded_code_or_file, fnname)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_IGNORE_THROWN_EXCEPTION_AT:
if text:
replace = 'REPLACE:' # Not all 3.x versions support u'REPLACE:', so, doing workaround.
if not IS_PY3K:
replace = unicode(replace)
if text.startswith(replace):
text = text[8:]
self.filename_to_lines_where_exceptions_are_ignored.clear()
if text:
for line in text.split('||'): # Can be bulk-created (one in each line)
filename, line_number = line.split('|')
if not IS_PY3K:
filename = filename.encode(file_system_encoding)
filename = NormFileToServer(filename)
if os.path.exists(filename):
lines_ignored = self.filename_to_lines_where_exceptions_are_ignored.get(filename)
if lines_ignored is None:
lines_ignored = self.filename_to_lines_where_exceptions_are_ignored[filename] = {}
lines_ignored[int(line_number)] = 1
else:
sys.stderr.write('pydev debugger: warning: trying to ignore exception thrown'\
' on file that does not exist: %s (will have no effect)\n' % (filename,))
elif cmd_id == CMD_ENABLE_DONT_TRACE:
if text:
true_str = 'true' # Not all 3.x versions support u'str', so, doing workaround.
if not IS_PY3K:
true_str = unicode(true_str)
mode = text.strip() == true_str
pydevd_dont_trace.trace_filter(mode)
else:
#unrecognized command id -- report it back to the IDE as an error
cmd = self.cmdFactory.makeErrorMessage(seq, "unexpected command " + str(cmd_id))
if cmd is not None:
self.writer.addCommand(cmd)
del cmd
except Exception:
traceback.print_exc()
cmd = self.cmdFactory.makeErrorMessage(seq,
"Unexpected exception in processNetCommand.\nInitial params: %s" % ((cmd_id, seq, text),))
self.writer.addCommand(cmd)
finally:
self._main_lock.release()
def processThreadNotAlive(self, threadId):
""" if thread is not alive, cancel trace_dispatch processing """
self._lock_running_thread_ids.acquire()
try:
thread = self._running_thread_ids.pop(threadId, None)
if thread is None:
return
wasNotified = thread.additionalInfo.pydev_notify_kill
if not wasNotified:
thread.additionalInfo.pydev_notify_kill = True
finally:
self._lock_running_thread_ids.release()
cmd = self.cmdFactory.makeThreadKilledMessage(threadId)
self.writer.addCommand(cmd)
def setSuspend(self, thread, stop_reason):
thread.additionalInfo.suspend_type = PYTHON_SUSPEND
thread.additionalInfo.pydev_state = STATE_SUSPEND
thread.stop_reason = stop_reason
# If conditional breakpoint raises any exception during evaluation send details to Java
if stop_reason == CMD_SET_BREAK and self.suspend_on_breakpoint_exception:
self.sendBreakpointConditionException(thread)
def sendBreakpointConditionException(self, thread):
"""If conditional breakpoint raises an exception during evaluation
send exception details to java
"""
thread_id = GetThreadId(thread)
conditional_breakpoint_exception_tuple = thread.additionalInfo.conditional_breakpoint_exception
# conditional_breakpoint_exception_tuple - should contain 2 values (exception_type, stacktrace)
if conditional_breakpoint_exception_tuple and len(conditional_breakpoint_exception_tuple) == 2:
exc_type, stacktrace = conditional_breakpoint_exception_tuple
int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
# Reset the conditional_breakpoint_exception details to None
thread.additionalInfo.conditional_breakpoint_exception = None
self.postInternalCommand(int_cmd, thread_id)
def sendCaughtExceptionStack(self, thread, arg, curr_frame_id):
"""Sends details on the exception which was caught (and where we stopped) to the java side.
arg is: exception type, description, traceback object
"""
thread_id = GetThreadId(thread)
int_cmd = InternalSendCurrExceptionTrace(thread_id, arg, curr_frame_id)
self.postInternalCommand(int_cmd, thread_id)
def sendCaughtExceptionStackProceeded(self, thread):
"""Sends that some thread was resumed and is no longer showing an exception trace.
"""
thread_id = GetThreadId(thread)
int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id)
self.postInternalCommand(int_cmd, thread_id)
self.processInternalCommands()
def doWaitSuspend(self, thread, frame, event, arg): #@UnusedVariable
""" busy waits until the thread state changes to RUN
it expects thread's state as attributes of the thread.
Upon running, processes any outstanding Stepping commands.
"""
self.processInternalCommands()
message = getattr(thread.additionalInfo, "message", None)
cmd = self.cmdFactory.makeThreadSuspendMessage(GetThreadId(thread), frame, thread.stop_reason, message)
self.writer.addCommand(cmd)
CustomFramesContainer.custom_frames_lock.acquire()
try:
from_this_thread = []
for frame_id, custom_frame in DictIterItems(CustomFramesContainer.custom_frames):
if custom_frame.thread_id == thread.ident:
# print >> sys.stderr, 'Frame created: ', frame_id
self.writer.addCommand(self.cmdFactory.makeCustomFrameCreatedMessage(frame_id, custom_frame.name))
self.writer.addCommand(self.cmdFactory.makeThreadSuspendMessage(frame_id, custom_frame.frame, CMD_THREAD_SUSPEND, ""))
from_this_thread.append(frame_id)
finally:
CustomFramesContainer.custom_frames_lock.release()
imported = False
info = thread.additionalInfo
if info.pydev_state == STATE_SUSPEND and not self._finishDebuggingSession:
# before every stop check if matplotlib modules were imported inside script code
if len(self.mpl_modules_for_patching) > 0:
for module in DictKeys(self.mpl_modules_for_patching):
if module in sys.modules:
activate_function = DictPop(self.mpl_modules_for_patching, module)
activate_function()
self.mpl_in_use = True
while info.pydev_state == STATE_SUSPEND and not self._finishDebuggingSession:
if self.mpl_in_use:
# call input hooks if only matplotlib is in use
try:
if not imported:
from pydev_ipython.inputhook import get_inputhook
imported = True
inputhook = get_inputhook()
if inputhook:
inputhook()
except:
pass
self.processInternalCommands()
time.sleep(0.01)
# process any stepping instructions
if info.pydev_step_cmd == CMD_STEP_INTO or info.pydev_step_cmd == CMD_STEP_INTO_MY_CODE:
info.pydev_step_stop = None
info.pydev_smart_step_stop = None
elif info.pydev_step_cmd == CMD_STEP_OVER:
info.pydev_step_stop = frame
info.pydev_smart_step_stop = None
self.SetTraceForFrameAndParents(frame)
elif info.pydev_step_cmd == CMD_SMART_STEP_INTO:
self.SetTraceForFrameAndParents(frame)
info.pydev_step_stop = None
info.pydev_smart_step_stop = frame
elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT :
self.SetTraceForFrameAndParents(frame)
if event == 'line' or event == 'exception':
#If we're already in the correct context, we have to stop it now, because we can act only on
#line events -- if a return was the next statement it wouldn't work (so, we have this code
#repeated at pydevd_frame).
stop = False
curr_func_name = frame.f_code.co_name
#global context is set with an empty name
if curr_func_name in ('?', '<module>'):
curr_func_name = ''
if curr_func_name == info.pydev_func_name:
line = info.pydev_next_line
if frame.f_lineno == line:
stop = True
else :
if frame.f_trace is None:
frame.f_trace = self.trace_dispatch
frame.f_lineno = line
frame.f_trace = None
stop = True
if stop:
info.pydev_state = STATE_SUSPEND
self.doWaitSuspend(thread, frame, event, arg)
return
elif info.pydev_step_cmd == CMD_STEP_RETURN:
back_frame = frame.f_back
if back_frame is not None:
# steps back to the same frame (in a return call it will stop in the 'back frame' for the user)
info.pydev_step_stop = frame
self.SetTraceForFrameAndParents(frame)
else:
# No back frame?!? -- this happens in Jython when we have some frame created from an awt event
# (the previous frame would be the awt event, but that frame belongs to 'java', not 'jython')
# so, if we're doing a step return in this situation, it's the same as just making it run
info.pydev_step_stop = None
info.pydev_step_cmd = None
info.pydev_state = STATE_RUN
del frame
cmd = self.cmdFactory.makeThreadRunMessage(GetThreadId(thread), info.pydev_step_cmd)
self.writer.addCommand(cmd)
CustomFramesContainer.custom_frames_lock.acquire()
try:
# The ones that remained on last_running must now be removed.
for frame_id in from_this_thread:
# print >> sys.stderr, 'Removing created frame: ', frame_id
self.writer.addCommand(self.cmdFactory.makeThreadKilledMessage(frame_id))
finally:
CustomFramesContainer.custom_frames_lock.release()
def handle_post_mortem_stop(self, additionalInfo, t):
pydev_log.debug("We are stopping in post-mortem\n")
frame, frames_byid = additionalInfo.pydev_force_stop_at_exception
thread_id = GetThreadId(t)
pydevd_vars.addAdditionalFrameById(thread_id, frames_byid)
try:
try:
add_exception_to_frame(frame, additionalInfo.exception)
self.setSuspend(t, CMD_ADD_EXCEPTION_BREAK)
self.doWaitSuspend(t, frame, 'exception', None)
except:
pydev_log.error("We've got an error while stopping in post-mortem: %s\n"%sys.exc_info()[0])
finally:
additionalInfo.pydev_force_stop_at_exception = None
pydevd_vars.removeAdditionalFrameById(thread_id)
def trace_dispatch(self, frame, event, arg):
''' This is the callback used when we enter some context in the debugger.
We also decorate the thread we are in with info about the debugging.
The attributes added are:
pydev_state
pydev_step_stop
pydev_step_cmd
pydev_notify_kill
'''
try:
if self._finishDebuggingSession and not self._terminationEventSent:
#that was not working very well because jython gave some socket errors
try:
if self.output_checker is None:
killAllPydevThreads()
except:
traceback.print_exc()
self._terminationEventSent = True
return None
filename, base = GetFilenameAndBase(frame)
is_file_to_ignore = DictContains(DONT_TRACE, base) #we don't want to debug threading or anything related to pydevd
#print('trace_dispatch', base, frame.f_lineno, event, frame.f_code.co_name, is_file_to_ignore)
if is_file_to_ignore:
if DONT_TRACE[base] == LIB_FILE:
if self.not_in_scope(filename):
return None
else:
return None
try:
#this shouldn't give an exception, but it could happen... (python bug)
#see http://mail.python.org/pipermail/python-bugs-list/2007-June/038796.html
#and related bug: http://bugs.python.org/issue1733757
t = threadingCurrentThread()
except:
frame.f_trace = self.trace_dispatch
return self.trace_dispatch
try:
additionalInfo = t.additionalInfo
if additionalInfo is None:
raise AttributeError()
except:
t.additionalInfo = PyDBAdditionalThreadInfo()
additionalInfo = t.additionalInfo
if additionalInfo is None:
return None
if additionalInfo.is_tracing:
f = frame
while f is not None:
if 'trace_dispatch' == f.f_code.co_name:
_fname, bs = GetFilenameAndBase(f)
if bs == 'pydevd_frame.py':
return None #we don't want to trace code invoked from pydevd_frame.trace_dispatch
f = f.f_back
# if thread is not alive, cancel trace_dispatch processing
if not isThreadAlive(t):
self.processThreadNotAlive(GetThreadId(t))
return None # suspend tracing
# each new frame...
return additionalInfo.CreateDbFrame((self, filename, additionalInfo, t, frame)).trace_dispatch(frame, event, arg)
except SystemExit:
return None
except Exception:
# Log it
try:
if traceback is not None:
# This can actually happen during the interpreter shutdown in Python 2.7
traceback.print_exc()
except:
# Error logging? We're really in the interpreter shutdown...
# (https://github.com/fabioz/PyDev.Debugger/issues/8)
pass
return None
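# Note added for clarity (not in the original source): trace_dispatch is the
# function installed via sys.settrace/threading.settrace in prepareToRun and
# settrace below. Per the CPython tracing contract, returning a callable keeps
# per-frame tracing alive, while returning None switches tracing off for that
# frame -- which is how the DONT_TRACE and dead-thread short-circuits above
# cheaply disable the debugger where it is not wanted.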
if USE_PSYCO_OPTIMIZATION:
try:
import psyco
trace_dispatch = psyco.proxy(trace_dispatch)
processNetCommand = psyco.proxy(processNetCommand)
processInternalCommands = psyco.proxy(processInternalCommands)
doWaitSuspend = psyco.proxy(doWaitSuspend)
getInternalQueue = psyco.proxy(getInternalQueue)
except ImportError:
if hasattr(sys, 'exc_clear'): # jython does not have it
sys.exc_clear() # don't keep the traceback (let's keep it clear for when we go to the point of executing client code)
if not IS_PY3K and not IS_PY27 and not IS_64_BITS and not sys.platform.startswith("java") and not sys.platform.startswith("cli"):
sys.stderr.write("pydev debugger: warning: psyco not available for speedups (the debugger will still work correctly, but a bit slower)\n")
sys.stderr.flush()
def SetTraceForFrameAndParents(self, frame, also_add_to_passed_frame=True, overwrite_prev_trace=False, dispatch_func=None):
if dispatch_func is None:
dispatch_func = self.trace_dispatch
if also_add_to_passed_frame:
self.update_trace(frame, dispatch_func, overwrite_prev_trace)
frame = frame.f_back
while frame:
self.update_trace(frame, dispatch_func, overwrite_prev_trace)
frame = frame.f_back
del frame
def update_trace(self, frame, dispatch_func, overwrite_prev):
if frame.f_trace is None:
frame.f_trace = dispatch_func
else:
if overwrite_prev:
frame.f_trace = dispatch_func
else:
try:
#If it's the trace_exception, go back to the frame trace dispatch!
if frame.f_trace.im_func.__name__ == 'trace_exception':
frame.f_trace = frame.f_trace.im_self.trace_dispatch
except AttributeError:
pass
frame = frame.f_back
del frame
def prepareToRun(self):
''' Shared code to prepare debugging by installing traces and registering threads '''
self.patch_threads()
pydevd_tracing.SetTrace(self.trace_dispatch)
PyDBCommandThread(self).start()
if self.signature_factory is not None:
# we need all data to be sent to IDE even after program finishes
CheckOutputThread(self).start()
def patch_threads(self):
try:
# not available in jython!
threading.settrace(self.trace_dispatch) # for all future threads
except:
pass
from pydev_monkey import patch_thread_modules
patch_thread_modules()
def get_fullname(self, mod_name):
try:
loader = pkgutil.get_loader(mod_name)
except:
return None
if loader is not None:
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return meth(mod_name)
return None
def run(self, file, globals=None, locals=None, module=False, set_trace=True):
if module:
filename = self.get_fullname(file)
if filename is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
file = filename
if os.path.isdir(file):
new_target = os.path.join(file, '__main__.py')
if os.path.isfile(new_target):
file = new_target
if globals is None:
m = save_main_module(file, 'pydevd')
globals = m.__dict__
try:
globals['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
if locals is None:
locals = globals
if set_trace:
# Predefined (writable) attributes: __name__ is the module's name;
# __doc__ is the module's documentation string, or None if unavailable;
# __file__ is the pathname of the file from which the module was loaded,
# if it was loaded from a file. The __file__ attribute is not present for
# C modules that are statically linked into the interpreter; for extension modules
# loaded dynamically from a shared library, it is the pathname of the shared library file.
# I think this is an ugly hack, but it works (seems to) for the bug that says that sys.path should be the same in
# debug and run.
if m.__file__.startswith(sys.path[0]):
# print >> sys.stderr, 'Deleting: ', sys.path[0]
del sys.path[0]
# now, the local directory has to be added to the pythonpath
# sys.path.insert(0, os.getcwd())
# Changed: it's not the local directory, but the directory of the file launched
# The file being run must be in the pythonpath (even if it was not before)
sys.path.insert(0, os.path.split(file)[0])
self.prepareToRun()
while not self.readyToRun:
time.sleep(0.1) # busy wait until we receive run command
try:
self.init_matplotlib_support()
except:
sys.stderr.write("Matplotlib support in debugger failed\n")
traceback.print_exc()
pydev_imports.execfile(file, globals, locals) # execute the script
def exiting(self):
sys.stdout.flush()
sys.stderr.flush()
self.checkOutputRedirect()
cmd = self.cmdFactory.makeExitMessage()
self.writer.addCommand(cmd)
def wait_for_commands(self, globals):
thread = threading.currentThread()
import pydevd_frame_utils
frame = pydevd_frame_utils.Frame(None, -1, pydevd_frame_utils.FCode("Console",
os.path.abspath(os.path.dirname(__file__))), globals, globals)
thread_id = GetThreadId(thread)
import pydevd_vars
pydevd_vars.addAdditionalFrameById(thread_id, {id(frame): frame})
cmd = self.cmdFactory.makeShowConsoleMessage(thread_id, frame)
self.writer.addCommand(cmd)
while True:
self.processInternalCommands()
time.sleep(0.01)
def set_debug(setup):
setup['DEBUG_RECORD_SOCKET_READS'] = True
setup['DEBUG_TRACE_BREAKPOINTS'] = 1
setup['DEBUG_TRACE_LEVEL'] = 3
def processCommandLine(argv):
""" parses the arguments.
removes our arguments from the command line """
setup = {}
setup['client'] = ''
setup['server'] = False
setup['port'] = 0
setup['file'] = ''
setup['multiproc'] = False #Used by PyCharm (reuses connection: ssh tunneling)
setup['multiprocess'] = False # Used by PyDev (creates new connection to ide)
setup['save-signatures'] = False
setup['print-in-debugger-startup'] = False
setup['cmd-line'] = False
setup['module'] = False
i = 0
del argv[0]
while (i < len(argv)):
if argv[i] == '--port':
del argv[i]
setup['port'] = int(argv[i])
del argv[i]
elif argv[i] == '--vm_type':
del argv[i]
setup['vm_type'] = argv[i]
del argv[i]
elif argv[i] == '--client':
del argv[i]
setup['client'] = argv[i]
del argv[i]
elif argv[i] == '--server':
del argv[i]
setup['server'] = True
elif argv[i] == '--file':
del argv[i]
setup['file'] = argv[i]
i = len(argv) # pop out, file is our last argument
elif argv[i] == '--DEBUG_RECORD_SOCKET_READS':
del argv[i]
setup['DEBUG_RECORD_SOCKET_READS'] = True
elif argv[i] == '--DEBUG':
del argv[i]
set_debug(setup)
elif argv[i] == '--multiproc':
del argv[i]
setup['multiproc'] = True
elif argv[i] == '--multiprocess':
del argv[i]
setup['multiprocess'] = True
elif argv[i] == '--save-signatures':
del argv[i]
setup['save-signatures'] = True
elif argv[i] == '--print-in-debugger-startup':
del argv[i]
setup['print-in-debugger-startup'] = True
elif (argv[i] == '--cmd-line'):
del argv[i]
setup['cmd-line'] = True
elif (argv[i] == '--module'):
del argv[i]
setup['module'] = True
else:
raise ValueError("unexpected option " + argv[i])
return setup
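# Illustrative invocation (assumed values, matching the options parsed above):
#
#     pydevd.py --port 5678 --client 127.0.0.1 --multiproc --file my_script.py arg1
#
# would return a setup dict with setup['port'] == 5678,
# setup['client'] == '127.0.0.1', setup['multiproc'] == True and
# setup['file'] == 'my_script.py'; the file name and anything after it are
# left in argv for the program being debugged.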
def usage(doExit=0):
sys.stdout.write('Usage:\n')
sys.stdout.write('pydevd.py --port N [(--client hostname) | --server] --file executable [file_options]\n')
if doExit:
sys.exit(0)
def initStdoutRedirect():
if not getattr(sys, 'stdoutBuf', None):
sys.stdoutBuf = pydevd_io.IOBuf()
sys.stdout_original = sys.stdout
sys.stdout = pydevd_io.IORedirector(sys.stdout, sys.stdoutBuf) #@UndefinedVariable
def initStderrRedirect():
if not getattr(sys, 'stderrBuf', None):
sys.stderrBuf = pydevd_io.IOBuf()
sys.stderr_original = sys.stderr
sys.stderr = pydevd_io.IORedirector(sys.stderr, sys.stderrBuf) #@UndefinedVariable
def has_data_to_redirect():
if getattr(sys, 'stdoutBuf', None):
if not sys.stdoutBuf.empty():
return True
if getattr(sys, 'stderrBuf', None):
if not sys.stderrBuf.empty():
return True
return False
#=======================================================================================================================
# settrace
#=======================================================================================================================
def settrace(
host=None,
stdoutToServer=False,
stderrToServer=False,
port=5678,
suspend=True,
trace_only_current_thread=False,
overwrite_prev_trace=False,
patch_multiprocessing=False,
):
'''Sets the tracing function with the pydev debug function and initializes needed facilities.
@param host: the user may specify another host, if the debug server is not in the same machine (default is the local
host)
@param stdoutToServer: when this is true, the stdout is passed to the debug server
@param stderrToServer: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
@param port: specifies which port to use for communicating with the server (note that the server must be started
in the same port). @note: currently it's hard-coded at 5678 in the client
@param suspend: whether a breakpoint should be emulated as soon as this function is called.
@param trace_only_current_thread: determines if only the current thread will be traced or all current and future
threads will also have the tracing enabled.
@param overwrite_prev_trace: if True we'll reset the frame.f_trace of frames which are already being traced
@param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
processes are debugged.
'''
_set_trace_lock.acquire()
try:
_locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
overwrite_prev_trace,
patch_multiprocessing,
)
finally:
_set_trace_lock.release()
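# Illustrative usage of the public entry point above (not part of the module):
# a script being debugged remotely would typically contain something like
#
#     import pydevd
#     pydevd.settrace('10.0.0.1', port=5678, stdoutToServer=True,
#                     stderrToServer=True, suspend=True)
#
# where the host and port are placeholders for wherever the IDE's debug server
# is listening; with suspend=True execution pauses here until the IDE resumes it.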
_set_trace_lock = _pydev_thread.allocate_lock()
def _locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
overwrite_prev_trace,
patch_multiprocessing,
):
if patch_multiprocessing:
try:
import pydev_monkey #Jython 2.1 can't use it...
except:
pass
else:
pydev_monkey.patch_new_process_functions()
if host is None:
import pydev_localhost
host = pydev_localhost.get_localhost()
global connected
global bufferStdOutToServer
global bufferStdErrToServer
if not connected :
pydevd_vm_type.SetupType()
debugger = PyDB()
debugger.connect(host, port) # Note: connect can raise error.
# Mark connected only if it actually succeeded.
connected = True
bufferStdOutToServer = stdoutToServer
bufferStdErrToServer = stderrToServer
if bufferStdOutToServer:
initStdoutRedirect()
if bufferStdErrToServer:
initStderrRedirect()
debugger.SetTraceForFrameAndParents(GetFrame(), False, overwrite_prev_trace=overwrite_prev_trace)
CustomFramesContainer.custom_frames_lock.acquire()
try:
for _frameId, custom_frame in DictIterItems(CustomFramesContainer.custom_frames):
debugger.SetTraceForFrameAndParents(custom_frame.frame, False)
finally:
CustomFramesContainer.custom_frames_lock.release()
t = threadingCurrentThread()
try:
additionalInfo = t.additionalInfo
except AttributeError:
additionalInfo = PyDBAdditionalThreadInfo()
t.additionalInfo = additionalInfo
while not debugger.readyToRun:
time.sleep(0.1) # busy wait until we receive run command
# note that we do that through pydevd_tracing.SetTrace so that the user
# is not warned about the tracing function being replaced!
pydevd_tracing.SetTrace(debugger.trace_dispatch)
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
# As this is the first connection, also set tracing for any untraced threads
debugger.setTracingForUntracedContexts(ignore_frame=GetFrame(), overwrite_prev_trace=overwrite_prev_trace)
# Stop the tracing as the last thing before the actual shutdown for a clean exit.
atexit.register(stoptrace)
PyDBCommandThread(debugger).start()
CheckOutputThread(debugger).start()
#Suspend as the last thing after all tracing is in place.
if suspend:
debugger.setSuspend(t, CMD_THREAD_SUSPEND)
else:
# ok, we're already in debug mode, with all set, so, let's just set the break
debugger = GetGlobalDebugger()
debugger.SetTraceForFrameAndParents(GetFrame(), False)
t = threadingCurrentThread()
try:
additionalInfo = t.additionalInfo
except AttributeError:
additionalInfo = PyDBAdditionalThreadInfo()
t.additionalInfo = additionalInfo
pydevd_tracing.SetTrace(debugger.trace_dispatch)
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
if suspend:
debugger.setSuspend(t, CMD_THREAD_SUSPEND)
def stoptrace():
global connected
if connected:
pydevd_tracing.RestoreSysSetTraceFunc()
sys.settrace(None)
try:
#not available in jython!
threading.settrace(None) # for all future threads
except:
pass
from pydev_monkey import undo_patch_thread_modules
undo_patch_thread_modules()
debugger = GetGlobalDebugger()
if debugger:
debugger.SetTraceForFrameAndParents(
GetFrame(), also_add_to_passed_frame=True, overwrite_prev_trace=True, dispatch_func=lambda *args:None)
debugger.exiting()
killAllPydevThreads()
connected = False
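# Illustrative counterpart to settrace (assumed usage): a script that attached
# with pydevd.settrace(...) can detach again with
#
#     import pydevd
#     pydevd.stoptrace()
#
# which, per the function above, restores sys.settrace/threading.settrace,
# un-patches the thread modules and notifies the IDE through debugger.exiting().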
class Dispatcher(object):
def __init__(self):
self.port = None
def connect(self, host, port):
self.host = host
self.port = port
self.client = StartClient(self.host, self.port)
self.reader = DispatchReader(self)
self.reader.dontTraceMe = False #we run the reader in the same thread so we don't want to lose tracing
self.reader.run()
def close(self):
try:
self.reader.doKillPydevThread()
except :
pass
class DispatchReader(ReaderThread):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
ReaderThread.__init__(self, self.dispatcher.client)
def OnRun(self):
dummy_thread = threading.currentThread()
dummy_thread.is_pydev_daemon_thread = False
return ReaderThread.OnRun(self)
def handleExcept(self):
ReaderThread.handleExcept(self)
def processCommand(self, cmd_id, seq, text):
if cmd_id == 99:
self.dispatcher.port = int(text)
self.killReceived = True
DISPATCH_APPROACH_NEW_CONNECTION = 1 # Used by PyDev
DISPATCH_APPROACH_EXISTING_CONNECTION = 2 # Used by PyCharm
DISPATCH_APPROACH = DISPATCH_APPROACH_NEW_CONNECTION
def dispatch():
setup = SetupHolder.setup
host = setup['client']
port = setup['port']
if DISPATCH_APPROACH == DISPATCH_APPROACH_EXISTING_CONNECTION:
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
port = dispatcher.port
finally:
dispatcher.close()
return host, port
def settrace_forked():
'''
When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
'''
host, port = dispatch()
import pydevd_tracing
pydevd_tracing.RestoreSysSetTraceFunc()
if port is not None:
global connected
connected = False
CustomFramesContainerInit()
settrace(
host,
port=port,
suspend=False,
trace_only_current_thread=False,
overwrite_prev_trace=True,
patch_multiprocessing=True,
)
#=======================================================================================================================
# SetupHolder
#=======================================================================================================================
class SetupHolder:
setup = None
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
# parse the command line. --file is our last argument that is required
try:
sys.original_argv = sys.argv[:]
setup = processCommandLine(sys.argv)
SetupHolder.setup = setup
except ValueError:
traceback.print_exc()
usage(1)
if setup['print-in-debugger-startup']:
try:
pid = ' (pid: %s)' % os.getpid()
except:
pid = ''
sys.stderr.write("pydev debugger: starting%s\n" % pid)
fix_getpass.fixGetpass()
pydev_log.debug("Executing file %s" % setup['file'])
pydev_log.debug("arguments: %s"% str(sys.argv))
pydevd_vm_type.SetupType(setup.get('vm_type', None))
if os.getenv('PYCHARM_DEBUG'):
set_debug(setup)
DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', False)
DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = setup.get('DEBUG_TRACE_BREAKPOINTS', -1)
DebugInfoHolder.DEBUG_TRACE_LEVEL = setup.get('DEBUG_TRACE_LEVEL', -1)
port = setup['port']
host = setup['client']
f = setup['file']
fix_app_engine_debug = False
try:
import pydev_monkey
except:
pass #Not usable on jython 2.1
else:
if setup['multiprocess']: # PyDev
pydev_monkey.patch_new_process_functions()
elif setup['multiproc']: # PyCharm
pydev_log.debug("Started in multiproc mode\n")
# Note: we're not inside method, so, no need for 'global'
DISPATCH_APPROACH = DISPATCH_APPROACH_EXISTING_CONNECTION
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
if dispatcher.port is not None:
port = dispatcher.port
pydev_log.debug("Received port %d\n" %port)
pydev_log.info("pydev debugger: process %d is connecting\n"% os.getpid())
try:
pydev_monkey.patch_new_process_functions()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
else:
pydev_log.error("pydev debugger: couldn't get port for new debug process\n")
finally:
dispatcher.close()
else:
pydev_log.info("pydev debugger: starting\n")
try:
pydev_monkey.patch_new_process_functions_with_warning()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
# Only do this patching if we're not running with multiprocess turned on.
if f.find('dev_appserver.py') != -1:
if os.path.basename(f).startswith('dev_appserver.py'):
appserver_dir = os.path.dirname(f)
version_file = os.path.join(appserver_dir, 'VERSION')
if os.path.exists(version_file):
try:
stream = open(version_file, 'r')
try:
for line in stream.read().splitlines():
line = line.strip()
if line.startswith('release:'):
line = line[8:].strip()
version = line.replace('"', '')
version = version.split('.')
if int(version[0]) > 1:
fix_app_engine_debug = True
elif int(version[0]) == 1:
if int(version[1]) >= 7:
# Only fix from 1.7 onwards
fix_app_engine_debug = True
break
finally:
stream.close()
except:
traceback.print_exc()
try:
# In the default run (i.e.: run directly on debug mode), we try to patch stackless as soon as possible
# on a run where we have a remote debug, we may have to be more careful because patching stackless means
# that if the user already had a stackless.set_schedule_callback installed, he'd lose it and would need
# to call it again (because stackless provides no way of getting the last function which was registered
# in set_schedule_callback).
#
# So, ideally, if there's an application using stackless and the application wants to use the remote debugger
# and benefit from stackless debugging, the application itself must call:
#
# import pydevd_stackless
# pydevd_stackless.patch_stackless()
#
# itself to be able to benefit from seeing the tasklets created before the remote debugger is attached.
import pydevd_stackless
pydevd_stackless.patch_stackless()
except:
pass # It's ok not having stackless there...
debugger = PyDB()
is_module = setup['module']
if fix_app_engine_debug:
sys.stderr.write("pydev debugger: google app engine integration enabled\n")
curr_dir = os.path.dirname(__file__)
app_engine_startup_file = os.path.join(curr_dir, 'pydev_app_engine_debug_startup.py')
sys.argv.insert(1, '--python_startup_script=' + app_engine_startup_file)
import json
setup['pydevd'] = __file__
sys.argv.insert(2, '--python_startup_args=%s' % json.dumps(setup),)
sys.argv.insert(3, '--automatic_restart=no')
sys.argv.insert(4, '--max_module_instances=1')
# Run the dev_appserver
debugger.run(setup['file'], None, None, is_module, set_trace=False)
else:
# as to get here all our imports are already resolved, the psyco module can be
# changed and we'll still get the speedups in the debugger, as those functions
# are already compiled at this time.
try:
import psyco
except ImportError:
if hasattr(sys, 'exc_clear'): # jython does not have it
sys.exc_clear() # don't keep the traceback -- clients don't want to see it
pass # that's ok, no need to mock psyco if it's not available anyways
else:
# if it's available, let's change it for a stub (pydev already made use of it)
import pydevd_psyco_stub
sys.modules['psyco'] = pydevd_psyco_stub
if setup['save-signatures']:
if pydevd_vm_type.GetVmType() == pydevd_vm_type.PydevdVmType.JYTHON:
sys.stderr.write("Collecting run-time type information is not supported for Jython\n")
else:
# Only import it if we're going to use it!
from pydevd_signature import SignatureFactory
debugger.signature_factory = SignatureFactory()
try:
debugger.connect(host, port)
except:
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
traceback.print_exc()
sys.exit(1)
connected = True # Mark that we're connected when started from inside ide.
globals = debugger.run(setup['file'], None, None, is_module)
if setup['cmd-line']:
debugger.wait_for_commands(globals)
| apache-2.0 |
jeplus/jEPlus | scripts/scatter_plot_3.py | 1 | 2402 | from collections import defaultdict
import re
import csv # to get data from the file
import matplotlib.pyplot as plt # to make the plot
def load_data(filename, *keys):
result = defaultdict(list)
with open(filename, 'r') as f: # initialise the file handle
reader = csv.DictReader(f, skipinitialspace=True) # wrap it with a csv DictReader
for row in reader: # loop over the data rows - each is passed into the row variable as a dictionary
if row['Message'] == 'EnergyPlus Completed Successfully': # skip failed or incomplete simulation runs
for key in keys: # loop over the provided keys
value = float(row[key]) # get string from row and convert to float
result[key].append(value) # grow the data
return result
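# Illustrative input (assumed layout): the file read above is a jEPlus/EnergyPlus
# results CSV whose header must contain a 'Message' column plus one column per
# requested key, e.g. 'Heating:DistrictHeating [J](RunPeriod)'. Rows whose
# Message is not 'EnergyPlus Completed Successfully' are skipped, and the
# remaining values come back as {key: [float, float, ...]}.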
def plot_data(data, outfilename, *keys):
nkeys = len(keys)
fig, axarr = plt.subplots(nkeys, nkeys) # create a figure with lots of axes
for x, x_key in enumerate(keys):
for y, y_key in enumerate(keys):
ax = axarr[y][x]
ax.scatter(data[x_key], data[y_key], s=10, alpha=0.25) # scatter of the y-axis key against the x-axis key
if x == 0:
ax.set_ylabel(axis_label(y_key), fontsize=10)
if y == nkeys - 1:
ax.set_xlabel(axis_label(x_key), fontsize=10)
ax.grid(True)
fig.tight_layout()
#plt.savefig(outfilename)
plt.show()
def axis_label(key):
pattern = re.compile("(\w+):(\w+)\s*\[(\w+)\]")
match = pattern.match(key)
name, unit = match.group(1, 3)
return "%s (%s)" % (name, unit)
def load_and_plot(infile, outfile, *keys):
data = load_data(infile, *keys)
plot_data(data, outfile, *keys)
if __name__ == "__main__":
import sys
usage = "Usage: %s output_folder key1 [key2 [key3 ..]]" % sys.argv[0]
args = sys.argv[1:]
outfile = 'example.png'
if len(args) <= 1:
heat_key = 'Heating:DistrictHeating [J](RunPeriod)'
cool_key = 'Cooling:DistrictCooling [J](RunPeriod)'
equip_key = 'InteriorEquipment:Electricity [J](RunPeriod)'
light_key = 'InteriorLights:Electricity [J](RunPeriod)'
keys = [heat_key, cool_key, equip_key, light_key]
else:
keys = args[1:]
# keys for getting data (column headings become dictionary keys)
load_and_plot(sys.argv[1] + 'AllCombinedResults.csv', outfile, *keys)
| gpl-3.0 |
stwunsch/gnuradio | gr-filter/examples/channelize.py | 58 | 7003 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
self._ifs = M*self._fs # input sampling rate (before channelizing)
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
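# (Illustrative) running this example directly, e.g.
#
#     python channelize.py
#
# builds the flowgraph above, pushes the nine summed tones through the
# polyphase filterbank channelizer and then plots the input spectrum plus the
# per-channel spectra and time series via the pylab code in main() below.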
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
jjx02230808/project0223 | sklearn/metrics/tests/test_classification.py | 20 | 50188 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
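# (Illustrative) the fixture above is used throughout this file as
#     y_true, y_pred, probas_pred = make_prediction(binary=True)
# which yields the 50 held-out iris samples restricted to two classes, the
# SVC's label predictions and the positive-class probabilities.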
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
# No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here, if we go from left to right in y_true, the 0 values are
# separated from the 1 values, so it appears that we've
# correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
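# For reference (added note, not an sklearn docstring): the quantity checked in
# the test above is the Matthews correlation coefficient
#     MCC = (TP*TN - FP*FN) / sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))
# re-derived directly from the flattened 2x2 confusion matrix.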
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
# sp_hamming only works with 1-D arrays
assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
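# --- Illustrative sketch, not part of the original test suite ---
# Worked arithmetic for the sample_weight assertion above, assuming weights
# are applied per row: y1 and y2 disagree on 1 of the 3 labels of the first
# row only, so with w = [1, 3] the weighted Hamming loss is
# (1 * 1 + 3 * 0) / ((1 + 3) * 3) = 1 / 12.  The helper name is hypothetical.
def _weighted_hamming_example():
    import numpy as np
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    w = np.array([1, 3])
    mismatches_per_row = (y1 != y2).sum(axis=1)  # -> array([1, 0])
    return (w * mismatches_per_row).sum() / float(w.sum() * y1.shape[1])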
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
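# --- Illustrative note, not part of the original test suite ---
# Per the intersection/union sizes listed above, the two samples score
# 1/2 and 2/2, and jaccard_similarity_score averages them:
# (0.5 + 1.0) / 2 == 0.75, which is the first assertion.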
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
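# --- Illustrative sketch, not part of the original test suite ---
# The hand-built dummy_losses above follow the multiclass hinge rule
# loss_i = max(0, 1 - d_i[true] + max_{j != true} d_i[j]).  The hypothetical
# helper below computes the same mean loss without hard-coding the
# runner-up column for each sample.
def _multiclass_hinge_example(y_true, pred_decision):
    import numpy as np
    pred_decision = np.asarray(pred_decision, dtype=float)
    losses = []
    for true_label, row in zip(y_true, pred_decision):
        others = np.delete(row, true_label)  # decision values of the other classes
        losses.append(max(0.0, 1.0 - row[true_label] + others.max()))
    return np.mean(losses)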
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
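# --- Illustrative note, not part of the original test suite ---
# true_score above is simply the mean squared difference between the 0/1
# labels and the predicted probabilities, i.e. the definition of the Brier
# score that brier_score_loss is being checked against.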
| bsd-3-clause |
sbxzy/pygks | pygks/ui_isoinn.py | 1 | 8603 | from . import isoinn2
from pygraph.classes.graph import graph
from pygraph.algorithms.minmax import cut_tree
from pygraph.algorithms.accessibility import connected_components
from .utils import __dict_reverse as dict_reverse
import itertools
import time
from numpy import array,sum,sqrt
class data_block:
"""This is the programming and user interface for SOINN. data is the training dataset, should be array or list, with each element numpy arrays with the same dimension.
no_label is True or False. While set to False, the last element of each array in data will be treated as labels.
The rest of the variables are training settings for SOINN."""
nodes = [] #:Weight of neurons.
gr = {} #:Topology structures.
def __init__(self,data,no_label = True,age_max = 200,nn_lambda = 70,alpha = 2.0,del_noise = True,un_label = 0):
isoinn2.set_parameter(age_max,nn_lambda,alpha,0,del_noise)
timecost = time.time()
t = 0
gr = graph()
if no_label:
for n_point in data:
t += 1
isoinn2.step(n_point,un_label,t)
else:
for n_point in data:
t += 1
n_data = list(n_point)
n_X = array(n_data[0:-1])
n_Y = n_data[-1]
isoinn2.step(n_X,n_Y,t)
isoinn2.step(array([]),0,-1)
print('time cost',time.time() - timecost)
self.nodes = isoinn2.setN
self.gr = isoinn2.gr
print(len(self.nodes))
def output_graph(self):
"""Return the topology structure as a python-graph."""
return self.gr
def output_nodes(self):
"""Return the list of neuron weights."""
return self.nodes
def graph_features(self):
"""Generating topological features including vertice orders for future use."""
gr_nodes = self.gr.nodes()
gr_edges = self.gr.edges()
node_count = len(gr_nodes)
edge_count = len(gr_edges) / 2.0
average_order = 0.0
clustering_coefficient = 0.0
max_order = 0
for each_node in gr_nodes:
#for orders
current_node_order = self.gr.node_order(each_node)
average_order += current_node_order
max_order = max(max_order,current_node_order)
#now for clustering coefficient
direct_neighbors = self.gr.neighbors(each_node)
tmp_v_edge_count = 0.0
tmp_r_edge_count = 0.0
for virtual_edge in itertools.product(direct_neighbors,direct_neighbors):
if virtual_edge[0] != virtual_edge[1]:
tmp_v_edge_count += 1.0
if self.gr.has_edge(tuple(virtual_edge)):
tmp_r_edge_count += 1.0
if tmp_v_edge_count == 0:
clustering_coefficient += 0.0
else:
clustering_coefficient += (tmp_r_edge_count / tmp_v_edge_count)
clustering_coefficient /= float(node_count)
average_order /= float(node_count)
#for kernel order
cut_dict = cut_tree(self.gr)
cut_places = set(cut_dict.values())
how_many_kernel_orders = list(range(5))
kernel_orders = []
bloods = 0.0
for kernel_tick in how_many_kernel_orders:
if kernel_tick in cut_places:
bloods += 1.0
kernel_orders.append(bloods)
#for redundant edges and missing edges
redundant_edges = 0.0
missing_edges = 0.0
for each_edge in gr_edges:
node0 = each_edge[0]
node1 = each_edge[1]
#find common set of nodes' neighbors
common_set = set(self.gr.neighbors(node0)).intersection(set(self.gr.neighbors(node1)))
if len(common_set) == 0:
missing_edges += 1.0
elif len(common_set) > 1:
in_cell_edges = list(itertools.combinations(list(common_set),2))
cell_judge = True
for cell_edge in in_cell_edges:
if self.gr.has_edge(cell_edge):
cell_judge = False
if cell_judge == False:
redundant_edges += 1.0
if edge_count != 0.0:
redundant_edges /= float(edge_count)
missing_edges /= float(edge_count)
#average edge lenghth
total_length = 0.0
for each_edge in gr_edges:
node0 = each_edge[0]
node1 = each_edge[1]
total_length += sqrt(sum((self.nodes[node0] - self.nodes[node1])**2))
if len(gr_edges) == 0:
average_length = 0.0
else:
average_length = total_length / float(len(gr_edges))
return [average_length,node_count,edge_count,average_order,max_order,redundant_edges,missing_edges] + kernel_orders
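    # --- Illustrative note, not part of the original module ---
    # The returned list is, in order: average edge length, node count, edge
    # count, average vertex order, maximum vertex order, the redundant- and
    # missing-edge ratios, followed by the five cumulative cut-tree counts
    # built above (the "kernel orders").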
def draw_2d(self, scale = 1, axis_ = False):
"""Draws the topology structure and neurons. scale is real number, it can be set arbitrarily to adjust the size
of drawed neuron clusters. axis is True or False, and means weither to enable axis in the final drawings.
In this method, MDS is used for drawing high dimensional Euclidean graphs. If you do not use this method, sklearn is
not a prerequisite for running the pygks software."""
groups = connected_components(self.gr)
if len(self.nodes[0]) != 2:
            print('using MDS for non-2D drawing')
from sklearn import manifold
from sklearn.metrics import euclidean_distances
similarities = euclidean_distances(self.nodes)
for i in range(len(self.nodes)):
for j in range(len(self.nodes)):
if groups[i] == groups[j]:
similarities[i,j] *= scale
mds = manifold.MDS(n_components=2, max_iter=500, eps=1e-7,dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
draw_nodes = pos
else:
draw_nodes = self.nodes
print('now_drawing')
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
node_count = len(draw_nodes)
for i in range(node_count):
for j in range(i,node_count):
if self.gr.has_edge((i,j)):
ax.plot([draw_nodes[i][0], draw_nodes[j][0]],[draw_nodes[i][1], draw_nodes[j][1]], color='k', linestyle='-', linewidth=1)
group_counts = len(set(groups.values()))
style_tail = ['.','o','x','^','s','+']
style_head = ['b','r','g','k']
style_list = []
for each in itertools.product(style_head,style_tail):
style_list.append(each[0]+each[1])
i = 0
for each in draw_nodes:
ax.plot(each[0],each[1],style_list[groups[i]-1])
i += 1
if not axis_:
plt.axis('off')
plt.show()
def outlier_nn(self,positive = 1,negative = -1):
"""This method finds the largest neuron cluster. If a neuron belongs to this cluster, a label specified by positive will
        be added to this neuron; otherwise this neuron will be labeled with the negative variable. The labeled results will be
        output in a list as labels_final."""
groups = connected_components(self.gr)
#find the largest group
group_counts = dict_reverse(groups)
max_count = 0
for keys,values in list(group_counts.items()):
if len(values) > max_count:
max_count = len(values)
max_group = keys
affines = {}
for keys,values in list(groups.items()):
if values == max_group:
affines[values] = positive
else:
affines[values] = negative
#this is only for outlier detection
for values in list(groups.values()):
if values not in list(affines.keys()):
affines[values] = -1
for keys,values in list(groups.items()):
groups[keys] = affines[values]
labels_final = []
for i in range(len(self.nodes)):
labels_final.append(groups[i])
print(labels_final)
return self.nodes, labels_final
def counts(self):
"""Output the winning times of each neuron and the accumulated errors of the SOINN network."""
return isoinn2.accumulated, isoinn2.numbers
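# --- Illustrative usage sketch, not part of the original module ---
# A minimal end-to-end example of the data_block interface documented in the
# class docstring above, assuming a tiny unlabeled 2-D dataset; the function
# and variable names here are hypothetical.
def _example_usage():
    from numpy import array
    toy_data = [array([0.0, 0.0]), array([0.1, 0.2]), array([5.0, 5.1])]
    block = data_block(toy_data, no_label=True, age_max=200, nn_lambda=70)
    topology = block.output_graph()  # python-graph holding the learned edges
    neurons = block.output_nodes()   # list of neuron weight vectors
    return topology, neurons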
if __name__ == '__main__':
print('sb')
| bsd-3-clause |
JFriel/honours_project | networkx/doc/make_gallery.py | 35 | 2453 | """
Generate a thumbnail gallery of examples.
"""
from __future__ import print_function
import os, glob, re, shutil, sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot
import matplotlib.image
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
examples_source_dir = '../examples/drawing'
examples_dir = 'examples/drawing'
template_dir = 'source/templates'
static_dir = 'source/static/examples'
pwd=os.getcwd()
rows = []
template = """
{%% extends "layout.html" %%}
{%% set title = "Gallery" %%}
{%% block body %%}
<h3>Click on any image to see source code</h3>
<br/>
%s
{%% endblock %%}
"""
link_template = """
<a href="%s"><img src="%s" border="0" alt="%s"/></a>
"""
if not os.path.exists(static_dir):
os.makedirs(static_dir)
os.chdir(examples_source_dir)
all_examples=sorted(glob.glob("*.py"))
# check for out of date examples
stale_examples=[]
for example in all_examples:
png=example.replace('py','png')
png_static=os.path.join(pwd,static_dir,png)
if (not os.path.exists(png_static) or
os.stat(png_static).st_mtime < os.stat(example).st_mtime):
stale_examples.append(example)
for example in stale_examples:
print(example, end=" ")
png=example.replace('py','png')
matplotlib.pyplot.figure(figsize=(6,6))
stdout=sys.stdout
sys.stdout=open('/dev/null','w')
try:
execfile(example)
sys.stdout=stdout
print(" OK")
except ImportError as strerr:
sys.stdout=stdout
sys.stdout.write(" FAIL: %s\n" % strerr)
continue
matplotlib.pyplot.clf()
im=matplotlib.image.imread(png)
fig = Figure(figsize=(2.5, 2.5))
canvas = FigureCanvas(fig)
    ax = fig.add_axes([0,0,1,1], aspect='auto', frameon=False, xticks=[], yticks=[])
# basename, ext = os.path.splitext(basename)
ax.imshow(im, aspect='auto', resample=True, interpolation='bilinear')
thumbfile=png.replace(".png","_thumb.png")
fig.savefig(thumbfile)
shutil.copy(thumbfile,os.path.join(pwd,static_dir,thumbfile))
shutil.copy(png,os.path.join(pwd,static_dir,png))
basename, ext = os.path.splitext(example)
link = '%s/%s.html'%(examples_dir, basename)
rows.append(link_template%(link, os.path.join('_static/examples',thumbfile), basename))
os.chdir(pwd)
fh = open(os.path.join(template_dir,'gallery.html'), 'w')
fh.write(template%'\n'.join(rows))
fh.close()
| gpl-3.0 |
kjordahl/xray | doc/conf.py | 3 | 13424 | # -*- coding: utf-8 -*-
#
# xray documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 6 18:57:54 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
print "python exec:", sys.executable
print "sys.path:", sys.path
try:
import numpy
print "numpy: %s, %s" % (numpy.__version__, numpy.__file__)
except ImportError:
print "no numpy"
try:
import scipy
print "scipy: %s, %s" % (scipy.__version__, scipy.__file__)
except ImportError:
print "no scipy"
try:
import pandas
print "pandas: %s, %s" % (pandas.__version__, pandas.__file__)
except ImportError:
print "no pandas"
try:
import matplotlib
print "matplotlib: %s, %s" % (matplotlib.__version__, matplotlib.__file__)
except ImportError:
print "no matplotlib"
try:
import IPython
print "ipython: %s, %s" % (IPython.__version__, IPython.__file__)
except ImportError:
print "no ipython"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Monkey patch inspect.findsource to work around a Python bug that manifests on
# RTD. Copied from IPython.core.ultratb.
# Reference: https://github.com/ipython/ipython/issues/1456
import linecache
import re
from inspect import getsourcefile, getfile, getmodule,\
ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An IOError
is raised if the source code cannot be retrieved.
FIXED version with which we monkeypatch the stdlib to work around a bug."""
file = getsourcefile(object) or getfile(object)
# If the object is a frame, then trying to get the globals dict from its
# module won't work. Instead, the frame object itself has the globals
# dictionary.
globals_dict = None
if inspect.isframe(object):
# XXX: can this ever be false?
globals_dict = object.f_globals
else:
module = getmodule(object, file)
if module:
globals_dict = module.__dict__
lines = linecache.getlines(file, globals_dict)
if not lines:
raise IOError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise IOError('could not find class definition')
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise IOError('could not find function definition')
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
pmatch = pat.match
# fperez - fix: sometimes, co_firstlineno can give a number larger than
# the length of lines, which causes an error. Safeguard against that.
lnum = min(object.co_firstlineno,len(lines))-1
while lnum > 0:
if pmatch(lines[lnum]): break
lnum -= 1
return lines, lnum
raise IOError('could not find code object')
import inspect
inspect.findsource = findsource
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinx.ext.mathjax',
'numpydoc',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
]
extlinks = {'issue': ('https://github.com/xray/xray/issues/%s', 'GH')}
autosummary_generate = True
numpydoc_class_members_toctree = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'xray'
copyright = u'2014, xray Developers'
import xray
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = xray.version.short_version
# The full version, including alpha/beta/rc tags.
release = xray.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'xraydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'xray.tex', u'xray Documentation',
u'xray Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'xray', u'xray Documentation',
[u'xray Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'xray', u'xray Documentation',
u'xray Developers', 'xray', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/2.7/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'iris': ('http://scitools.org.uk/iris/docs/latest/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
| apache-2.0 |
ghortiz-pena/BrownDwarfModel | Model.py | 1 | 3987 | def header(f_name):
from numpy import pi
f = open(f_name)
line = f.readline()
l = line.split()
while l[0] == "#":
line = f.readline()
l = line.split()
n = int(l[0])
distr = l[1]
line = f.readline()
l = line.split()
P = int(l[0])
B = float(l[1])
beta = float(l[2])
inc = float(l[3])*pi/180
Ls = []
ds = []
lngs = []
for i in range(n):
line = f.readline()
l = line.split()
Ls.append(float(l[0]))
ds.append(float(l[1])*pi/180)
lngs.append(float(l[2])*pi/180)
return n, distr, P, B, beta, inc, Ls, ds, lngs
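# --- Illustrative note, not part of the original script ---
# header() expects a plain-text file such as the hypothetical 'dynsim.in'
# sketched below: optional leading '#' comment lines, then "n distr", then
# "P B beta inc" (inc in degrees), then n lines of "L d lng" (d and lng in
# degrees).  The numbers shown are made up purely for illustration.
#
#   # example dynsim.in
#   2 shell
#   7200 3000.0 0.1 60.0
#   2.0 10.0 0.0
#   3.0 20.0 90.0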
def main():
import numpy as np
import matplotlib
matplotlib.use('PS')
import matplotlib.pyplot as plt
import Coordinates as coords
# First, reading in physical constants in MKS units
m = 9.11e-31 # mass of an electron, in kg
e_0 = 1.6e-19 # charge of an electron, in Coulombs
pi = np.pi # pi
n, distr, P, B_0, beta, inc, L, d, lng = header('dynsim.in')
# the local electron cyclotron frequency at the radius of the star, for v/c = 0.1; in Hz
f_0 = e_0 * (B_0/(2*pi*m))
pos = np.array([])
phase = np.array([i/float(P) for i in range(2*P)]) # phase, to plot the results
I = np.array([])
f = np.array([])
for t in range(2*P):
phis = np.array([(i*5) * 2*pi/P for i in range(5*n)])
# making 5 relevant lines per magnetic loop, offset by 5 seconds
for i in range(n):
phis[i*5 : (i+1)*5] = phis[i*5 : (i+1)*5] + (t%P)*((2*pi)/P) + lng[i]
        # Each line will be at r = L[i/5], theta = inc, phi = phis[i] in the rotating frame
lines = np.array([coords.Spherical([L[i/5], inc, phis[i]]) for i in range(5*n)])
for i in range(5*n):
lines[i] = lines[i].rotate(d[i/5]) # First the coordinates are rotated into the dipole frame
# Then the equation for the L-shell is used to determine the 'true' distance from the center
lines[i].q['r'] = lines[i].q['r'] * np.cos(pi/2 - lines[i].q['theta'])**2
# calculating the frequency relative to the local electron cyclotron frequency at the radius of the star
# determining the beaming angle of the emission
if distr == "shell":
beam = pi/2 - np.arccos(2 * np.cos(lines[i].q['theta']) / np.sqrt(1 + 3 * np.cos(lines[i].q['theta'])**2))
f_i = min(1, np.sqrt(1 + 3 * np.cos(lines[i].q['theta'])**2) / (lines[i].q['r']**(3)) * np.sqrt(1 - beta**2))
if distr == "cone":
f_i = min(1, np.sqrt(1 + 3 * np.cos(lines[i].q['theta'])**2) / (lines[i].q['r']**(3)) / np.sqrt(1 - beta**2))
beam = np.arccos(beta / np.sqrt(1 - f_i))
f = np.append(f, f_i)
mu = abs((lines[i].q['theta'] - beam)/(2*pi))
# Determining circular polarization
if lines[i].q['theta'] < pi/2:
CP = 1
else:
CP = -1
I = np.append(I, CP * np.exp(-np.power(mu, 2.) / (2 * np.power(0.01, 2.)))/np.sqrt(2*pi*0.0001))
pos = np.append(pos, lines)
pos.shape = (2*P, 5*n)
f.shape = (2*P, 5*n)
I.shape = (2*P, 5*n)
# This section is simply constructing the 2-dimensional array so it's simple to view using matplotlib
f = f * 1000
t = phase*P
flux = np.zeros((1001, 2*P))
for i in range(2*P):
for j in range(5*n):
flux[f[i, j], t[i]] = I[i, j]
print np.amax(f) / 1000 * f_0 / 1e9
dims = [0, 2, 0, f_0 / 1e9]
plt.imshow(flux, aspect = 'auto', origin = 'lower', extent = dims, cmap='gray')
#
plt.ylabel(r"Frequency (GHz)")
plt.xlabel(r"Phase")
plt.title("Relative Intensity")
plt.colorbar()
plt.savefig('../../Desktop/DynSim/Spectrum')
#plt.plot(phase, I[:, 1])
plt.show()
if __name__ == '__main__':
main()
| mit |
wazeerzulfikar/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 124 | 1877 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
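# --- Illustrative note, not part of the original example ---
# For x in [0, 1], l1(x) simplifies to 1 - |x|, so mirroring it across both
# axes traces the L1 unit ball |w0| + |w1| = 1, while l2(x) = sqrt(1 - x^2)
# traces the L2 unit circle w0^2 + w1^2 = 1.  el(x, z) is assumed to be the
# closed-form positive root, in w1, of the corresponding elastic-net
# unit-ball equation.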
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # alpha = 0.5 would cause division by zero in el()
cross(1.2)
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"
lw = 2
plt.plot(xs, l1(xs), color=l1_color, label="L1", lw=lw)
plt.plot(xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(xs, l2(xs), color=l2_color, label="L2", lw=lw)
plt.plot(xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(xs, el(xs, alpha), color=elastic_net_color, label="Elastic Net", lw=lw)
plt.plot(xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
meteorcloudy/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 30 | 60826 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
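# The tests below follow a common pattern: build feature columns, construct a
# DNN estimator (DNNClassifier, DNNRegressor, or DNNEstimator with an explicit
# head), fit it for a few steps through an input_fn, then assert on the
# evaluation metrics or predictions.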
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertClassificationPredictions(
self, expected_len, n_classes, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, range(n_classes))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=300)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, n_classes, predicted_proba)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
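      # The strided_slice below is equivalent to predictions[:, 1:2];
      # end_mask=1 lets the row slice run to the end of the batch.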
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
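    # (2e7 buckets with a 1-dimensional embedding is roughly 2e7 floats, well
    # above the default minimum slice size, so the variable gets split across
    # the two fake parameter-server tasks configured below.)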
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def _assertRegressionOutputs(
self, predictions, expected_shape):
predictions_nparray = np.array(predictions)
self.assertAllEqual(expected_shape, predictions_nparray.shape)
self.assertTrue(np.issubdtype(predictions_nparray.dtype, np.floating))
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self._assertRegressionOutputs(predicted_scores, [3])
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self._assertRegressionOutputs(predicted_scores, [3])
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
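    # A two-element tuple key such as ('my_metric', 'scores') names the metric
    # and the prediction key it consumes; a plain string key applies the metric
    # to the estimator's default predictions.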
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
SenGonzo/ia_tools | Analysis_sketchpad.py | 1 | 3517 |
import pandas as pd
import Attack_Calc as atk
def data_input():
# import and fold data
df = pd.read_csv('input_data/units.csv')
# df = df[(df['faction'] == 'scum')]
# df = df[(df['single model health'] >= 5)]
df.sort_values(by=['name'], ascending=[True], inplace=True)
rez_df = pd.DataFrame(columns=('name', 'cost', 'type', 'group', 'blk_grp_ev', 'blk_cost_eff', 'wht_grp_ev', 'wht_cost_eff',
'health', 'hit_ef'))
x = 0
for index, row in df.iterrows():
print(row['name'])
surge_1 = [int(i) for i in row['surge 1'].split(',')]
surge_2 = [int(i) for i in row['surge 2'].split(',')]
surge_3 = [int(i) for i in row['surge 3'].split(',')]
surge_4 = [int(i) for i in row['surge 4'].split(',')]
attribute_array = [row['damage att'], row['surge att'], row['acc att'], 0, 0, 0]
if row['deadly'] == 1:
deadly = True
else:
deadly = False
blk_ev, blk_var, blk_x_array, blk_y_array = atk.results_calc(row['name'],
row['dice'].split(', '), ['black'],
surge_array=[surge_1, surge_2, surge_3, surge_4],
attribute_array=attribute_array,
distance=0,
deadly=deadly,
number_of_attacks=1,
atk_reroll_attack=row['reroll attack'],
atk_reroll_def=row['reroll def'])
wht_ev, wht_var, wht_x_array, wht_y_array = atk.results_calc(row['name'],
row['dice'].split(', '), ['white'],
surge_array=[surge_1, surge_2, surge_3, surge_4],
attribute_array=attribute_array,
distance=0,
deadly=deadly,
number_of_attacks=1,
atk_reroll_attack=row['reroll attack'],
atk_reroll_def=row['reroll def'])
blk_grp_ev = blk_ev
blk_cost_eff = blk_grp_ev / row['cost']
wht_grp_ev = wht_ev
wht_cost_eff = wht_grp_ev / row['cost']
hit_ef = row['group'] * row['single model health'] * 1.0000 / row['cost']
rez_df.loc[x] = [row['name'], row['cost'], row['type'], row['group'], blk_grp_ev, blk_cost_eff, wht_grp_ev, wht_cost_eff,
row['single model health'], hit_ef]
x += 1
return rez_df
def create_stack_rank(rez_df):
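    # Interpretive note (an addition, hedged): 'target' scores each unit by its
    # expected attack damage (black-die plus white-die defence results) per
    # point of health, so the sorted CSV lists the highest threat-per-health
    # units first.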
rez_df['target'] = (rez_df['blk_grp_ev'] + rez_df['wht_grp_ev'])/(rez_df['health'])
rez_df.sort_values(by=['target'], ascending=[False], inplace=True)
rez_df.to_csv('output_data/target_order.csv')
| mit |
dilawar/moose-full | moose-examples/_travis/test_all_scripts.py | 2 | 2516 | #!/usr/bin/python
"""test_all_scripts.py:
Run each Python script in this repository that defines a main() function, as a smoke test.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import os
from collections import defaultdict
import subprocess
import logging
import tempfile
import shutil
blacklisted_files_ = []
interactive_files_ = []
with open("BLACKLISTED", "r") as bf:
blacklisted_files_ = bf.read().split("\n")
with open("INTERACTIVE", "r") as bf:
interactive_files_ = bf.read().split("\n")
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='test.log',
filemode='w'
)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
_logger = logging.getLogger('')
scripts_ = defaultdict(list)
def run_file(directory, f):
    filepath = os.path.join(directory, f)
    _logger.info("Running file %s" % filepath)
    # TODO: copy the matplotlibrc file into the running directory before executing.
    output = None
    try:
        command = ["python", filepath]
        output = subprocess.check_output(command)
    except Exception as e:
        logging.warn("Failed to run %s: %s" % (filepath, e))
def test_dir(directory, fileList):
    # Stage a copy of the directory in a scratch location.
    shutil.copytree(directory, os.path.join(tempfile.mkdtemp(), 'scripts'))
    for f in fileList:
        filepath = os.path.join(directory, f)
        with open(filepath, "r") as fh:
            filetext = fh.read()
if "main(" in filetext:
_logger.info("This script contains main function")
run_file(directory, f)
else:
_logger.debug("Does not contain main function. Don't run")
def testDirectories():
global scripts_
for d in scripts_:
_logger.info("Testing in directory %s" % d)
test_dir(d, scripts_[d])
def main():
for d, sd, fs in os.walk('.'):
for f in fs:
if ".py" in f:
scripts_[d].append(f)
testDirectories()
if __name__ == '__main__':
main()
| gpl-2.0 |
abimannans/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |